Dataset schema (⌀ = nullable):

| column | type | lengths / range |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 values |
| lang | string | 1 value |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |
hexsha: fb8b63ad2ffbee810610ac48848eca279fdeb691 | size: 47 | ext: py | lang: Python
max_stars:  primeiro programa/primeiro_programa.py | Cesario115/Ola-mundo | 2949ff2c9dc1b5f8bc70825072751b19920019af | ["MIT"] | count: null | events: null
max_issues: primeiro programa/primeiro_programa.py | Cesario115/Ola-mundo | 2949ff2c9dc1b5f8bc70825072751b19920019af | ["MIT"] | count: null | events: null
max_forks:  primeiro programa/primeiro_programa.py | Cesario115/Ola-mundo | 2949ff2c9dc1b5f8bc70825072751b19920019af | ["MIT"] | count: null | events: null
content:
print('='*50)
print("Olá mundo!")
print('='*50)
avg_line_length: 15.666667 | max_line_length: 19 | alphanum_fraction: 0.574468 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 19 | score_documentation: 0.395833
hexsha: fb8c8cf1deb8bca5c92c0e2fc8aa8a95783848a5 | size: 6,922 | ext: py | lang: Python
max_stars:  src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | ["BSD-3-Clause"] | count: 2,151 | events: 2020-04-18T07:31:17.000Z to 2022-03-31T08:39:18.000Z
max_issues: src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | ["BSD-3-Clause"] | count: 395 | events: 2020-04-18T08:22:18.000Z to 2021-12-08T13:04:49.000Z
max_forks:  src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | ["BSD-3-Clause"] | count: 338 | events: 2020-04-18T08:03:10.000Z to 2022-03-29T12:33:22.000Z
content:
#!/usr/bin/env python2
#===- subzero/wasm-run-torture-tests.py - Subzero WASM Torture Test Driver ===//
#
# The Subzero Code Generator
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===-----------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import glob
import multiprocessing
import os
import Queue
import shutil
import StringIO
import sys
import threading
IGNORED_TESTS = set([
    # The remaining tests are known waterfall failures
    '20010122-1.c.wasm',
    '20031003-1.c.wasm',
    '20071018-1.c.wasm',
    '20071120-1.c.wasm',
    '20071220-1.c.wasm',
    '20071220-2.c.wasm',
    '20101011-1.c.wasm',
    'alloca-1.c.wasm',
    'bitfld-3.c.wasm',
    'bitfld-5.c.wasm',
    'builtin-bitops-1.c.wasm',
    'conversion.c.wasm',
    'eeprof-1.c.wasm',
    'frame-address.c.wasm',
    'pr17377.c.wasm',
    'pr32244-1.c.wasm',
    'pr34971.c.wasm',
    'pr36765.c.wasm',
    'pr39228.c.wasm',
    'pr43008.c.wasm',
    'pr47237.c.wasm',
    'pr60960.c.wasm',
    'va-arg-pack-1.c.wasm',

    '20000717-5.c.wasm',     # abort() (also works without emcc)
    '20001203-2.c.wasm',     # assert fail (works without emcc)
    '20040811-1.c.wasm',     # OOB trap
    '20070824-1.c.wasm',     # abort() (also works without emcc)
    'arith-rand-ll.c.wasm',  # abort() (works without emcc)
    'arith-rand.c.wasm',     # abort() (works without emcc)
    'pr23135.c.wasm',        # OOB trap (works without emcc)
    'pr34415.c.wasm',        # (empty output?)
    'pr36339.c.wasm',        # abort() (works without emcc)
    'pr38048-2.c.wasm',      # abort() (works without emcc)
    'pr42691.c.wasm',        # abort() (works without emcc)
    'pr43220.c.wasm',        # OOB trap (works without emcc)
    'pr43269.c.wasm',        # abort() (works without emcc)
    'vla-dealloc-1.c.wasm',  # OOB trap (works without emcc)
    '20051012-1.c.wasm',     # error reading binary
    '921208-2.c.wasm',       # error reading binary
    '920501-1.c.wasm',       # error reading binary
    'call-trap-1.c.wasm',    # error reading binary
    'pr44942.c.wasm',        # error reading binary
    '920625-1.c.wasm',       # abort() (also fails without emcc)
    '931004-10.c.wasm',      # abort() (also fails without emcc)
    '931004-12.c.wasm',      # abort() (also fails without emcc)
    '931004-14.c.wasm',      # abort() (also fails without emcc)
    '931004-6.c.wasm',       # abort() (also fails without emcc)
    'pr38051.c.wasm',        # (empty output?) (fails without emcc)
    'pr38151.c.wasm',        # abort() (fails without emcc)
    'pr44575.c.wasm',        # abort() (fails without emcc)
    'strct-stdarg-1.c.wasm', # abort() (fails without emcc)
    'strct-varg-1.c.wasm',   # abort() (fails without emcc)
    'va-arg-22.c.wasm',      # abort() (fails without emcc)
    'stdarg-3.c.wasm',       # abort() (fails without emcc)
    'pr56982.c.wasm',        # missing setjmp (wasm.js check did not catch)

    '20010605-2.c.wasm',     # missing __netf2
    '20020413-1.c.wasm',     # missing __lttf2
    '20030914-1.c.wasm',     # missing __floatsitf
    '20040709-1.c.wasm',     # missing __netf2
    '20040709-2.c.wasm',     # missing __netf2
    '20050121-1.c.wasm',     # missing __floatsitf
    '20080502-1.c.wasm',     # missing __eqtf2
    '920501-8.c.wasm',       # missing __extenddftf2
    '930513-1.c.wasm',       # missing __extenddftf2
    '930622-2.c.wasm',       # missing __floatditf
    '960215-1.c.wasm',       # missing __addtf3
    '960405-1.c.wasm',       # missing __eqtf2
    '960513-1.c.wasm',       # missing __subtf3
    'align-2.c.wasm',        # missing __eqtf2
    'complex-6.c.wasm',      # missing __subtf3
    'complex-7.c.wasm',      # missing __netf2
    'pr49218.c.wasm',        # missing __fixsfti
    'pr54471.c.wasm',        # missing __multi3
    'regstack-1.c.wasm',     # missing __addtf3
    'stdarg-1.c.wasm',       # missing __netf2
    'stdarg-2.c.wasm',       # missing __floatsitf
    'va-arg-5.c.wasm',       # missing __eqtf2
    'va-arg-6.c.wasm',       # missing __eqtf2
    'struct-ret-1.c.wasm',   # missing __extenddftf2
])
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--translate-only', action='store_true')
parser.add_argument('tests', nargs='*')
args = parser.parse_args()
OUT_DIR = "./build/wasm-torture"
results_lock = threading.Lock()
compile_count = 0
compile_failures = []
run_count = 0
run_failures = []
def run_test(test_file, verbose=False):
    global args
    global compile_count
    global compile_failures
    global results_lock
    global run_count
    global run_failures
    global OUT_DIR
    global IGNORED_TESTS

    run_test = not args.translate_only
    test_name = os.path.basename(test_file)
    obj_file = os.path.join(OUT_DIR, test_name + ".o")
    exe_file = os.path.join(OUT_DIR, test_name + ".exe")

    if not verbose and test_name in IGNORED_TESTS:
        print("\033[1;34mSkipping {}\033[1;m".format(test_file))
        return

    cmd = """LD_LIBRARY_PATH=../../../../v8/out/native/lib.target ./pnacl-sz \
    -filetype=obj -target=x8632 {} -threads=0 -O2 \
    -verbose=wasm -o {}""".format(test_file, obj_file)

    if not verbose:
        cmd += " &> /dev/null"

    out = StringIO.StringIO()
    out.write(test_file + " ...")

    status = os.system(cmd)
    if status != 0:
        print('\033[1;31m[compile fail]\033[1;m', file=out)
        with results_lock:
            compile_failures.append(test_file)
    else:
        compile_count += 1

        # Try to link and run the program.
        cmd = "clang -g -m32 {} -o {} " + \
              "./runtime/szrt.c ./runtime/wasm-runtime.cpp -lm -lstdc++"
        cmd = cmd.format(obj_file, exe_file)
        if not run_test or os.system(cmd) == 0:
            if not run_test or os.system(exe_file) == 0:
                with results_lock:
                    run_count += 1
                print('\033[1;32m[ok]\033[1;m', file=out)
            else:
                with results_lock:
                    run_failures.append(test_file)
                print('\033[1;33m[run fail]\033[1;m', file=out)
        else:
            with results_lock:
                run_failures.append(test_file)
            print('\033[1;33m[run fail]\033[1;m', file=out)

    sys.stdout.write(out.getvalue())
verbose = args.verbose
if len(args.tests) > 0:
    test_files = args.tests
else:
    test_files = glob.glob("./emwasm-torture-out/*.wasm")

if os.path.exists(OUT_DIR):
    shutil.rmtree(OUT_DIR)
os.mkdir(OUT_DIR)

tasks = Queue.Queue()

def worker():
    while True:
        run_test(tasks.get(), verbose)
        tasks.task_done()

for i in range(multiprocessing.cpu_count()):
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()

for test_file in test_files:
    tasks.put(test_file)

tasks.join()

if len(compile_failures) > 0:
    print()
    print("Compilation failures:")
    print("=====================\n")
    for f in compile_failures:
        print(" \033[1;31m" + f + "\033[1;m")

if len(run_failures) > 0:
    print()
    print("Run failures:")
    print("=============\n")
    for f in run_failures:
        print(" \033[1;33m" + f + "\033[1;m")

print("\n\033[1;32m{}\033[1;m / \033[1;33m{}\033[1;m / {} tests passed"
      .format(run_count, compile_count - run_count,
              run_count + len(compile_failures) + len(run_failures)))
avg_line_length: 30.095652 | max_line_length: 80 | alphanum_fraction: 0.647501 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,998 | score_documentation: 0.577579
hexsha: fb8c8d25a3c49500542e0cea8201311268f861d1 | size: 4,455 | ext: py | lang: Python
max_stars:  game_vs_ai.py | fernandojosuece/Minimax_AI_connect4 | e0110b7d3d25494b7e950c078eacd1337ee14f17 | ["MIT"] | count: null | events: null
max_issues: game_vs_ai.py | fernandojosuece/Minimax_AI_connect4 | e0110b7d3d25494b7e950c078eacd1337ee14f17 | ["MIT"] | count: null | events: null
max_forks:  game_vs_ai.py | fernandojosuece/Minimax_AI_connect4 | e0110b7d3d25494b7e950c078eacd1337ee14f17 | ["MIT"] | count: null | events: null
content:
import numpy as np
import pygame
import sys
import math
import random
from board import Board
from ai import Minimax_AI
# function to draw the board in pygame
def draw_board(board):
    for c in range(COLUMN_COUNT):
        for r in range(ROW_COUNT):
            pygame.draw.rect(screen, colors["blue"], (c*SQUARESIZE, r *
                             SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE))
            pygame.draw.circle(screen, colors["black"], (int(
                c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)

    for c in range(COLUMN_COUNT):
        for r in range(ROW_COUNT):
            if board[r][c] == 1:
                pygame.draw.circle(screen, colors["red"], (int(
                    c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
            elif board[r][c] == 2:
                pygame.draw.circle(screen, colors["yellow"], (int(
                    c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
    pygame.display.update()


if __name__ == '__main__':
    # colors for game
    colors = {"blue": (0, 0, 255),
              "black": (0, 0, 0),
              "red": (255, 0, 0),
              "yellow": (255, 255, 0)}
    # size of board
    ROW_COUNT = 6
    COLUMN_COUNT = 7
    # create board
    board = Board(ROW_COUNT, COLUMN_COUNT)
    # players
    players = [1, 2]
    # initialize AI
    ai_depth = 6
    ai_player = random.choice(players)
    ai = Minimax_AI(ai_depth, ai_player, ROW_COUNT, COLUMN_COUNT)
    # decide turns; if turn is 0 player moves first
    if ai_player == 2:
        turn = 0
    else:
        turn = 1

    pygame.init()
    SQUARESIZE = 100
    width = COLUMN_COUNT * SQUARESIZE
    height = (ROW_COUNT+1) * SQUARESIZE
    size = (width, height)
    RADIUS = int(SQUARESIZE/2 - 5)
    screen = pygame.display.set_mode(size)
    draw_board(board.status)
    pygame.display.update()
    myfont = pygame.font.SysFont("monospace", 75)
    game_over = False

    while not game_over:
        # Ask for Player 1 Input
        if turn == 0:
            turn_over = False
            while not turn_over:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        sys.exit()

                    if event.type == pygame.MOUSEMOTION:
                        pygame.draw.rect(
                            screen, colors["black"], (0, 0, width, SQUARESIZE))
                        posx = event.pos[0]
                        if turn == 0:
                            pygame.draw.circle(
                                screen, colors["red"], (posx, int(SQUARESIZE/2)), RADIUS)
                        else:
                            pygame.draw.circle(
                                screen, colors["yellow"], (posx, int(SQUARESIZE/2)), RADIUS)
                    pygame.display.update()

                    if event.type == pygame.MOUSEBUTTONDOWN:
                        pygame.draw.rect(
                            screen, colors["black"], (0, 0, width, SQUARESIZE))
                        # print(event.pos)
                        posx = event.pos[0]
                        col = int(math.floor(posx/SQUARESIZE))
                        if board.is_valid_location(col):
                            row = board.get_next_open_row(col)
                            board.insert_piece(row, col, 1)
                            turn_over = True
                            if board.is_winning_position(1):
                                label = myfont.render(
                                    "You win!!", 1, colors["red"])
                                screen.blit(label, (40, 10))
                                game_over = True
                        draw_board(board.status)
        # Ask for Player 2 Input
        else:
            col = ai.make_move(board.status)
            if board.is_valid_location(col):
                row = board.get_next_open_row(col)
                board.insert_piece(row, col, 2)
                if board.is_winning_position(2):
                    label = myfont.render(
                        "AI win!!", 1, colors["red"])
                    screen.blit(label, (40, 10))
                    game_over = True
                draw_board(board.status)

        turn += 1
        turn = turn % 2

        if game_over:
            pygame.time.wait(3000)
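
# Added interface note (the `board` and `ai` modules are not shown in this row;
# the contract below is inferred from the calls in this script and is an
# assumption, not the repo's documented API):
#
#   ai = Minimax_AI(depth, player, rows, cols)   # depth-6 minimax playing as 1 or 2
#   col = ai.make_move(board.status)             # takes the raw grid, returns a column 0..6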
avg_line_length: 32.05036 | max_line_length: 101 | alphanum_fraction: 0.489787 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 352 | score_documentation: 0.079012
hexsha: fb8cf968314b4148ab23bd50a8ea481955bbd517 | size: 963 | ext: py | lang: Python
max_stars:  {{cookiecutter.project_slug}}/sources/app/config/settings/components/security.py | AsheKR/cookiecutter-django | d0402aefcc2eeaffa747faa7ef50ad97286bfcca | ["BSD-3-Clause"] | count: null | events: null
max_issues: {{cookiecutter.project_slug}}/sources/app/config/settings/components/security.py | AsheKR/cookiecutter-django | d0402aefcc2eeaffa747faa7ef50ad97286bfcca | ["BSD-3-Clause"] | count: null | events: null
max_forks:  {{cookiecutter.project_slug}}/sources/app/config/settings/components/security.py | AsheKR/cookiecutter-django | d0402aefcc2eeaffa747faa7ef50ad97286bfcca | ["BSD-3-Clause"] | count: null | events: null
content:
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
SECURE_HSTS_SECONDS = 31536000
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = True
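
# Added note (not in the original template): SECURE_HSTS_SECONDS = 31536000 is
# 60 * 60 * 24 * 365 seconds, i.e. browsers cache the HSTS policy for one year.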
avg_line_length: 53.5 | max_line_length: 86 | alphanum_fraction: 0.805815 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 710 | score_documentation: 0.737279
hexsha: fb8cffb83eb9a76e5f50b56af16a0174fdb1dc32 | size: 12,118 | ext: py | lang: Python
max_stars:  dabest/bootstrap_tools.py | nitishkumarmishra/DABEST | 82490f587e9b0180f29baa2daf44aa86cc3f52aa | ["BSD-3-Clause-Clear"] | count: null | events: null
max_issues: dabest/bootstrap_tools.py | nitishkumarmishra/DABEST | 82490f587e9b0180f29baa2daf44aa86cc3f52aa | ["BSD-3-Clause-Clear"] | count: null | events: null
max_forks:  dabest/bootstrap_tools.py | nitishkumarmishra/DABEST | 82490f587e9b0180f29baa2daf44aa86cc3f52aa | ["BSD-3-Clause-Clear"] | count: null | events: null
content:
#!/usr/bin/python
# -*-coding: utf-8 -*-
# Author: Joses Ho
# Email : joseshowh@gmail.com
from __future__ import division
class bootstrap:
    '''Computes the summary statistic and a bootstrapped confidence interval.

    Keywords:
        x1, x2: array-like
            The data in a one-dimensional array form. Only x1 is required.
            If x2 is given, the bootstrapped summary difference between
            the two groups (x2-x1) is computed.
            NaNs are automatically discarded.

        paired: boolean, default False
            Whether or not x1 and x2 are paired samples.

        statfunction: callable, default np.mean
            The summary statistic called on data.

        smoothboot: boolean, default False
            Taken from seaborn.algorithms.bootstrap.
            If True, performs a smoothed bootstrap (draws samples from a kernel
            density estimate).

        alpha: float, default 0.05
            Denotes the likelihood that the confidence interval produced
            _does not_ include the true summary statistic. When alpha = 0.05,
            a 95% confidence interval is produced.

        reps: int, default 5000
            Number of bootstrap iterations to perform.

    Returns:
        A `bootstrap` object reporting the summary statistics, percentile CIs,
        bias-corrected and accelerated (BCa) CIs, and the settings used.

        summary: float
            The summary statistic.

        is_difference: boolean
            Whether or not the summary is the difference between two groups.
            If False, only x1 was supplied.

        is_paired: boolean
            Whether or not the difference reported is between 2 paired groups.

        statistic: callable
            The function used to compute the summary.

        reps: int
            The number of bootstrap iterations performed.

        stat_array: array
            A sorted array of values obtained by bootstrapping the input arrays.

        ci: float
            The size of the confidence interval reported (in percentage).

        pct_ci_low, pct_ci_high: floats
            The upper and lower bounds of the confidence interval as computed
            by taking the percentage bounds.

        pct_low_high_indices: array
            An array with the indices in `stat_array` corresponding to the
            percentage confidence interval bounds.

        bca_ci_low, bca_ci_high: floats
            The upper and lower bounds of the bias-corrected and accelerated
            (BCa) confidence interval. See Efron 1977.

        bca_low_high_indices: array
            An array with the indices in `stat_array` corresponding to the BCa
            confidence interval bounds.

        pvalue_1samp_ttest: float
            P-value obtained from scipy.stats.ttest_1samp. If 2 arrays were
            given (x1 and x2), returns 'NIL'.
            See https://docs.scipy.org/doc/scipy-1.0.0/reference/generated/scipy.stats.ttest_1samp.html

        pvalue_2samp_ind_ttest: float
            P-value obtained from scipy.stats.ttest_ind.
            If a single array was given (x1 only), or if `paired` is True,
            returns 'NIL'.
            See https://docs.scipy.org/doc/scipy-1.0.0/reference/generated/scipy.stats.ttest_ind.html

        pvalue_2samp_related_ttest: float
            P-value obtained from scipy.stats.ttest_rel.
            If a single array was given (x1 only), or if `paired` is False,
            returns 'NIL'.
            See https://docs.scipy.org/doc/scipy-1.0.0/reference/generated/scipy.stats.ttest_rel.html

        pvalue_wilcoxon: float
            P-value obtained from scipy.stats.wilcoxon.
            If a single array was given (x1 only), or if `paired` is False,
            returns 'NIL'.
            The Wilcoxon signed-rank test is a nonparametric paired test of
            the null hypothesis that the related samples x1 and x2 are from
            the same distribution.
            See https://docs.scipy.org/doc/scipy-1.0.0/reference/scipy.stats.wilcoxon.html

        pvalue_mann_whitney: float
            Two-sided p-value obtained from scipy.stats.mannwhitneyu.
            If a single array was given (x1 only), returns 'NIL'.
            The Mann-Whitney U-test is a nonparametric unpaired test of the null
            hypothesis that x1 and x2 are from the same distribution.
            See https://docs.scipy.org/doc/scipy-1.0.0/reference/generated/scipy.stats.mannwhitneyu.html
    '''
    def __init__(self, x1, x2=None,
                 paired=False,
                 statfunction=None,
                 smoothboot=False,
                 alpha_level=0.05,
                 reps=5000):

        import numpy as np
        import pandas as pd
        import seaborn as sns
        from scipy.stats import norm
        from numpy.random import randint
        from scipy.stats import ttest_1samp, ttest_ind, ttest_rel
        from scipy.stats import mannwhitneyu, wilcoxon, norm
        import warnings

        # Turn to pandas series.
        x1 = pd.Series(x1).dropna()
        diff = False

        # Initialise statfunction
        if statfunction == None:
            statfunction = np.mean

        # Compute two-sided alphas.
        if alpha_level > 1. or alpha_level < 0.:
            raise ValueError("alpha_level must be between 0 and 1.")
        alphas = np.array([alpha_level/2., 1-alpha_level/2.])

        sns_bootstrap_kwargs = {'func': statfunction,
                                'n_boot': reps,
                                'smooth': smoothboot}

        if paired:
            # check x2 is not None:
            if x2 is None:
                raise ValueError('Please specify x2.')
            else:
                x2 = pd.Series(x2).dropna()
                if len(x1) != len(x2):
                    raise ValueError('x1 and x2 are not the same length.')

        if (x2 is None) or (paired is True):
            if x2 is None:
                tx = x1
                paired = False
                ttest_single = ttest_1samp(x1, 0)[1]
                ttest_2_ind = 'NIL'
                ttest_2_paired = 'NIL'
                wilcoxonresult = 'NIL'
            else:
                diff = True
                tx = x2 - x1
                ttest_single = 'NIL'
                ttest_2_ind = 'NIL'
                ttest_2_paired = ttest_rel(x1, x2)[1]
                wilcoxonresult = wilcoxon(x1, x2)[1]
            mannwhitneyresult = 'NIL'

            # Turns data into array, then tuple.
            tdata = (tx,)

            # The value of the statistic function applied
            # just to the actual data.
            summ_stat = statfunction(*tdata)
            statarray = sns.algorithms.bootstrap(tx, **sns_bootstrap_kwargs)
            statarray.sort()

            # Get Percentile indices
            pct_low_high = np.round((reps-1) * alphas)
            pct_low_high = np.nan_to_num(pct_low_high).astype('int')

        elif x2 is not None and paired is False:
            diff = True
            x2 = pd.Series(x2).dropna()
            # Generate statarrays for both arrays.
            ref_statarray = sns.algorithms.bootstrap(x1, **sns_bootstrap_kwargs)
            exp_statarray = sns.algorithms.bootstrap(x2, **sns_bootstrap_kwargs)

            tdata = exp_statarray - ref_statarray
            statarray = tdata.copy()
            statarray.sort()
            tdata = (tdata,)  # Note tuple form.

            # The difference as one would calculate it.
            summ_stat = statfunction(x2) - statfunction(x1)

            # Get Percentile indices
            pct_low_high = np.round((reps-1) * alphas)
            pct_low_high = np.nan_to_num(pct_low_high).astype('int')

            # Statistical tests.
            ttest_single = 'NIL'
            ttest_2_ind = ttest_ind(x1, x2)[1]
            ttest_2_paired = 'NIL'
            mannwhitneyresult = mannwhitneyu(x1, x2,
                                             alternative='two-sided')[1]
            wilcoxonresult = 'NIL'

        # Get Bias-Corrected Accelerated indices convenience function invoked.
        bca_low_high = bca(tdata, alphas, statarray,
                           statfunction, summ_stat, reps)

        # Warnings for unstable or extreme indices.
        for ind in [pct_low_high, bca_low_high]:
            if np.any(ind == 0) or np.any(ind == reps-1):
                warnings.warn("Some values used extremal samples;"
                              " results are probably unstable.")
            elif np.any(ind < 10) or np.any(ind >= reps-10):
                warnings.warn("Some values used top 10 low/high samples;"
                              " results may be unstable.")

        self.summary = summ_stat
        self.is_paired = paired
        self.is_difference = diff
        self.statistic = str(statfunction)
        self.n_reps = reps

        self.ci = (1-alpha_level)*100
        self.stat_array = np.array(statarray)

        self.pct_ci_low = statarray[pct_low_high[0]]
        self.pct_ci_high = statarray[pct_low_high[1]]
        self.pct_low_high_indices = pct_low_high

        self.bca_ci_low = statarray[bca_low_high[0]]
        self.bca_ci_high = statarray[bca_low_high[1]]
        self.bca_low_high_indices = bca_low_high

        self.pvalue_1samp_ttest = ttest_single
        self.pvalue_2samp_ind_ttest = ttest_2_ind
        self.pvalue_2samp_paired_ttest = ttest_2_paired
        self.pvalue_wilcoxon = wilcoxonresult
        self.pvalue_mann_whitney = mannwhitneyresult

        self.results = {'stat_summary': self.summary,
                        'is_difference': diff,
                        'is_paired': paired,
                        'bca_ci_low': self.bca_ci_low,
                        'bca_ci_high': self.bca_ci_high,
                        'ci': self.ci}
    def __repr__(self):
        import numpy as np

        if 'mean' in self.statistic:
            stat = 'mean'
        elif 'median' in self.statistic:
            stat = 'median'
        else:
            stat = self.statistic

        diff_types = {True: 'paired', False: 'unpaired'}
        if self.is_difference:
            a = 'The {} {} difference is {}.'.format(diff_types[self.is_paired],
                                                     stat, self.summary)
        else:
            a = 'The {} is {}.'.format(stat, self.summary)

        b = '[{} CI: {}, {}]'.format(self.ci, self.bca_ci_low, self.bca_ci_high)
        return '\n'.join([a, b])
def jackknife_indexes(data):
    # Taken without modification from scikits.bootstrap package.
    """
    From the scikits.bootstrap package.
    Given an array, returns a list of arrays where each array is a set of
    jackknife indexes.

    For a given set of data Y, the jackknife sample J[i] is defined as the
    data set Y with the ith data point deleted.
    """
    import numpy as np

    base = np.arange(0, len(data))
    return (np.delete(base, i) for i in base)
def bca(data, alphas, statarray, statfunction, ostat, reps):
    '''
    Subroutine called to calculate the BCa statistics.
    Borrowed heavily from scikits.bootstrap code.
    '''
    import warnings
    import numpy as np
    import pandas as pd
    import seaborn as sns
    from scipy.stats import norm
    from numpy.random import randint

    # The bias correction value.
    z0 = norm.ppf((1.0*np.sum(statarray < ostat, axis=0)) / reps)

    # Statistics of the jackknife distribution
    jackindexes = jackknife_indexes(data[0])
    jstat = [statfunction(*(x[indexes] for x in data))
             for indexes in jackindexes]
    jmean = np.mean(jstat, axis=0)

    # Acceleration value
    a = np.divide(np.sum((jmean - jstat)**3, axis=0),
                  (6.0 * np.sum((jmean - jstat)**2, axis=0)**1.5))
    if np.any(np.isnan(a)):
        nanind = np.nonzero(np.isnan(a))
        warnings.warn("Some acceleration values were undefined. "
                      "This is almost certainly because all values "
                      "for the statistic were equal. Affected "
                      "confidence intervals will have zero width and "
                      "may be inaccurate (indexes: {})".format(nanind))

    zs = z0 + norm.ppf(alphas).reshape(alphas.shape + (1,)*z0.ndim)
    avals = norm.cdf(z0 + zs/(1 - a*zs))
    nvals = np.round((reps-1)*avals)
    nvals = np.nan_to_num(nvals).astype('int')

    return nvals
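
# Hedged usage sketch (added; not part of the DABEST source). A minimal demo
# of the `bootstrap` class on synthetic, made-up data:
if __name__ == '__main__':
    import numpy as np

    rng = np.random.RandomState(0)
    control = rng.normal(loc=10.0, scale=2.0, size=50)  # invented control group
    treated = rng.normal(loc=12.0, scale=2.0, size=50)  # invented treated group

    result = bootstrap(control, treated, paired=False, reps=2000)
    print(result)  # unpaired mean difference plus its BCa confidence interval
    print("BCa CI: [{}, {}]".format(result.bca_ci_low, result.bca_ci_high))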
avg_line_length: 36.173134 | max_line_length: 104 | alphanum_fraction: 0.605793 | count_classes: 10,124 | score_classes: 0.835451 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 6,180 | score_documentation: 0.509985
hexsha: fb8db21fcb68449e4d69cc6ce7881b29676db85c | size: 864 | ext: py | lang: Python
max_stars:  Numbers/floor_tiles_cost.py | lucasc896/Projects | 01ec687b07e4b56554c89ecc244fe5979c489826 | ["MIT"] | count: null | events: null
max_issues: Numbers/floor_tiles_cost.py | lucasc896/Projects | 01ec687b07e4b56554c89ecc244fe5979c489826 | ["MIT"] | count: null | events: null
max_forks:  Numbers/floor_tiles_cost.py | lucasc896/Projects | 01ec687b07e4b56554c89ecc244fe5979c489826 | ["MIT"] | count: null | events: null
content:
import math as ma
# note all sizes in m^2
# all costs in pounds
def get_details():
    return {'w': get_value("Width:", float),
            'h': get_value("Height:", float),
            'cost': get_value("Cost per tile:", float),}

def get_value(text="enter_val", expected_type=None):
    while True:
        r_in = raw_input(text)
        try:
            r_in = expected_type(r_in)
            return r_in
        except ValueError:
            print "Incorrect variable type entered. Expected: %s" % expected_type

def get_cost(d={}):
    for key in ['w', 'h', 'cost', 'tile_area']:
        assert key in d
    total_cost = d['w']*d['h']*d['cost']/d['tile_area']
    return total_cost

if __name__ == "__main__":
    vals = get_details()
    vals['tile_area'] = 0.04  # 0.2m squared tiles
    print "\n > Total cost: %.2f\n" % get_cost(vals)
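
# Added worked example (not in the original script): for a 5 m x 4 m floor,
# 0.2 m x 0.2 m tiles (tile_area = 0.04 m^2) at 2.50 pounds per tile:
#   tiles needed = 5 * 4 / 0.04 = 500
#   total cost   = 500 * 2.50 = 1250.00 pounds
# which matches get_cost({'w': 5.0, 'h': 4.0, 'cost': 2.5, 'tile_area': 0.04}).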
avg_line_length: 23.351351 | max_line_length: 81 | alphanum_fraction: 0.572917 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 260 | score_documentation: 0.300926
hexsha: fb8ef4de168793f84748b01a69155748193991bb | size: 2,579 | ext: py | lang: Python
max_stars:  pyflow/demo/cwdDemo/cwdDemo.py | quejebo/pyflow | 99718942f9ea4ac0ceacde17c8006068ef19f2c8 | ["BSD-2-Clause"] | count: 3 | events: 2019-05-29T23:01:51.000Z to 2020-02-20T21:36:55.000Z
max_issues: pyflow/demo/cwdDemo/cwdDemo.py | quejebo/pyflow | 99718942f9ea4ac0ceacde17c8006068ef19f2c8 | ["BSD-2-Clause"] | count: null | events: null
max_forks:  pyflow/demo/cwdDemo/cwdDemo.py | quejebo/pyflow | 99718942f9ea4ac0ceacde17c8006068ef19f2c8 | ["BSD-2-Clause"] | count: 2 | events: 2020-10-31T00:49:40.000Z to 2021-04-28T18:56:40.000Z
content:
#!/usr/bin/env python
#
# pyFlow - a lightweight parallel task engine
#
# Copyright (c) 2012-2017 Illumina, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
#
# demonstrate/test addTask() cwd option
#
import os.path
import sys
# add module path by hand
#
scriptDir=os.path.abspath(os.path.dirname(__file__))
sys.path.append(scriptDir+"/../../src")
from pyflow import WorkflowRunner
# all pyflow workflows are written into classes derived from
# pyflow.WorkflowRunner:
#
class CwdWorkflow(WorkflowRunner) :

    # a workflow is defined by overloading the
    # WorkflowRunner.workflow() method:
    #
    def workflow(self) :
        # get cwd and its parent for the addTask cwd test
        #
        cwd=os.getcwd()
        parentdir=os.path.abspath(os.path.join(cwd,".."))

        self.flowLog("testing pyflow cwd: '%s' parentdir: '%s'" % (cwd,parentdir))

        # task will fail unless pwd == parentdir:
        #
        # test both absolute and relative cwd arguments:
        #
        self.addTask("testAbsCwd","[ $(pwd) == '%s' ]; exit $?" % (parentdir),cwd=parentdir)
        self.addTask("testRelCwd","[ $(pwd) == '%s' ]; exit $?" % (parentdir),cwd="..")
# Instantiate the workflow
#
wflow = CwdWorkflow()
# Run the worklow:
#
retval=wflow.run(mode="local")
sys.exit(retval)
avg_line_length: 29.643678 | max_line_length: 92 | alphanum_fraction: 0.711904 | count_classes: 698 | score_classes: 0.270648 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,969 | score_documentation: 0.763474
hexsha: fb90339a9b070648b3ffa1426d143af98658172e | size: 345 | ext: py | lang: Python
max_stars:  cc/apps/coder/admin.py | mavroskardia/codechallenge | a5fee4ba73be186d90daafca50819a6817ad3d27 | ["MIT"] | count: null | events: null
max_issues: cc/apps/coder/admin.py | mavroskardia/codechallenge | a5fee4ba73be186d90daafca50819a6817ad3d27 | ["MIT"] | count: null | events: null
max_forks:  cc/apps/coder/admin.py | mavroskardia/codechallenge | a5fee4ba73be186d90daafca50819a6817ad3d27 | ["MIT"] | count: null | events: null
content:
from django.contrib import admin
from .models import Coder, Level
class CoderAdmin(admin.ModelAdmin):
    pass

class LevelAdmin(admin.ModelAdmin):
    list_display_links = ('id',)
    list_display = ('id', 'name', 'starting_xp')
    list_editable = ('name', 'starting_xp')

admin.site.register(Coder, CoderAdmin)
admin.site.register(Level, LevelAdmin)
avg_line_length: 20.294118 | max_line_length: 45 | alphanum_fraction: 0.750725 | count_classes: 193 | score_classes: 0.55942 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 46 | score_documentation: 0.133333
hexsha: fb906bc559344cfcb6abb67edb4b96c49dafc738 | size: 4,423 | ext: py | lang: Python
max_stars:  src/py/env.py | timgates42/lithp | 607d20fe18ca0a2af026c12d223bb802746fe7e7 | ["MIT"] | count: 76 | events: 2015-01-10T08:16:15.000Z to 2022-02-18T05:22:29.000Z
max_issues: mi/env.py | mountain/mu | 9834a5aea2ade8ad4462fa959d2d00c129335b7c | ["MIT"] | count: null | events: null
max_forks:  mi/env.py | mountain/mu | 9834a5aea2ade8ad4462fa959d2d00c129335b7c | ["MIT"] | count: 14 | events: 2015-01-11T17:07:10.000Z to 2021-07-25T00:23:59.000Z
content:
# The `Environment` class represents the dynamic environment of McCarthy's original Lisp. The creation of
# this class is actually an interesting story. As many of you probably know, [Paul Graham wrote a paper and
# code for McCarthy's original Lisp](http://www.paulgraham.com/rootsoflisp.html) and it was my first exposure to
# the stark simplicity of the language. The simplicity is breath-taking!
#
# However, while playing around with the code I found that in using the core functions (i.e. `null.`, `not.`, etc.)
# I was not experiencing the full effect of the original. That is, the original Lisp was dynamically scoped, but
# the Common Lisp used to implement and run (CLisp in the latter case) Graham's code was lexically scoped. Therefore,
# by attempting to write high-level functions using only the magnificent 7 and Graham's core functions in the Common Lisp
# I was taking advantage of lexical scope; something not available to McCarthy and company. Of course, the whole reason
# that Graham wrote `eval.` was to enforce dynamic scoping (he used a list of symbol-value pairs where the dynamic variables
# were added to its front when introduced). However, that was extremely cumbersome to use:
#
# (eval. 'a '((a 1) (a 2)))
# ;=> 1
#
# So I then implemented a simple REPL in Common Lisp that fed input into `eval.` and maintained the current environment list.
# That was fun, but I wasn't sure that I was learning anything at all. Therefore, years later I came across the simple
# REPL and decided to try to implement my own core environment for the magnificent 7 to truly get a feel for what it took
# to build a simple language up from scratch. I suppose if I were a real manly guy then I would have found an IBM 704, but
# that would be totally insane. (email me if you have one that you'd like to sell for cheap)
#
# Anyway, the point of this is that I needed to start with creating an `Environment` that provided dynamic scoping, and the
# result is this.
class Environment:
    # The bindings are stored in a simple dict and the stack discipline is
    # emulated through the `parent` link
    def __init__(self, par=None, bnd=None):
        if bnd:
            self.binds = bnd
        else:
            self.binds = {}

        self.parent = par

        if par:
            self.level = self.parent.level + 1
        else:
            self.level = 0

    # Getting a binding potentially requires the traversal of the parent link
    def get(self, key):
        if key in self.binds:
            return self.binds[key]
        elif self.parent:
            return self.parent.get(key)
        else:
            raise ValueError("Invalid symbol " + key)

    # Setting a binding is symmetric to getting
    def set(self, key, value):
        if key in self.binds:
            self.binds[key] = value
        elif self.parent:
            self.parent.set(key, value)
        else:
            self.binds[key] = value

    def definedp(self, key):
        if key in self.binds.keys():
            return True

        return False

    # Push a new binding by creating a new Env
    #
    # Dynamic scope works like a stack. Whenever a variable is created, its
    # binding is pushed onto a global stack. In this case, the stack is
    # simulated through a chain of parent links. So if you were to create
    # the following:
    #
    #     (label a nil)
    #     (label frobnicate (lambda () (cons a nil)))
    #
    #     ((lambda (a)
    #        (frobnicate))
    #      (quote x))
    #
    # Then the stack would look like the figure below within the body of
    # `frobnicate`:
    #
    #     |         |
    #     |         |
    #     | a = 'x  |
    #     | ------- |
    #     | a = nil |
    #     +---------+
    #
    # Meaning that when accessing `a`, `frobnicate` will get the binding at the
    # top of the stack, producing the result `(x)`. This push/pop can become
    # difficult, so people have to do all kinds of tricks to avoid confusion
    # (i.e. pseudo-namespace via variable naming schemes).
    #
    def push(self, bnd=None):
        return Environment(self, bnd)

    def pop(self):
        return self.parent

    def __repr__(self):
        ret = "\nEnvironment %s:\n" % self.level
        keys = [i for i in self.binds.keys() if not i[:2] == "__"]

        for key in keys:
            ret = ret + " %5s: %s\n" % (key, self.binds[key])

        return ret
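
# Hedged usage sketch (added; not part of the lithp source). A tiny demo of
# the dynamic-scope stack described in the comments above, using only the
# Environment class defined in this file:
if __name__ == '__main__':
    glob_env = Environment(bnd={"a": None})  # outer frame: a = nil
    inner = glob_env.push({"a": "x"})        # entering a lambda pushes a new `a`
    print(inner.get("a"))                    # -> 'x'  (innermost binding wins)
    print(inner.pop().get("a"))              # -> None (popped back to the outer frame)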
avg_line_length: 42.941748 | max_line_length: 141 | alphanum_fraction: 0.640968 | count_classes: 2,423 | score_classes: 0.547818 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,111 | score_documentation: 0.703369
hexsha: fb90bddf16d6e083178b78c9099cb96b4aea6048 | size: 4,416 | ext: py | lang: Python
max_stars:  tests/storage/test_filesystem.py | dilyanpalauzov/vdirsyncer | a3cf8e67f6396c91172b34896b828a0293ecf3c5 | ["BSD-3-Clause"] | count: 888 | events: 2016-03-16T12:03:14.000Z to 2022-03-28T17:45:44.000Z
max_issues: tests/storage/test_filesystem.py | dilyanpalauzov/vdirsyncer | a3cf8e67f6396c91172b34896b828a0293ecf3c5 | ["BSD-3-Clause"] | count: 499 | events: 2016-03-15T14:18:47.000Z to 2022-03-30T02:12:40.000Z
max_forks:  tests/storage/test_filesystem.py | dilyanpalauzov/vdirsyncer | a3cf8e67f6396c91172b34896b828a0293ecf3c5 | ["BSD-3-Clause"] | count: 135 | events: 2016-03-25T12:50:14.000Z to 2022-03-25T00:28:59.000Z
content:
import subprocess
import aiostream
import pytest
from vdirsyncer.storage.filesystem import FilesystemStorage
from vdirsyncer.vobject import Item
from . import StorageTests
class TestFilesystemStorage(StorageTests):
    storage_class = FilesystemStorage

    @pytest.fixture
    def get_storage_args(self, tmpdir):
        async def inner(collection="test"):
            rv = {"path": str(tmpdir), "fileext": ".txt", "collection": collection}
            if collection is not None:
                rv = await self.storage_class.create_collection(**rv)
            return rv

        return inner

    def test_is_not_directory(self, tmpdir):
        with pytest.raises(OSError):
            f = tmpdir.join("hue")
            f.write("stub")
            self.storage_class(str(tmpdir) + "/hue", ".txt")

    @pytest.mark.asyncio
    async def test_broken_data(self, tmpdir):
        s = self.storage_class(str(tmpdir), ".txt")

        class BrokenItem:
            raw = "Ц, Ш, Л, ж, Д, З, Ю".encode()
            uid = "jeezus"
            ident = uid

        with pytest.raises(TypeError):
            await s.upload(BrokenItem)
        assert not tmpdir.listdir()

    @pytest.mark.asyncio
    async def test_ident_with_slash(self, tmpdir):
        s = self.storage_class(str(tmpdir), ".txt")
        await s.upload(Item("UID:a/b/c"))
        (item_file,) = tmpdir.listdir()
        assert "/" not in item_file.basename and item_file.isfile()

    @pytest.mark.asyncio
    async def test_ignore_tmp_files(self, tmpdir):
        """Test that files with .tmp suffix beside .ics files are ignored."""
        s = self.storage_class(str(tmpdir), ".ics")
        await s.upload(Item("UID:xyzxyz"))
        (item_file,) = tmpdir.listdir()
        item_file.copy(item_file.new(ext="tmp"))
        assert len(tmpdir.listdir()) == 2
        assert len(await aiostream.stream.list(s.list())) == 1

    @pytest.mark.asyncio
    async def test_ignore_tmp_files_empty_fileext(self, tmpdir):
        """Test that files with .tmp suffix are ignored with empty fileext."""
        s = self.storage_class(str(tmpdir), "")
        await s.upload(Item("UID:xyzxyz"))
        (item_file,) = tmpdir.listdir()
        item_file.copy(item_file.new(ext="tmp"))
        assert len(tmpdir.listdir()) == 2
        # assert False, tmpdir.listdir()  # enable to see the created filename
        assert len(await aiostream.stream.list(s.list())) == 1

    @pytest.mark.asyncio
    async def test_ignore_files_typical_backup(self, tmpdir):
        """Test file-name ignorance with typical backup ending ~."""
        ignorext = "~"  # without dot
        storage = self.storage_class(str(tmpdir), "", fileignoreext=ignorext)
        await storage.upload(Item("UID:xyzxyz"))
        (item_file,) = tmpdir.listdir()
        item_file.copy(item_file.new(basename=item_file.basename + ignorext))
        assert len(tmpdir.listdir()) == 2
        assert len(await aiostream.stream.list(storage.list())) == 1

    @pytest.mark.asyncio
    async def test_too_long_uid(self, tmpdir):
        storage = self.storage_class(str(tmpdir), ".txt")
        item = Item("UID:" + "hue" * 600)
        href, etag = await storage.upload(item)
        assert item.uid not in href

    @pytest.mark.asyncio
    async def test_post_hook_inactive(self, tmpdir, monkeypatch):
        def check_call_mock(*args, **kwargs):
            raise AssertionError()

        monkeypatch.setattr(subprocess, "call", check_call_mock)
        s = self.storage_class(str(tmpdir), ".txt", post_hook=None)
        await s.upload(Item("UID:a/b/c"))

    @pytest.mark.asyncio
    async def test_post_hook_active(self, tmpdir, monkeypatch):
        calls = []
        exe = "foo"

        def check_call_mock(call, *args, **kwargs):
            calls.append(True)
            assert len(call) == 2
            assert call[0] == exe

        monkeypatch.setattr(subprocess, "call", check_call_mock)
        s = self.storage_class(str(tmpdir), ".txt", post_hook=exe)
        await s.upload(Item("UID:a/b/c"))
        assert calls

    @pytest.mark.asyncio
    async def test_ignore_git_dirs(self, tmpdir):
        tmpdir.mkdir(".git").mkdir("foo")
        tmpdir.mkdir("a")
        tmpdir.mkdir("b")
        expected = {"a", "b"}
        actual = {
            c["collection"] async for c in self.storage_class.discover(str(tmpdir))
        }
        assert actual == expected
avg_line_length: 33.969231 | max_line_length: 83 | alphanum_fraction: 0.619339 | count_classes: 4,245 | score_classes: 0.959756 | count_generators: 0 | score_generators: 0 | count_decorators: 3,898 | score_decorators: 0.881302 | count_async_functions: 3,587 | score_async_functions: 0.810988 | count_documentation: 567 | score_documentation: 0.128194
hexsha: fb910124233c9edcba63460a96cd8cc70f5f9da6 | size: 1,635 | ext: py | lang: Python
max_stars:  covid_sicilia.py | Cip0/covid-ita-graph | cb788b845940168ce45abbd6203f464bfe91ea46 | ["CC0-1.0"] | count: null | events: null
max_issues: covid_sicilia.py | Cip0/covid-ita-graph | cb788b845940168ce45abbd6203f464bfe91ea46 | ["CC0-1.0"] | count: null | events: null
max_forks:  covid_sicilia.py | Cip0/covid-ita-graph | cb788b845940168ce45abbd6203f464bfe91ea46 | ["CC0-1.0"] | count: null | events: null
content:
import pandas as pd
from datetime import timedelta, date
import matplotlib.pyplot as plt
def daterange(start_date, end_date):
    for n in range(int((end_date - start_date).days)):
        yield start_date + timedelta(n)

def getFileByDate(date='latest'):
    url = 'https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni-' + date + '.csv'  # e.g. 20200927.csv
    df = pd.read_csv(url, error_bad_lines=False)
    return df

# default region is Sicily
def getValue(daily, column='nuovi_positivi', region='Sicilia'):
    regRaw = daily.loc[daily['denominazione_regione'] == region]
    regRaw.loc[regRaw['denominazione_regione'] == region]
    return regRaw[column].to_numpy()[0]  # regRaw.at[16, column] #return daily.iloc[2, 17]

def getAll(column, region):
    start_date = date(2020, 2, 24)
    end_date = date(2020, 4, 10)
    end_date = date.today()
    result = []
    for single_date in daterange(start_date, end_date):
        day = single_date.strftime("%Y%m%d")
        result.append(getValue(getFileByDate(day), column, region))
    return result

nuovi_positivi = getAll('nuovi_positivi', 'Sicilia')
#deceduti = getAll('deceduti', 'Sicilia')
#dimessi_guariti = getAll('dimessi_guariti', 'Sicilia')

nuovi_positivi = pd.Series(nuovi_positivi, index=pd.date_range('2/24/2020', periods=len(nuovi_positivi)))
#deceduti = pd.Series(deceduti, index=pd.date_range('2/24/2020', periods=len(deceduti)))
#dimessi_guariti = pd.Series(dimessi_guariti, index=pd.date_range('2/24/2020', periods=len(dimessi_guariti)))

plt.figure()
ax = nuovi_positivi.plot()
#deceduti.plot(ax=ax)
#dimessi_guariti.plot(ax=ax)
plt.show()
avg_line_length: 24.402985 | max_line_length: 135 | alphanum_fraction: 0.727217 | count_classes: 0 | score_classes: 0 | count_generators: 131 | score_generators: 0.080122 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 656 | score_documentation: 0.401223
hexsha: fb91820e24893ea883a473af50f1667df4df55ca | size: 28,698 | ext: py | lang: Python
max_stars:  mhcflurry/select_allele_specific_models_command.py | ignatovmg/mhcflurry | a4b0ac96ebe7f8be7e4b37f21c430f567ac630e6 | ["Apache-2.0"] | count: 113 | events: 2018-02-07T05:01:40.000Z to 2022-03-24T14:22:58.000Z
max_issues: mhcflurry/select_allele_specific_models_command.py | ignatovmg/mhcflurry | a4b0ac96ebe7f8be7e4b37f21c430f567ac630e6 | ["Apache-2.0"] | count: 106 | events: 2015-09-15T20:12:20.000Z to 2017-12-23T01:54:54.000Z
max_forks:  mhcflurry/select_allele_specific_models_command.py | ignatovmg/mhcflurry | a4b0ac96ebe7f8be7e4b37f21c430f567ac630e6 | ["Apache-2.0"] | count: 33 | events: 2018-07-09T18:16:44.000Z to 2022-02-21T17:38:26.000Z
content:
"""
Model select class1 single allele models.
"""
import argparse
import os
import signal
import sys
import time
import traceback
import random
from functools import partial
from pprint import pprint
import numpy
import pandas
from scipy.stats import kendalltau, percentileofscore, pearsonr
from sklearn.metrics import roc_auc_score
import tqdm # progress bar
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
from .class1_affinity_predictor import Class1AffinityPredictor
from .common import normalize_allele_name
from .encodable_sequences import EncodableSequences
from .common import configure_logging, random_peptides
from .local_parallelism import worker_pool_with_gpu_assignments_from_args, add_local_parallelism_args
from .regression_target import from_ic50
# To avoid pickling large matrices to send to child processes when running in
# parallel, we use this global variable as a place to store data. Data that is
# stored here before creating the thread pool will be inherited to the child
# processes upon fork() call, allowing us to share large data with the workers
# via shared memory.
GLOBAL_DATA = {}
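
# Added illustration (comment only; the names below are hypothetical, not part
# of mhcflurry) of the fork-inheritance pattern described above:
#
#   GLOBAL_DATA["big_df"] = load_expensive_data()     # populate BEFORE forking
#   pool = multiprocessing.Pool(processes=4)          # children inherit via fork()
#   pool.map(worker_reading_GLOBAL_DATA, work_items)  # no pickling of big_df
#
# Note this relies on fork(); under spawn-style process creation (e.g. Windows)
# the data would instead have to be pickled or re-loaded in each worker.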
parser = argparse.ArgumentParser(usage=__doc__)

parser.add_argument(
    "--data",
    metavar="FILE.csv",
    required=False,
    help=(
        "Model selection data CSV. Expected columns: "
        "allele, peptide, measurement_value"))
parser.add_argument(
    "--exclude-data",
    metavar="FILE.csv",
    required=False,
    help=(
        "Data to EXCLUDE from model selection. Useful to specify the original "
        "training data used"))
parser.add_argument(
    "--models-dir",
    metavar="DIR",
    required=True,
    help="Directory to read models")
parser.add_argument(
    "--out-models-dir",
    metavar="DIR",
    required=True,
    help="Directory to write selected models")
parser.add_argument(
    "--out-unselected-predictions",
    metavar="FILE.csv",
    help="Write predictions for validation data using unselected predictor to "
    "FILE.csv")
parser.add_argument(
    "--unselected-accuracy-scorer",
    metavar="SCORER",
    default="combined:mass-spec,mse")
parser.add_argument(
    "--unselected-accuracy-scorer-num-samples",
    type=int,
    default=1000)
parser.add_argument(
    "--unselected-accuracy-percentile-threshold",
    type=float,
    metavar="X",
    default=95)
parser.add_argument(
    "--allele",
    default=None,
    nargs="+",
    help="Alleles to select models for. If not specified, all alleles with "
    "enough measurements will be used.")
parser.add_argument(
    "--combined-min-models",
    type=int,
    default=8,
    metavar="N",
    help="Min number of models to select per allele when using combined selector")
parser.add_argument(
    "--combined-max-models",
    type=int,
    default=1000,
    metavar="N",
    help="Max number of models to select per allele when using combined selector")
parser.add_argument(
    "--combined-min-contribution-percent",
    type=float,
    default=1.0,
    metavar="X",
    help="Use only model selectors that can contribute at least X %% to the "
    "total score. Default: %(default)s")
parser.add_argument(
    "--mass-spec-min-measurements",
    type=int,
    metavar="N",
    default=1,
    help="Min number of measurements required for an allele to use mass-spec model "
    "selection")
parser.add_argument(
    "--mass-spec-min-models",
    type=int,
    default=8,
    metavar="N",
    help="Min number of models to select per allele when using mass-spec selector")
parser.add_argument(
    "--mass-spec-max-models",
    type=int,
    default=1000,
    metavar="N",
    help="Max number of models to select per allele when using mass-spec selector")
parser.add_argument(
    "--mse-min-measurements",
    type=int,
    metavar="N",
    default=1,
    help="Min number of measurements required for an allele to use MSE model "
    "selection")
parser.add_argument(
    "--mse-min-models",
    type=int,
    default=8,
    metavar="N",
    help="Min number of models to select per allele when using MSE selector")
parser.add_argument(
    "--mse-max-models",
    type=int,
    default=1000,
    metavar="N",
    help="Max number of models to select per allele when using MSE selector")
parser.add_argument(
    "--scoring",
    nargs="+",
    default=["mse", "consensus"],
    help="Scoring procedures to use in order")
parser.add_argument(
    "--consensus-min-models",
    type=int,
    default=8,
    metavar="N",
    help="Min number of models to select per allele when using consensus selector")
parser.add_argument(
    "--consensus-max-models",
    type=int,
    default=1000,
    metavar="N",
    help="Max number of models to select per allele when using consensus selector")
parser.add_argument(
    "--consensus-num-peptides-per-length",
    type=int,
    default=10000,
    help="Num peptides per length to use for consensus scoring")
parser.add_argument(
    "--mass-spec-regex",
    metavar="REGEX",
    default="mass[- ]spec",
    help="Regular expression for mass-spec data. Runs on measurement_source col. "
    "Default: %(default)s.")
parser.add_argument(
    "--verbosity",
    type=int,
    help="Keras verbosity. Default: %(default)s",
    default=0)
add_local_parallelism_args(parser)
def run(argv=sys.argv[1:]):
    global GLOBAL_DATA

    # On sigusr1 print stack trace
    print("To show stack trace, run:\nkill -s USR1 %d" % os.getpid())
    signal.signal(signal.SIGUSR1, lambda sig, frame: traceback.print_stack())

    args = parser.parse_args(argv)

    args.out_models_dir = os.path.abspath(args.out_models_dir)

    configure_logging(verbose=args.verbosity > 1)

    input_predictor = Class1AffinityPredictor.load(args.models_dir)
    print("Loaded: %s" % input_predictor)

    if args.allele:
        alleles = [normalize_allele_name(a) for a in args.allele]
    else:
        alleles = input_predictor.supported_alleles

    metadata_dfs = {}

    if args.data:
        df = pandas.read_csv(args.data)
        print("Loaded data: %s" % (str(df.shape)))

        df = df.loc[
            (df.peptide.str.len() >= 8) & (df.peptide.str.len() <= 15)
        ]
        print("Subselected to 8-15mers: %s" % (str(df.shape)))

        # Allele names in data are assumed to be already normalized.
        df = df.loc[df.allele.isin(alleles)].dropna()
        print("Selected %d alleles: %s" % (len(alleles), ' '.join(alleles)))

        if args.exclude_data:
            exclude_df = pandas.read_csv(args.exclude_data)
            metadata_dfs["model_selection_exclude"] = exclude_df
            print("Loaded exclude data: %s" % (str(df.shape)))

            df["_key"] = df.allele + "__" + df.peptide
            exclude_df["_key"] = exclude_df.allele + "__" + exclude_df.peptide
            df["_excluded"] = df._key.isin(exclude_df._key.unique())
            print("Excluding measurements per allele (counts): ")
            print(df.groupby("allele")._excluded.sum())

            print("Excluding measurements per allele (fractions): ")
            print(df.groupby("allele")._excluded.mean())

            df = df.loc[~df._excluded]
            del df["_excluded"]
            del df["_key"]
            print("Reduced data to: %s" % (str(df.shape)))

        metadata_dfs["model_selection_data"] = df

        df["mass_spec"] = df.measurement_source.str.contains(
            args.mass_spec_regex)
    else:
        df = None

    if args.out_unselected_predictions:
        df["unselected_prediction"] = input_predictor.predict(
            alleles=df.allele.values,
            peptides=df.peptide.values)
        df.to_csv(args.out_unselected_predictions)
        print("Wrote: %s" % args.out_unselected_predictions)

    selectors = {}
    selector_to_model_selection_kwargs = {}

    def make_selector(
            scoring,
            combined_min_contribution_percent=args.combined_min_contribution_percent):
        if scoring in selectors:
            return (
                selectors[scoring], selector_to_model_selection_kwargs[scoring])

        start = time.time()
        if scoring.startswith("combined:"):
            model_selection_kwargs = {
                'min_models': args.combined_min_models,
                'max_models': args.combined_max_models,
            }
            component_selectors = []
            for component_selector in scoring.split(":", 1)[1].split(","):
                component_selectors.append(
                    make_selector(
                        component_selector)[0])
            selector = CombinedModelSelector(
                component_selectors,
                min_contribution_percent=combined_min_contribution_percent)
        elif scoring == "mse":
            model_selection_kwargs = {
                'min_models': args.mse_min_models,
                'max_models': args.mse_max_models,
            }
            min_measurements = args.mse_min_measurements
            selector = MSEModelSelector(
                df=df.loc[~df.mass_spec],
                predictor=input_predictor,
                min_measurements=min_measurements)
        elif scoring == "mass-spec":
            mass_spec_df = df.loc[df.mass_spec]
            model_selection_kwargs = {
                'min_models': args.mass_spec_min_models,
                'max_models': args.mass_spec_max_models,
            }
            min_measurements = args.mass_spec_min_measurements
            selector = MassSpecModelSelector(
                df=mass_spec_df,
                predictor=input_predictor,
                min_measurements=min_measurements)
        elif scoring == "consensus":
            model_selection_kwargs = {
                'min_models': args.consensus_min_models,
                'max_models': args.consensus_max_models,
            }
            selector = ConsensusModelSelector(
                predictor=input_predictor,
                num_peptides_per_length=args.consensus_num_peptides_per_length)
        else:
            raise ValueError("Unsupported scoring method: %s" % scoring)
        print("Instantiated model selector %s in %0.2f sec." % (
            scoring, time.time() - start))
        return (selector, model_selection_kwargs)

    for scoring in args.scoring:
        (selector, model_selection_kwargs) = make_selector(scoring)
        selectors[scoring] = selector
        selector_to_model_selection_kwargs[scoring] = model_selection_kwargs

    unselected_accuracy_scorer = None
    if args.unselected_accuracy_scorer:
        # Force running all selectors by setting combined_min_contribution_percent=0.
        unselected_accuracy_scorer = make_selector(
            args.unselected_accuracy_scorer,
            combined_min_contribution_percent=0.0)[0]
        print("Using unselected accuracy scorer: %s" % unselected_accuracy_scorer)
    GLOBAL_DATA["unselected_accuracy_scorer"] = unselected_accuracy_scorer

    print("Selectors for alleles:")
    allele_to_selector = {}
    allele_to_model_selection_kwargs = {}
    for allele in alleles:
        selector = None
        for possible_selector in args.scoring:
            if selectors[possible_selector].usable_for_allele(allele=allele):
                selector = selectors[possible_selector]
                print("%20s %s" % (allele, selector.plan_summary(allele)))
                break
        if selector is None:
            raise ValueError("No selectors usable for allele: %s" % allele)
        allele_to_selector[allele] = selector
        allele_to_model_selection_kwargs[allele] = (
            selector_to_model_selection_kwargs[possible_selector])

    GLOBAL_DATA["args"] = args
    GLOBAL_DATA["input_predictor"] = input_predictor
    GLOBAL_DATA["unselected_accuracy_scorer"] = unselected_accuracy_scorer
    GLOBAL_DATA["allele_to_selector"] = allele_to_selector
    GLOBAL_DATA["allele_to_model_selection_kwargs"] = allele_to_model_selection_kwargs

    if not os.path.exists(args.out_models_dir):
        print("Attempting to create directory: %s" % args.out_models_dir)
        os.mkdir(args.out_models_dir)
        print("Done.")

    result_predictor = Class1AffinityPredictor(metadata_dataframes=metadata_dfs)

    worker_pool = worker_pool_with_gpu_assignments_from_args(args)

    start = time.time()

    if worker_pool is None:
        # Serial run
        print("Running in serial.")
        results = (
            model_select(allele) for allele in alleles)
    else:
        # Parallel run
        random.shuffle(alleles)
        results = worker_pool.imap_unordered(
            partial(model_select, constant_data=GLOBAL_DATA),
            alleles,
            chunksize=1)

    unselected_summary = []
    model_selection_dfs = []
    for result in tqdm.tqdm(results, total=len(alleles)):
        pprint(result)

        summary_dict = dict(result)
        summary_dict["retained"] = result["selected"] is not None
        del summary_dict["selected"]
        unselected_summary.append(summary_dict)

        if result['selected'] is not None:
            model_selection_dfs.append(
                result['selected'].metadata_dataframes['model_selection'])
            result_predictor.merge_in_place([result['selected']])

    if model_selection_dfs:
        model_selection_df = pandas.concat(
            model_selection_dfs, ignore_index=True)
        model_selection_df["selector"] = model_selection_df.allele.map(
            allele_to_selector)
        result_predictor.metadata_dataframes["model_selection"] = (
            model_selection_df)

    result_predictor.metadata_dataframes["unselected_summary"] = (
        pandas.DataFrame(unselected_summary))

    print("Done model selecting for %d alleles." % len(alleles))
    result_predictor.save(args.out_models_dir)

    model_selection_time = time.time() - start

    if worker_pool:
        worker_pool.close()
        worker_pool.join()

    print("Model selection time %0.2f min." % (model_selection_time / 60.0))
    print("Predictor written to: %s" % args.out_models_dir)
class ScrambledPredictor(object):
    def __init__(self, predictor):
        self.predictor = predictor
        self._predictions = {}
        self._allele = None

    def predict(self, peptides, allele):
        if peptides not in self._predictions:
            self._predictions[peptides] = pandas.Series(
                self.predictor.predict(peptides=peptides, allele=allele))
            self._allele = allele
        assert allele == self._allele
        return self._predictions[peptides].sample(frac=1.0).values
def model_select(allele, constant_data=GLOBAL_DATA):
    unselected_accuracy_scorer = constant_data["unselected_accuracy_scorer"]
    selector = constant_data["allele_to_selector"][allele]
    model_selection_kwargs = constant_data[
        "allele_to_model_selection_kwargs"
    ][allele]
    predictor = constant_data["input_predictor"]
    args = constant_data["args"]
    unselected_accuracy_scorer_samples = constant_data["args"].unselected_accuracy_scorer_num_samples

    result_dict = {
        "allele": allele
    }

    unselected_score = None
    unselected_score_percentile = None
    unselected_score_scrambled_mean = None
    if unselected_accuracy_scorer:
        unselected_score_function = (
            unselected_accuracy_scorer.score_function(allele))

        additional_metadata = {}
        unselected_score = unselected_score_function(
            predictor, additional_metadata_out=additional_metadata)
        scrambled_predictor = ScrambledPredictor(predictor)
        scrambled_scores = numpy.array([
            unselected_score_function(
                scrambled_predictor)
            for _ in range(unselected_accuracy_scorer_samples)
        ])
        unselected_score_scrambled_mean = scrambled_scores.mean()
        unselected_score_percentile = percentileofscore(
            scrambled_scores, unselected_score)
        print(
            "Unselected score and percentile",
            allele,
            unselected_score,
            unselected_score_percentile,
            additional_metadata)
        result_dict.update(
            dict(("unselected_%s" % key, value)
                 for (key, value)
                 in additional_metadata.items()))

    selected = None
    threshold = args.unselected_accuracy_percentile_threshold
    if unselected_score_percentile is None or unselected_score_percentile >= threshold:
        selected = predictor.model_select(
            score_function=selector.score_function(allele=allele),
            alleles=[allele],
            **model_selection_kwargs)

    result_dict["unselected_score_plan"] = (
        unselected_accuracy_scorer.plan_summary(allele)
        if unselected_accuracy_scorer else None)
    result_dict["selector_score_plan"] = selector.plan_summary(allele)
    result_dict["unselected_accuracy_score_percentile"] = unselected_score_percentile
    result_dict["unselected_score"] = unselected_score
    result_dict["unselected_score_scrambled_mean"] = unselected_score_scrambled_mean
    result_dict["selected"] = selected
    result_dict["num_models"] = len(selected.neural_networks) if selected else None
    return result_dict
def cache_encoding(predictor, peptides):
    # Encode the peptides for each neural network, so the encoding
    # becomes cached.
    for network in predictor.neural_networks:
        network.peptides_to_network_input(peptides)
class ScoreFunction(object):
    """
    Thin wrapper over a score function (Class1AffinityPredictor -> float).

    Used to keep a summary string associated with the function.
    """
    def __init__(self, function, summary=None):
        self.function = function
        self.summary = summary if summary else "(n/a)"

    def __call__(self, *args, **kwargs):
        return self.function(*args, **kwargs)
class CombinedModelSelector(object):
"""
Model selector that computes a weighted average over other model selectors.
"""
def __init__(self, model_selectors, weights=None, min_contribution_percent=1.0):
if weights is None:
weights = numpy.ones(shape=(len(model_selectors),))
self.model_selectors = model_selectors
self.selector_to_weight = dict(zip(self.model_selectors, weights))
self.min_contribution_percent = min_contribution_percent
def usable_for_allele(self, allele):
return any(
selector.usable_for_allele(allele)
for selector in self.model_selectors)
def plan_summary(self, allele):
return self.score_function(allele, dry_run=True).summary
def score_function(self, allele, dry_run=False):
selector_to_max_weighted_score = {}
for selector in self.model_selectors:
weight = self.selector_to_weight[selector]
if selector.usable_for_allele(allele):
max_weighted_score = selector.max_absolute_value(allele) * weight
else:
max_weighted_score = 0
selector_to_max_weighted_score[selector] = max_weighted_score
max_total_score = sum(selector_to_max_weighted_score.values())
        # Keep only selectors whose best-case weighted score exceeds
        # min_contribution_percent (1% by default) of the combined total
selectors_to_use = [
selector
for selector in self.model_selectors
if (
selector_to_max_weighted_score[selector] >
max_total_score * self.min_contribution_percent / 100.0)
]
summary = ", ".join([
"%s(|%.3f|)" % (
selector.plan_summary(allele),
selector_to_max_weighted_score[selector])
for selector in selectors_to_use
])
if dry_run:
score = None
else:
score_functions_and_weights = [
(selector.score_function(allele=allele),
self.selector_to_weight[selector])
for selector in selectors_to_use
]
def score(predictor, additional_metadata_out=None):
scores = numpy.array([
score_function(
predictor,
additional_metadata_out=additional_metadata_out) * weight
for (score_function, weight) in score_functions_and_weights
])
if additional_metadata_out is not None:
additional_metadata_out["combined_score_terms"] = str(
list(scores))
return scores.sum()
return ScoreFunction(score, summary=summary)
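# Illustrative sketch (hypothetical weights, not called anywhere): combine a
# mass-spec selector with an MSE selector, weighting mass-spec twice as
# heavily. Selectors whose best-case weighted score falls below
# min_contribution_percent of the combined total are dropped from the
# per-allele plan entirely.
def _example_combined_selector(mass_spec_selector, mse_selector):
    return CombinedModelSelector(
        [mass_spec_selector, mse_selector], weights=[2.0, 1.0])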
class ConsensusModelSelector(object):
"""
Model selector that scores sub-ensembles based on their Kendall tau
consistency with the full ensemble over a set of random peptides.
"""
def __init__(
self,
predictor,
num_peptides_per_length=10000,
multiply_score_by_value=10.0):
(min_length, max_length) = predictor.supported_peptide_lengths
peptides = []
for length in range(min_length, max_length + 1):
peptides.extend(
random_peptides(num_peptides_per_length, length=length))
self.peptides = EncodableSequences.create(peptides)
self.predictor = predictor
self.multiply_score_by_value = multiply_score_by_value
cache_encoding(self.predictor, self.peptides)
def usable_for_allele(self, allele):
return True
def max_absolute_value(self, allele):
return self.multiply_score_by_value
def plan_summary(self, allele):
return "consensus (%d points)" % len(self.peptides)
def score_function(self, allele):
full_ensemble_predictions = self.predictor.predict(
allele=allele,
peptides=self.peptides)
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=self.peptides,
)
tau = kendalltau(predictions, full_ensemble_predictions).correlation
if additional_metadata_out is not None:
additional_metadata_out["score_consensus_tau"] = tau
return tau * self.multiply_score_by_value
return ScoreFunction(
score, summary=self.plan_summary(allele))
class MSEModelSelector(object):
"""
Model selector that uses mean-squared error to score models. Inequalities
are supported.
"""
def __init__(
self,
df,
predictor,
min_measurements=1,
multiply_score_by_data_size=True):
self.df = df
self.predictor = predictor
self.min_measurements = min_measurements
self.multiply_score_by_data_size = multiply_score_by_data_size
def usable_for_allele(self, allele):
return (self.df.allele == allele).sum() >= self.min_measurements
def max_absolute_value(self, allele):
if self.multiply_score_by_data_size:
return (self.df.allele == allele).sum()
else:
return 1.0
def plan_summary(self, allele):
return self.score_function(allele).summary
def score_function(self, allele):
sub_df = self.df.loc[self.df.allele == allele].reset_index(drop=True)
peptides = EncodableSequences.create(sub_df.peptide.values)
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=peptides,
)
deviations = from_ic50(predictions) - from_ic50(
sub_df.measurement_value)
if 'measurement_inequality' in sub_df.columns:
# Must reverse meaning of inequality since we are working with
# transformed 0-1 values, which are anti-correlated with the ic50s.
# The measurement_inequality column is given in terms of ic50s.
deviations.loc[
(
(sub_df.measurement_inequality == "<") & (deviations > 0)) |
((sub_df.measurement_inequality == ">") & (deviations < 0))
] = 0.0
score_mse = (1 - (deviations ** 2).mean())
if additional_metadata_out is not None:
additional_metadata_out["score_MSE"] = 1 - score_mse
# We additionally include other scores on (=) measurements as
# a convenience
eq_df = sub_df
if 'measurement_inequality' in sub_df.columns:
eq_df = sub_df.loc[
sub_df.measurement_inequality == "="
]
additional_metadata_out["score_pearsonr"] = (
pearsonr(
numpy.log(eq_df.measurement_value.values),
numpy.log(predictions[eq_df.index.values]))[0])
for threshold in [500, 5000, 15000]:
if (eq_df.measurement_value < threshold).nunique() == 2:
additional_metadata_out["score_AUC@%d" % threshold] = (
roc_auc_score(
(eq_df.measurement_value < threshold).values,
-1 * predictions[eq_df.index.values]))
return score_mse * (
len(sub_df) if self.multiply_score_by_data_size else 1)
summary = "mse (%d points)" % (len(sub_df))
return ScoreFunction(score, summary=summary)
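# Worked sketch of the inequality masking above (hypothetical numbers, not
# called anywhere): a measurement recorded as "< 100" nM only asserts the
# true ic50 is below 100. Since from_ic50 is decreasing in ic50, predicting
# an even stronger binder than a censored "<" value gives a positive
# deviation that is masked to zero rather than penalized.
def _example_inequality_masking():
    predictions = numpy.array([50.0, 50.0])
    measurements = numpy.array([100.0, 100.0])
    inequalities = numpy.array(["<", "="])
    deviations = from_ic50(predictions) - from_ic50(measurements)
    deviations[(inequalities == "<") & (deviations > 0)] = 0.0
    return deviations  # first entry masked to 0.0, second entry stays > 0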
class MassSpecModelSelector(object):
"""
Model selector that uses PPV of differentiating decoys from hits from
mass-spec experiments.
"""
def __init__(
self,
df,
predictor,
decoys_per_length=0,
min_measurements=100,
multiply_score_by_data_size=True):
# Index is peptide, columns are alleles
hit_matrix = df.groupby(
["peptide", "allele"]).measurement_value.count().unstack().fillna(
0).astype(bool)
if decoys_per_length:
(min_length, max_length) = predictor.supported_peptide_lengths
decoys = []
for length in range(min_length, max_length + 1):
decoys.extend(
random_peptides(decoys_per_length, length=length))
decoy_matrix = pandas.DataFrame(
index=decoys, columns=hit_matrix.columns, dtype=bool)
decoy_matrix[:] = False
full_matrix = pandas.concat([hit_matrix, decoy_matrix])
else:
full_matrix = hit_matrix
if len(full_matrix) > 0:
full_matrix = full_matrix.sample(frac=1.0).astype(float)
self.df = full_matrix
self.predictor = predictor
self.min_measurements = min_measurements
self.multiply_score_by_data_size = multiply_score_by_data_size
self.peptides = EncodableSequences.create(full_matrix.index.values)
cache_encoding(self.predictor, self.peptides)
@staticmethod
def ppv(y_true, predictions):
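        """
        Positive predictive value: the fraction of true hits among the
        int(sum(y_true)) top-ranked predictions. Predictions are ic50-like
        (lower means stronger binding), hence the ascending sort.
        """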
df = pandas.DataFrame({"prediction": predictions, "y_true": y_true})
return df.sort_values("prediction", ascending=True)[
: int(y_true.sum())
].y_true.mean()
def usable_for_allele(self, allele):
return allele in self.df.columns and (
self.df[allele].sum() >= self.min_measurements)
def max_absolute_value(self, allele):
if self.multiply_score_by_data_size:
return self.df[allele].sum()
else:
return 1.0
def plan_summary(self, allele):
return self.score_function(allele).summary
def score_function(self, allele):
total_hits = self.df[allele].sum()
total_decoys = (self.df[allele] == 0).sum()
multiplier = total_hits if self.multiply_score_by_data_size else 1
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=self.peptides,
)
ppv = self.ppv(self.df[allele], predictions)
if additional_metadata_out is not None:
additional_metadata_out["score_mass_spec_PPV"] = ppv
# We additionally compute AUC score.
additional_metadata_out["score_mass_spec_AUC"] = roc_auc_score(
self.df[allele].values, -1 * predictions)
return ppv * multiplier
summary = "mass-spec (%d hits / %d decoys)" % (total_hits, total_decoys)
return ScoreFunction(score, summary=summary)
if __name__ == '__main__':
run()
| 36.007528
| 101
| 0.643703
| 11,746
| 0.409297
| 0
| 0
| 241
| 0.008398
| 0
| 0
| 5,750
| 0.200362
|
fb93199fe7cc80dd48c6b172980feb9638eeb2ac
| 623
|
py
|
Python
|
CircuitPython_Made_Easy_On_CPX/cpx_temperature_neopixels.py
|
joewalk102/Adafruit_Learning_System_Guides
|
2bda607f8c433c661a2d9d40b4db4fd132334c9a
|
[
"MIT"
] | 665
|
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
CircuitPython_Made_Easy_On_CPX/cpx_temperature_neopixels.py
|
joewalk102/Adafruit_Learning_System_Guides
|
2bda607f8c433c661a2d9d40b4db4fd132334c9a
|
[
"MIT"
] | 641
|
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
CircuitPython_Made_Easy_On_CPX/cpx_temperature_neopixels.py
|
joewalk102/Adafruit_Learning_System_Guides
|
2bda607f8c433c661a2d9d40b4db4fd132334c9a
|
[
"MIT"
] | 734
|
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
import time
from adafruit_circuitplayground.express import cpx
import simpleio
cpx.pixels.auto_write = False
cpx.pixels.brightness = 0.3
# Set these based on your ambient temperature for best results!
minimum_temp = 24
maximum_temp = 30
while True:
# temperature value remapped to pixel position
peak = simpleio.map_range(cpx.temperature, minimum_temp, maximum_temp, 0, 10)
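    # map_range clamps its output, so readings outside the min/max window
    # simply pin the bar to an empty or fully lit ring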
print(cpx.temperature)
print(int(peak))
for i in range(0, 10, 1):
if i <= peak:
cpx.pixels[i] = (0, 255, 255)
else:
cpx.pixels[i] = (0, 0, 0)
cpx.pixels.show()
time.sleep(0.05)
| 24.92
| 81
| 0.667737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.17496
|
fb944044bfb463a8599ee79bec50521d35a9aa25
| 1,086
|
py
|
Python
|
Python_Fundamentals/06_Object_And_Classes/task_object_and_classes/d_exercises.py
|
Dochko0/Python
|
e9612c4e842cfd3d9a733526cc7485765ef2238f
|
[
"MIT"
] | null | null | null |
Python_Fundamentals/06_Object_And_Classes/task_object_and_classes/d_exercises.py
|
Dochko0/Python
|
e9612c4e842cfd3d9a733526cc7485765ef2238f
|
[
"MIT"
] | null | null | null |
Python_Fundamentals/06_Object_And_Classes/task_object_and_classes/d_exercises.py
|
Dochko0/Python
|
e9612c4e842cfd3d9a733526cc7485765ef2238f
|
[
"MIT"
] | null | null | null |
class Exercises:
def __init__(self, topic, course_name, judge_contest_link, problems):
self.topic = topic
self.course_name = course_name
self.judge_contest_link = judge_contest_link
self.problems = [*problems]
def get_info(self):
info = f'Exercises: {self.topic}\n' \
f'Problems for exercises and homework for the "{self.course_name}" course @ SoftUni.' \
f'\nCheck your solutions here: {self.judge_contest_link}\n'
        for index, problem in enumerate(self.problems, start=1):
            if index == len(self.problems):
                info += f'{index}. {problem}'
            else:
                info += f'{index}. {problem}\n'
return info
num = 1
items = []
while True:
line_input = input()
if line_input == 'go go go':
break
topic, course_name, judge_contest_link, all_problems = list(line_input.split(' -> '))
problems = all_problems.split(', ')
items.append(Exercises(topic, course_name, judge_contest_link, problems))
for i in items:
print(i.get_info())
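# Example session (hypothetical input, terminated by the sentinel line):
#   Loops -> Python Fundamentals -> https://judge.softuni.org/ -> While Loop, For Loop
#   go go go
# Each line splits on ' -> ' into topic, course name, judge link and a
# comma-separated problem list, printed back as a numbered summary.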
| 29.351351
| 99
| 0.604052
| 727
| 0.669429
| 0
| 0
| 0
| 0
| 0
| 0
| 254
| 0.233886
|
fb9503385f519e775914f1b2f2d3dd6a4f2477ad
| 15,037
|
py
|
Python
|
Machine Learning Summer School 2019 (London, UK)/tutorials/mcmc/2 - markov_chain_monte_carlo.py
|
xuedong/rlss2019
|
d7468c2fcf269d8afd6fb0f44993aa9797867944
|
[
"MIT"
] | null | null | null |
Machine Learning Summer School 2019 (London, UK)/tutorials/mcmc/2 - markov_chain_monte_carlo.py
|
xuedong/rlss2019
|
d7468c2fcf269d8afd6fb0f44993aa9797867944
|
[
"MIT"
] | null | null | null |
Machine Learning Summer School 2019 (London, UK)/tutorials/mcmc/2 - markov_chain_monte_carlo.py
|
xuedong/rlss2019
|
d7468c2fcf269d8afd6fb0f44993aa9797867944
|
[
"MIT"
] | null | null | null |
############################################################
# Copyright 2019 Michael Betancourt
# Licensed under the new BSD (3-clause) license:
#
# https://opensource.org/licenses/BSD-3-Clause
############################################################
############################################################
#
# Initial setup
#
############################################################
import matplotlib.pyplot as plot
import scipy.stats as stats
import numpy
import math
light = "#DCBCBC"
light_highlight = "#C79999"
mid = "#B97C7C"
mid_highlight = "#A25050"
dark = "#8F2727"
dark_highlight = "#7C0000"
green = "#00FF00"
# To facilitate the computation of Markov chain Monte Carlo estimators
# let's define a _Welford accumulator_ that computes empirical summaries
# of a sample in a single pass
def welford_summary(x, L = 100):
summary = [0] * (L + 1)
for n in range(len(x)):
delta = x[n] - summary[0]
summary[0] += delta / (n + 1)
for l in range(L):
if n > l:
summary[l + 1] += delta * (x[n - l] - summary[0])
norm = 1.0 / (len(x) - 1)
for l in range(L): summary[l + 1] *= norm
return summary
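# Sanity-check sketch (assumed inputs, not part of the lesson flow): at lag
# zero the accumulator is exactly Welford's running mean/variance update, so
# it should agree with a two-pass computation up to floating point error.
def check_welford_summary():
  x = stats.norm.rvs(0, 1, size=1000)
  summary = welford_summary(x, L=1)
  assert numpy.isclose(summary[0], numpy.mean(x))
  assert numpy.isclose(summary[1], numpy.var(x, ddof=1))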
# We can then use the Welford accumulator output to compute the
# Markov chain Monte Carlo estimators and their properties
def compute_mcmc_stats(x, L = 20):
summary = welford_summary(x, L)
mean = summary[0]
var = summary[1]
acov = summary[1:(L + 1)]
# Compute the effective sample size
rho_hat_s = [0] * L
rho_hat_s[1] = acov[1] / var
# First we transform our autocovariances into Geyer's initial positive sequence
max_s = 1
  for s in [ 2 * i + 1 for i in range((L - 1) // 2) ]:
    rho_hat_even = acov[s + 1] / var
    rho_hat_odd = acov[s + 2] / var
max_s = s + 2
if rho_hat_even + rho_hat_odd > 0:
rho_hat_s[s + 1] = rho_hat_even
rho_hat_s[s + 2] = rho_hat_odd
else:
break
# Then we transform this output into Geyer's initial monotone sequence
  for s in [ 2 * i + 3 for i in range((max_s - 2) // 2) ]:
if rho_hat_s[s + 1] + rho_hat_s[s + 2] > rho_hat_s[s - 1] + rho_hat_s[s]:
rho_hat_s[s + 1] = 0.5 * (rho_hat_s[s - 1] + rho_hat_s[s])
rho_hat_s[s + 2] = rho_hat_s[s + 1]
ess = len(x) / (1.0 + 2 * sum(rho_hat_s))
return [mean, math.sqrt(var / ess), math.sqrt(var), ess]
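# compute_mcmc_stats returns [mean, MCSE, standard deviation, ESS]; the
# Monte Carlo standard error is sqrt(var / ess), and the +/- 2 MCSE bands
# plotted below are built directly from that second element.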
# To generate our samples we'll use numpy's pseudo random number
# generator which needs to be seeded to achieve reproducible
# results
numpy.random.seed(seed=8675309)
# To ensure accurate results let's generate pretty large samples
N = 10000
# To see how results scale with dimension we'll consider
# behavior one through ten dimensions
Ds = [ n + 1 for n in range(10) ]
idxs = [ idx for idx in range(Ds[-1]) for r in range(2) ]
plot_Ds = [ D + delta for D in Ds for delta in [-0.5, 0.5]]
############################################################
#
# How does the Random Walk Metropolis algorithm perform
# on a target distribution with a two-dimensional Gaussian
# density function?
#
############################################################
# Target density
def target_lpdf(x):
return - 0.5 * ( (x[0] - 1)**2 + (x[1] + 1)**2 ) \
- 0.5 * 2 * math.log(6.283185307179586)
# Tune proposal density
sigma = 1.4
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
D = 2
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Randomly seed the initial state
mcmc_samples[0][0] = stats.norm.rvs(0, 3)
mcmc_samples[0][1] = stats.norm.rvs(0, 3)
mcmc_samples[0][2] = 1
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][0], mcmc_samples[n - 1][1]]
xp = [ stats.norm.rvs(x0[0], sigma), stats.norm.rvs(x0[1], sigma) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0] = xp[0]
mcmc_samples[n][1] = xp[1]
else:
mcmc_samples[n][0] = x0[0]
mcmc_samples[n][1] = x0[1]
# Compute MCMC estimator statistics, leaving
# out the first 100 samples as warmup
compute_mcmc_stats([ s[0] for s in mcmc_samples[100:] ])
compute_mcmc_stats([ s[1] for s in mcmc_samples[100:] ])
# Plot convergence of MCMC estimators for each parameter
stride = 250
M = N // stride
iters = [ stride * (i + 1) for i in range(M) ]
x1_mean = [0] * M
x1_se = [0] * M
x2_mean = [0] * M
x2_se = [0] * M
for m in range(M):
running_samples = [ s[0] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
x1_mean[m] = mcmc_stats[0]
x1_se[m] = mcmc_stats[1]
running_samples = [ s[1] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
x2_mean[m] = mcmc_stats[0]
x2_se[m] = mcmc_stats[1]
plot.fill_between(iters,
[ x1_mean[m] - 2 * x1_se[m] for m in range(M) ],
[ x1_mean[m] + 2 * x1_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, x1_mean, color=dark)
plot.plot([iters[0], iters[-1]], [1, 1], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-2, 2])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
plot.fill_between(iters,
[ x2_mean[m] - 2 * x2_se[m] for m in range(M) ],
[ x2_mean[m] + 2 * x2_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, x2_mean, color=dark)
plot.plot([iters[0], iters[-1]], [-1, -1], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-2, 2])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
############################################################
#
# How does the Random Walk Metropolis algorithm perform
# on a target distribution with a funnel density function?
#
############################################################
# Target density
def target_lpdf(x):
return - 0.5 * ( x[0]**2 + x[1]**2 + ( (x[2] - x[0]) / math.exp(x[1]) )**2 ) \
- 0.5 * 3 * math.log(6.283185307179586) - 0.5 * x[2]
# Tune proposal density
sigma = 1.4
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
D = 3
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Randomly seed the initial state
mcmc_samples[0][0] = stats.norm.rvs(0, 3)
mcmc_samples[0][1] = stats.norm.rvs(0, 3)
mcmc_samples[0][2] = stats.norm.rvs(0, 3)
mcmc_samples[0][3] = 1
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][0],
mcmc_samples[n - 1][1],
mcmc_samples[n - 1][2]]
xp = [ stats.norm.rvs(x0[0], sigma),
stats.norm.rvs(x0[1], sigma),
stats.norm.rvs(x0[2], sigma) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0] = xp[0]
mcmc_samples[n][1] = xp[1]
mcmc_samples[n][2] = xp[2]
else:
mcmc_samples[n][0] = x0[0]
mcmc_samples[n][1] = x0[1]
mcmc_samples[n][2] = x0[2]
# Compute MCMC estimator statistics, leaving
# out the first 100 samples as warmup
compute_mcmc_stats([ s[0] for s in mcmc_samples[100:] ])
compute_mcmc_stats([ s[1] for s in mcmc_samples[100:] ])
compute_mcmc_stats([ s[2] for s in mcmc_samples[100:] ])
# Plot convergence of MCMC estimators for each parameter
stride = 250
M = N // stride
iters = [ stride * (i + 1) for i in range(M) ]
mu_mean = [0] * M
mu_se = [0] * M
log_tau_mean = [0] * M
log_tau_se = [0] * M
for m in range(M):
running_samples = [ s[0] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
mu_mean[m] = mcmc_stats[0]
mu_se[m] = mcmc_stats[1]
running_samples = [ s[1] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
log_tau_mean[m] = mcmc_stats[0]
log_tau_se[m] = mcmc_stats[1]
plot.fill_between(iters,
[ mu_mean[m] - 2 * mu_se[m] for m in range(M) ],
[ mu_mean[m] + 2 * mu_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, mu_mean, color=dark)
plot.plot([iters[0], iters[-1]], [0, 0], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-1, 1])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
plot.fill_between(iters,
[ log_tau_mean[m] - 2 * log_tau_se[m] for m in range(M) ],
[ log_tau_mean[m] + 2 * log_tau_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, log_tau_mean, color=dark)
plot.plot([iters[0], iters[-1]], [0, 0], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-1, 8])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
############################################################
#
# How does the effective sample size of a Random Walk
# Metropolis Markov chain vary with the dimension of
# the target distribution?
#
############################################################
def target_lpdf(x):
return - 0.5 * sum([ x_n**2 for x_n in x ]) \
- 0.5 * len(x) * math.log(6.283185307179586)
############################################################
# First let's use a constant Markov transition
############################################################
accept_prob_means = [0] * len(Ds)
accept_prob_ses = [0] * len(Ds)
ave_eff_sample_sizes = [0] * len(Ds)
# Tune proposal density
sigma = 1.4
for D in Ds:
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Seeding the initial state with an exact sample
# from the target distribution ensures that we
# start in the typical set and avoid having to
# worry about warmup.
for d in range(D):
mcmc_samples[0][d] = stats.norm.rvs(0, 3)
mcmc_samples[0][D] = 1
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][d] for d in range(D) ]
xp = [ stats.norm.rvs(x0[d], sigma) for d in range(D) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0:D] = xp
else:
mcmc_samples[n][0:D] = x0
# Estimate average acceptance probability
# Compute MCMC estimator statistics
mcmc_stats = compute_mcmc_stats([ s[D] for s in mcmc_samples])
accept_prob_means[D - 1] = mcmc_stats[0]
accept_prob_ses[D - 1] = mcmc_stats[1]
# Estimate effective sample size
eff_sample_sizes = [ compute_mcmc_stats([ s[d] for s in mcmc_samples])[3] \
for d in range(D) ]
ave_eff_sample_sizes[D - 1] = sum(eff_sample_sizes) / D
f, axarr = plot.subplots(1, 2)
axarr[0].set_title("")
axarr[0].fill_between(plot_Ds,
[ accept_prob_means[idx] - 2 * accept_prob_ses[idx] for idx in idxs ],
[ accept_prob_means[idx] + 2 * accept_prob_ses[idx] for idx in idxs ],
facecolor=dark, color=dark)
axarr[0].plot(plot_Ds, [ accept_prob_means[idx] for idx in idxs], color=dark_highlight)
axarr[0].set_xlim([Ds[0], Ds[-1]])
axarr[0].set_xlabel("Dimension")
axarr[0].set_ylim([0, 1])
axarr[0].set_ylabel("Average Acceptance Probability")
axarr[1].set_title("")
axarr[1].plot(plot_Ds, [ ave_eff_sample_sizes[idx] / N for idx in idxs],
color=dark_highlight)
axarr[1].set_xlim([Ds[0], Ds[-1]])
axarr[1].set_xlabel("Dimension")
axarr[1].set_ylim([0, 0.3])
axarr[1].set_ylabel("Average Effective Sample Size Per Iteration")
plot.show()
############################################################
# Now let's use an (approximately) optimally tuned Markov
# transition for each dimension
############################################################
accept_prob_means = [0] * len(Ds)
accept_prob_ses = [0] * len(Ds)
ave_eff_sample_sizes = [0] * len(Ds)
# Approximately optimal proposal tuning
opt_sigmas = [2.5, 1.75, 1.5, 1.2, 1.15, 1.0, 0.95, 0.85, 0.8, 0.75]
# Tune proposal density
sigma = 1.4
for D in Ds:
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Seeding the initial state with an exact sample
# from the target distribution ensures that we
# start in the typical set and avoid having to
# worry about warmup.
for d in range(D):
mcmc_samples[0][d] = stats.norm.rvs(0, 3)
mcmc_samples[0][D] = 1
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][d] for d in range(D) ]
xp = [ stats.norm.rvs(x0[d], opt_sigmas[D - 1]) for d in range(D) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0:D] = xp
else:
mcmc_samples[n][0:D] = x0
# Estimate average acceptance probability
# Compute MCMC estimator statistics
mcmc_stats = compute_mcmc_stats([ s[D] for s in mcmc_samples])
accept_prob_means[D - 1] = mcmc_stats[0]
accept_prob_ses[D - 1] = mcmc_stats[1]
# Estimate effective sample size
eff_sample_sizes = [ compute_mcmc_stats([ s[d] for s in mcmc_samples])[3] \
for d in range(D) ]
ave_eff_sample_sizes[D - 1] = sum(eff_sample_sizes) / D
f, axarr = plot.subplots(1, 2)
axarr[0].set_title("")
axarr[0].fill_between(plot_Ds,
[ accept_prob_means[idx] - 2 * accept_prob_ses[idx] for idx in idxs ],
[ accept_prob_means[idx] + 2 * accept_prob_ses[idx] for idx in idxs ],
facecolor=dark, color=dark)
axarr[0].plot(plot_Ds, [ accept_prob_means[idx] for idx in idxs], color=dark_highlight)
axarr[0].set_xlim([Ds[0], Ds[-1]])
axarr[0].set_xlabel("Dimension")
axarr[0].set_ylim([0, 1])
axarr[0].set_ylabel("Average Acceptance Probability")
axarr[1].set_title("")
axarr[1].plot(plot_Ds, [ ave_eff_sample_sizes[idx] / N for idx in idxs],
color=dark_highlight)
axarr[1].set_xlim([Ds[0], Ds[-1]])
axarr[1].set_xlabel("Dimension")
axarr[1].set_ylim([0, 0.3])
axarr[1].set_ylabel("Average Effective Sample Size Per Iteration")
plot.show()
| 31.524109
| 92
| 0.608499
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,517
| 0.300392
|
fb9534493c6c33c290455dafb3878a1f3ed9246b
| 454
|
py
|
Python
|
opendbc/generator/test_generator.py
|
darknight111/openpilot3
|
a0c755fbe1889f26404a8225816f57e89fde7bc2
|
[
"MIT"
] | 116
|
2018-03-07T09:00:10.000Z
|
2020-04-06T18:37:45.000Z
|
opendbc/generator/test_generator.py
|
darknight111/openpilot3
|
a0c755fbe1889f26404a8225816f57e89fde7bc2
|
[
"MIT"
] | 66
|
2020-04-09T20:27:57.000Z
|
2022-01-27T14:39:24.000Z
|
opendbc/generator/test_generator.py
|
darknight111/openpilot3
|
a0c755fbe1889f26404a8225816f57e89fde7bc2
|
[
"MIT"
] | 154
|
2020-04-08T21:41:22.000Z
|
2022-03-17T21:05:33.000Z
|
#!/usr/bin/env python3
import os
import filecmp
import tempfile
from opendbc.generator.generator import create_all, opendbc_root
def test_generator():
with tempfile.TemporaryDirectory() as d:
create_all(d)
ignore = [f for f in os.listdir(opendbc_root) if not f.endswith('_generated.dbc')]
comp = filecmp.dircmp(opendbc_root, d, ignore=ignore)
assert len(comp.diff_files) == 0, f"Different files: {comp.diff_files}"
test_generator()
| 26.705882
| 86
| 0.746696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.165198
|
fb95d8cb21b1ef70d5c86b417371dd007196c3a0
| 3,575
|
py
|
Python
|
src/vigorish/scrape/brooks_pitchfx/scrape_task.py
|
a-luna/vigorish
|
6cede5ced76c7d2c9ad0aacdbd2b18c2f1ee4ee6
|
[
"MIT"
] | 2
|
2021-07-15T13:53:33.000Z
|
2021-07-25T17:03:29.000Z
|
src/vigorish/scrape/brooks_pitchfx/scrape_task.py
|
a-luna/vigorish
|
6cede5ced76c7d2c9ad0aacdbd2b18c2f1ee4ee6
|
[
"MIT"
] | 650
|
2019-05-18T07:00:12.000Z
|
2022-01-21T19:38:55.000Z
|
src/vigorish/scrape/brooks_pitchfx/scrape_task.py
|
a-luna/vigorish
|
6cede5ced76c7d2c9ad0aacdbd2b18c2f1ee4ee6
|
[
"MIT"
] | 2
|
2020-03-28T21:01:31.000Z
|
2022-01-06T05:16:11.000Z
|
import vigorish.database as db
from vigorish.enums import DataSet, ScrapeCondition
from vigorish.scrape.brooks_pitchfx.parse_html import parse_pitchfx_log
from vigorish.scrape.scrape_task import ScrapeTaskABC
from vigorish.status.update_status_brooks_pitchfx import update_status_brooks_pitchfx_log
from vigorish.util.dt_format_strings import DATE_ONLY_2
from vigorish.util.result import Result
class ScrapeBrooksPitchFx(ScrapeTaskABC):
def __init__(self, app, db_job):
self.data_set = DataSet.BROOKS_PITCHFX
self.req_data_set = DataSet.BROOKS_PITCH_LOGS
super().__init__(app, db_job)
def check_prerequisites(self, game_date):
brooks_pitch_logs = db.DateScrapeStatus.verify_all_brooks_pitch_logs_scraped_for_date(
self.db_session, game_date
)
if brooks_pitch_logs:
return Result.Ok()
date_str = game_date.strftime(DATE_ONLY_2)
error = (
f"Brooks pitch logs for date {date_str} have not been scraped, unable to scrape "
"Brooks pitchfx data until this has been done."
)
return Result.Fail(error)
def check_current_status(self, game_date):
if self.scrape_condition == ScrapeCondition.ALWAYS:
return Result.Ok()
scraped_brooks_pitchfx = db.DateScrapeStatus.verify_all_brooks_pitchfx_scraped_for_date(
self.db_session, game_date
)
return Result.Ok() if not scraped_brooks_pitchfx else Result.Fail("skip")
def parse_scraped_html(self):
parsed = 0
for game_date in self.date_range:
pitch_logs_for_date = self.scraped_data.get_all_brooks_pitch_logs_for_date(game_date)
if not pitch_logs_for_date:
date_str = game_date.strftime(DATE_ONLY_2)
error = f"Failed to retrieve {self.req_data_set} for date: {date_str}"
return Result.Fail(error)
for pitch_logs_for_game in pitch_logs_for_date:
game_id = pitch_logs_for_game.bbref_game_id
self.spinner.text = self.url_tracker.parse_html_report(parsed, game_id)
for pitch_log in pitch_logs_for_game.pitch_logs:
if not pitch_log.parsed_all_info:
continue
if pitch_log.pitch_app_id not in self.url_tracker.parse_url_ids:
continue
html = self.url_tracker.get_html(pitch_log.pitch_app_id)
result = parse_pitchfx_log(html, pitch_log)
if result.failure:
return result
pitchfx_log = result.value
result = self.scraped_data.save_json(self.data_set, pitchfx_log)
if result.failure:
return Result.Fail(f"Error! {result.error} (ID: {pitch_log.pitch_app_id})")
result = self.update_status(pitchfx_log)
if result.failure:
return Result.Fail(f"Error! {result.error} (ID: {pitch_log.pitch_app_id})")
parsed += 1
self.spinner.text = self.url_tracker.parse_html_report(parsed, game_id)
self.db_session.commit()
return Result.Ok()
def parse_html(self, url_details):
pass
def update_status(self, parsed_data):
result = update_status_brooks_pitchfx_log(self.db_session, parsed_data)
if result.failure:
return result
self.db_session.commit()
return Result.Ok()
| 45.833333
| 99
| 0.645315
| 3,177
| 0.888671
| 0
| 0
| 0
| 0
| 0
| 0
| 306
| 0.085594
|
fb96d6004d5d3c7514625831b9038ed27e6e0930
| 10,309
|
py
|
Python
|
mayan/apps/rest_api/classes.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 336
|
2019-05-09T07:05:19.000Z
|
2022-03-25T09:50:22.000Z
|
mayan/apps/rest_api/classes.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 9
|
2019-10-29T00:12:27.000Z
|
2021-09-09T15:16:51.000Z
|
mayan/apps/rest_api/classes.py
|
atitaya1412/Mayan-EDMS
|
bda9302ba4b743e7d829ad118b8b836221888172
|
[
"Apache-2.0"
] | 257
|
2019-05-14T10:26:37.000Z
|
2022-03-30T03:37:36.000Z
|
from collections import namedtuple
import io
import json
from furl import furl
from django.core.handlers.wsgi import WSGIRequest
from django.http.request import QueryDict
from django.template import Variable, VariableDoesNotExist
from django.test.client import MULTIPART_CONTENT
from django.urls import resolve
from django.urls.exceptions import Resolver404
from mayan.apps.organizations.settings import setting_organization_url_base_path
from mayan.apps.templating.classes import Template
from .literals import API_VERSION
class BatchResponse:
def __init__(self, name, status_code, data, headers):
self.name = name
self.status_code = status_code
self.data = data
self.headers = headers
class NestableLazyIterator:
def __init__(
self, iterable_string, context, context_list_index, parent_iterator=None
):
self.iterable_string = iterable_string
self.context = context
self.context_list_index = context_list_index
self.parent_iterator = parent_iterator
self.items = None
self.index = 0
def __iter__(self):
return self
def __next__(self):
        # Set up the initial values on the first access.
if not self.items:
if self.parent_iterator:
next(self.parent_iterator)
self.update_iterable_object()
if self.index == len(self.items):
self.index = 0
if self.parent_iterator:
next(self.parent_iterator)
else:
raise StopIteration
self.update_iterable_object()
value = self.items[self.index]
self.context['iterables'][self.context_list_index] = value
self.index += 1
return value
def update_iterable_object(self):
self.items = Variable(var=self.iterable_string).resolve(context=self.context)
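# Illustrative sketch (hypothetical data, not used by the batch API): two
# chained NestableLazyIterator instances walk a cross product lazily. The
# inner iterator re-resolves its iterable string against the context each
# time the parent advances, which is what lets an inner iterable depend on
# the outer loop's current value; the 'iterables' list is pre-seeded with
# one slot per iterator, just as BatchRequest.execute does.
def _example_nested_lazy_iteration():
    context = {
        'iterables': [None, None], 'letters': ['a', 'b'], 'numbers': [1, 2]
    }
    outer = NestableLazyIterator(
        iterable_string='letters', context=context, context_list_index=0
    )
    inner = NestableLazyIterator(
        iterable_string='numbers', context=context, context_list_index=1,
        parent_iterator=outer
    )
    pairs = []
    try:
        while True:
            next(inner)
            pairs.append(tuple(context['iterables']))
    except StopIteration:
        pass
    return pairs  # [('a', 1), ('a', 2), ('b', 1), ('b', 2)]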
RenderedContent = namedtuple(
typename='RenderedContent', field_names=(
'body', 'include', 'method', 'name', 'url'
)
)
class BatchRequest:
def __init__(
self, collection, name, url, body=None, group_name=None,
include='true', iterables=None, method='GET'
):
self.collection = collection
self.body = body or {}
self.include = include
self.group_name = group_name
self.iterables = iterables
self.method = method
self.name = name
self.url = url
def execute(self):
if self.iterables:
# Initialize the iterables list to allow using any index.
self.collection.context['iterables'] = [None] * len(self.iterables)
iterator = None
for iterable_index, iterable in enumerate(self.iterables):
iterator = NestableLazyIterator(
context=self.collection.context,
context_list_index=iterable_index,
iterable_string=iterable, parent_iterator=iterator
)
while True:
try:
next(iterator)
except StopIteration:
break
except VariableDoesNotExist as exception:
self.collection.responses[self.name] = {
'data': {'error': str(exception)},
'include': 'true',
'is_response': True
}
return
else:
rendered_content = self.render_content()
BatchRequest(
collection=self.collection,
body=rendered_content.body,
group_name=self.group_name,
include=rendered_content.include,
method=rendered_content.method,
name=rendered_content.name,
url=rendered_content.url
).execute()
else:
rendered_content = self.render_content()
url_parts = furl(rendered_content.url)
try:
resolver_match = resolve(path=url_parts.pathstr)
except Resolver404 as exception:
self.collection.responses[rendered_content.name] = {
'data': {
'error': '"{}" not found'.format(exception.args[0]['path'])
},
'include': 'true',
'is_response': True,
'status_code': 404
}
return
else:
environ = getattr(
self.collection.view_request, 'environ', {}
).copy()
environ['REQUEST_METHOD'] = rendered_content.method
environ['PATH_INFO'] = self.url
environ['QUERY_STRING'] = url_parts.querystr
post_query_dict = QueryDict(mutable=True)
post_query_dict.update(rendered_content.body)
json_body = json.dumps(post_query_dict)
request_data = json_body.encode('utf-8')
environ['wsgi.input'] = io.BytesIO(request_data)
environ['CONTENT_LENGTH'] = str(len(request_data))
if rendered_content.method == 'POST':
environ['CONTENT_TYPE'] = MULTIPART_CONTENT
else:
environ['CONTENT_TYPE'] = 'application/octet-stream'
request = WSGIRequest(environ=environ)
request.LANGUAGE_CODE = getattr(
self.collection.view_request, 'LANGUAGE_CODE', None
)
request.POST = post_query_dict
request._read_started = True
request.auth = getattr(
self.collection.view_request, 'auth', None
)
request.csrf_processing_done = True
request.session = getattr(
self.collection.view_request, 'session', None
)
request.user = getattr(
self.collection.view_request, 'user', None
)
response = resolver_match.func(
request=request, **resolver_match.kwargs
)
result = {
'data': response.data,
'headers': {key: value for key, value in response.items()},
'include': rendered_content.include,
'is_response': True,
'status_code': response.status_code
}
self.collection.context[rendered_content.name] = result
self.collection.responses[rendered_content.name] = result
if self.group_name:
self.collection.context.setdefault('groups', {})
self.collection.context['groups'].setdefault(
self.group_name, []
)
self.collection.context['groups'][self.group_name].append(
result
)
def render_content(self):
rendered_body = {}
for key, value in self.body.items():
rendered_key = Template(template_string=key).render(
context=self.collection.context
)
rendered_value = Template(template_string=value).render(
context=self.collection.context
)
rendered_body[rendered_key] = rendered_value
rendered_include = Template(template_string=self.include).render(
context=self.collection.context
)
rendered_method = Template(template_string=self.method).render(
context=self.collection.context
)
rendered_name = Template(template_string=self.name).render(
context=self.collection.context
)
rendered_url = Template(template_string=self.url).render(
context=self.collection.context
)
return RenderedContent(
body=rendered_body, include=rendered_include,
method=rendered_method, name=rendered_name, url=rendered_url
)
class BatchRequestCollection:
def __init__(self, request_list=None):
self.requests = []
for request_index, request_dict in enumerate(request_list):
request_dict.update(
{'collection': self}
)
try:
self.requests.append(BatchRequest(**request_dict))
except Exception as exception:
raise ValueError(
'Error instantiating request #{}; {}'.format(
request_index, exception
)
) from exception
def execute(self, view_request):
self.context = {'view_request': view_request}
self.responses = {}
self.view_request = view_request
for request in self.requests:
request.execute()
# Convert responses in context into response class instances.
result = []
for key, value in self.responses.items():
if json.loads(s=value.get('include', 'true')):
result.append(
BatchResponse(
name=key,
status_code=value.get('status_code', 0),
data=value.get('data', {}),
headers=value.get('headers', {}),
)
)
return result
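# Illustrative payload sketch (hypothetical endpoint URLs and fields): each
# dict becomes a BatchRequest; 'name' keys later requests' template lookups
# into earlier responses, and 'iterables' re-issues a request once per item
# resolved from the context.
def _example_request_list():
    return [
        {
            'name': 'documents', 'method': 'GET',
            'url': '/api/v4/documents/'
        },
        {
            'name': 'document_detail_{{ iterables.0.id }}', 'method': 'GET',
            'url': '/api/v4/documents/{{ iterables.0.id }}/',
            'iterables': ['documents.data.results']
        }
    ]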
class Endpoint:
def __init__(self, label, viewname=None, kwargs=None):
self.label = label
self.kwargs = kwargs
if viewname:
self.viewname = viewname
else:
installation_base_url = setting_organization_url_base_path.value
if installation_base_url:
installation_base_url = '/{}'.format(installation_base_url)
else:
installation_base_url = ''
self.url = '{}/api/v{}/{}/'.format(
installation_base_url, API_VERSION, self.label
)
try:
self.viewname = resolve(path=self.url).view_name
except Resolver404:
self.viewname = None
| 34.363333
| 85
| 0.549811
| 9,629
| 0.934038
| 0
| 0
| 0
| 0
| 0
| 0
| 752
| 0.072946
|
fb97354673fa2e5ae7cab8bfd23169b53bcbcce7
| 254
|
py
|
Python
|
python/griddly/util/rllib/torch/agents/common.py
|
maichmueller/Griddly
|
25b978a08f13226de2831d0941af0f37fea12718
|
[
"MIT"
] | 93
|
2020-05-29T14:36:46.000Z
|
2022-03-28T02:58:04.000Z
|
python/griddly/util/rllib/torch/agents/common.py
|
maichmueller/Griddly
|
25b978a08f13226de2831d0941af0f37fea12718
|
[
"MIT"
] | 35
|
2020-07-22T16:43:03.000Z
|
2022-03-30T19:50:20.000Z
|
python/griddly/util/rllib/torch/agents/common.py
|
maichmueller/Griddly
|
25b978a08f13226de2831d0941af0f37fea12718
|
[
"MIT"
] | 13
|
2020-07-22T08:24:28.000Z
|
2022-01-28T06:58:38.000Z
|
import numpy as np
from torch import nn
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
"""
    Orthogonally initialize the layer's weights (scaled by std) and set its bias to bias_const
"""
nn.init.orthogonal_(layer.weight, std)
nn.init.constant_(layer.bias, bias_const)
return layer
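# Usage sketch (hypothetical sizes, not part of the library): a PPO-style
# policy head. A small std on the final layer keeps the initial action
# logits near zero, which is why layer_init exposes std as a parameter.
def example_policy_head(n_inputs=256, n_actions=4):
    return layer_init(nn.Linear(n_inputs, n_actions), std=0.01)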
| 21.166667
| 54
| 0.685039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.181102
|
fb97fb152011251fca82737d7f9e6211e38b167b
| 9,414
|
py
|
Python
|
turnitin/src9.py
|
alvaedu/NYUsakai11
|
2434f320c49072d23af77062ea763228374f4c25
|
[
"ECL-2.0"
] | 4
|
2017-03-22T16:57:42.000Z
|
2020-04-07T17:34:41.000Z
|
turnitin/src9.py
|
alvaedu/NYUsakai11
|
2434f320c49072d23af77062ea763228374f4c25
|
[
"ECL-2.0"
] | 216
|
2016-06-23T14:02:32.000Z
|
2021-08-31T17:11:24.000Z
|
turnitin/src9.py
|
alvaedu/NYUsakai11
|
2434f320c49072d23af77062ea763228374f4c25
|
[
"ECL-2.0"
] | 15
|
2016-06-17T16:26:08.000Z
|
2017-08-19T21:06:33.000Z
|
"""
Test script for src=9 provisioning
Below are some odd examples and notes:
Adding a class
{
'src': '9',
'uln': 'Githens',
'ufn': 'Steven',
'aid': '56021',
'utp': '2',
'said': '56021',
'fid': '2',
'username': 'swgithen',
'ctl': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c',
'diagnostic': '0',
'encrypt': '0',
'uem': 'swgithen@mtu.edu',
'cid': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c',
'fcmd': '2'
}
{rmessage=Successful!, userid=17463901, classid=2836785, rcode=21}
Adding an assignment
{
'fid': '4',
'diagnostic': '0',
'ufn': 'Steven',
'uln': 'Githens',
'username': 'swgithen',
'assignid': 'AssignmentTitlec717957d-254f-4d6d-a64c-952e630db872',
'aid': '56021',
'src': '9',
'cid': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c', 'said': '56021', 'dtstart': '20091225', 'encrypt': '0', 'assign': 'AssignmentTitlec717957d-254f-4d6d-a64c-952e630db872', 'uem': 'swgithen@mtu.edu', 'utp': '2', 'fcmd': '2', 'ctl': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c', 'dtdue': '20100101'}
{rmessage=Successful!, userid=17463901, classid=2836785, assignmentid=7902977, rcode=41}
Adding an assignment with another inst
{'fid': '4', 'diagnostic': '0', 'ufn': 'StevenIU', 'uln': 'GithensIU', 'username': 'sgithens', 'assignid': 'AssignmentTitle5ae51e10-fd60-4720-931b-ed4f58057d00', 'aid': '56021', 'src': '9', 'cid': '2836785', 'said': '56021', 'dtstart': '20091225', 'encrypt': '0', 'assign': 'AssignmentTitle5ae51e10-fd60-4720-931b-ed4f58057d00', 'uem': 'sgithens@iupui.edu', 'utp': '2', 'fcmd': '2', 'ctl': 'CourseTitleb018b622-b425-4af7-bb3d-d0d2b4deb35c', 'dtdue': '20100101'}
{rmessage=Successful!, userid=17463902, classid=2836786, assignmentid=7902978, rcode=41}
Adding a class
{'src': '9', 'uln': 'Githens', 'ufn': 'Steven', 'aid': '56021', 'utp': '2', 'said': '56021', 'fid': '2', 'username': 'swgithen', 'ctl': 'CourseTitle46abd163-7464-4d21-a2c0-90c5af3312ab', 'diagnostic': '0', 'encrypt': '0', 'uem': 'swgithen@mtu.edu', 'fcmd': '2'}
{rmessage=Successful!, userid=17259618, classid=2836733, rcode=21}
Adding an assignment
{'fid': '4', 'diagnostic': '0', 'ufn': 'Steven', 'uln': 'Githens', 'username': 'swgithen', 'assignid': 'AssignmentTitlec4f211c1-2c38-4daf-86dc-3c57c6ef5b7b', 'aid': '56021', 'src': '9', 'cid': '2836733', 'said': '56021', 'dtstart': '20091225', 'encrypt': '0', 'assign': 'AssignmentTitlec4f211c1-2c38-4daf-86dc-3c57c6ef5b7b', 'uem': 'swgithen@mtu.edu', 'utp': '2', 'fcmd': '2', 'ctl': 'CourseTitle46abd163-7464-4d21-a2c0-90c5af3312ab', 'dtdue': '20100101'}
{rmessage=Successful!, userid=17463581, classid=2836734, assignmentid=7902887, rcode=41}
Adding an assignment with another inst
{'fid': '4', 'diagnostic': '0', 'ufn': 'StevenIU', 'uln': 'GithensIU', 'username': 'sgithens', 'assignid': 'AssignmentTitle2650fcca-b96e-42bd-926e-63660076d2ad', 'aid': '56021', 'src': '9', 'cid': '2836733', 'said': '56021', 'dtstart': '20091225', 'encrypt': '0', 'assign': 'AssignmentTitle2650fcca-b96e-42bd-926e-63660076d2ad', 'uem': 'sgithens@iupui.edu', 'utp': '2', 'fcmd': '2', 'ctl': 'CourseTitle46abd163-7464-4d21-a2c0-90c5af3312ab', 'dtdue': '20100101'}
{rmessage=Successful!, userid=17463581, classid=2836734, assignmentid=7902888, rcode=41}
"""
import unittest
import random
import sys
from org.sakaiproject.component.cover import ComponentManager
from java.net import InetSocketAddress, Proxy, InetAddress
from java.util import HashMap
debug_proxy = Proxy(Proxy.Type.HTTP, InetSocketAddress(InetAddress.getByName("127.0.0.1"),8008))
tiireview_serv = ComponentManager.get("org.sakaiproject.contentreview.service.ContentReviewService")
class SakaiUuid(object):
"""My Current Jython impl doens't seem to have UUID, so re-implementing it
for now"""
def __init__(self):
self.idmanager = ComponentManager.get("org.sakaiproject.id.api.IdManager")
def uuid1(self):
return self.idmanager.createUuid()
uuid = SakaiUuid()
def getJavaMap(d=None,**kwargs):
m = HashMap()
if d is not None:
for key,val in d.iteritems():
m.put(key,val)
for key,val in kwargs.iteritems():
m.put(key,val)
return m
defaults = {
"aid": "56021",
"said": "56021",
"diagnostic": "0",
"encrypt": "0",
"src": "9"
}
userdummy = {
"uem": "swgithenaabb1234124@mtu.edu",
"ufn": "Stevenaabb1234",
"uln": "Githensaaabb234",
"utp": "2",
"uid": "1979092312341234124aabb",
"username": "swgithenaabb1234124"
}
user = {
"uem": "swgithen@mtu.edu",
"ufn": "Steven",
"uln": "Githens",
"utp": "2",
#"uid": "19790923",
"username": "swgithen"
}
user2 = {
"uem": "sgithens@iupui.edu",
"ufn": "StevenIU",
"uln": "GithensIU",
"utp": "2",
"username": "sgithens"
}
adduser = {
"fcmd" : "2",
"fid" : "1"
}
def callTIIReviewServ(params):
"""Use the Sakai Turnitin Service to make a raw call to TII with the
dictionary of parameters. Returns the API results in map/dict form."""
return tiireview_serv.callTurnitinWDefaultsReturnMap(getJavaMap(params))
def makeNewCourseTitle():
"Make and return a new random title to use for integration test courses"
return "CourseTitle"+str(uuid.uuid1())
def makeNewAsnnTitle():
"Make and return a new random title to use for integration test assignments"
return "AssignmentTitle"+str(uuid.uuid1())
def addSampleInst():
"""This will add/update a user to Turnitin. A successful return looks as
follows:
{rmessage=Successful!, userid=17259618, rcode=11}
    It is important to note that the userid returned is the userid of whoever made
this API call, and not necessarily the user that was just added.
"""
adduser_cmd = {}
adduser_cmd.update(adduser)
adduser_cmd.update(user)
adduser_cmd.update(defaults)
return callTIIReviewServ(adduser_cmd)
def addSampleClass():
"""Add a simple class using Sakai Source 9 parameters.
Successful results should look as follows:
{rmessage=Successful!, userid=17259618, classid=2833470, rcode=21}
"""
addclass_cmd = {}
addclass_cmd.update(user)
addclass_cmd.update(defaults)
addclass_cmd.update({
"ctl": makeNewCourseTitle(),
"utp":"2",
"fid":"2",
"fcmd":"2"
})
return callTIIReviewServ(addclass_cmd)
def addSampleAssignment():
"""Add a simple assignment."""
course_title = makeNewCourseTitle()
addclass_cmd = {}
addclass_cmd.update(user)
addclass_cmd.update(defaults)
addclass_cmd.update({
"ctl": course_title,
"cid": course_title,
"utp":"2",
"fid":"2",
"fcmd":"2"
})
print("Adding a class\n"+str(addclass_cmd))
addclass_results = callTIIReviewServ(addclass_cmd)
print(addclass_results)
cid = addclass_results["classid"]
asnn_title = makeNewAsnnTitle()
addasnn_cmd = {}
addasnn_cmd.update(user)
addasnn_cmd.update(defaults)
addasnn_cmd.update({
"fid":"4",
"fcmd":"2",
"ctl":course_title,
"assign":asnn_title,
"assignid":asnn_title,
"utp":"2",
"dtstart":"20091225",
"dtdue":"20100101",
"cid":course_title
#"ced":"20110101"
})
print("Adding an assignment\n"+str(addasnn_cmd))
print(callTIIReviewServ(addasnn_cmd))
# Trying with a second instructor now
asnn_title = makeNewAsnnTitle()
addasnn_cmd = {}
addasnn_cmd.update(user2)
addasnn_cmd.update(defaults)
addasnn_cmd.update({
"fid":"4",
"fcmd":"2",
"ctl":course_title,
"assign":asnn_title,
"assignid":asnn_title,
"utp":"2",
"dtstart":"20091225",
"dtdue":"20100101",
"cid":cid
#"ced":"20110101"
})
print("Adding an assignment with another inst\n"+str(addasnn_cmd))
print(callTIIReviewServ(addasnn_cmd))
# Temporarily change to straight HTTP so I can intercept with WebScarab to get a parameter dump
#tiiresult = tiireview_serv.callTurnitinReturnMap("http://www.turnitin.com/api.asp?",
# getJavaMap(adduser_cmd), "sakai123", debug_proxy
# );
class TestRawTurnitinSource9(unittest.TestCase):
"""
This set of test cases is going to flex using the raw Turnitin API by
sending the hand crafted maps to the server and examing the return results.
Additionally all these tests will use the source 9 setup.
"""
def setUp(self):
self.tiireview_serv = ComponentManager.get("org.sakaiproject.contentreview.service.ContentReviewService")
def testAdduser(self):
results = addSampleInst()
self.assertEquals(results["rmessage"],"Successful!")
self.assertEquals(results["rcode"],"11")
def testAddclass(self):
results = addSampleClass()
self.assertEquals(results["rmessage"],"Successful!")
self.assertEquals(results["rcode"],"21")
def main(args):
if len(args) > 0 and args[0] == "runtests":
print("Running the tests")
tii_suites = []
tii_suites.append(unittest.TestLoader().loadTestsFromTestCase(TestRawTurnitinSource9))
alltests = unittest.TestSuite(tii_suites)
unittest.TextTestRunner(verbosity=2).run(alltests)
else:
addSampleAssignment()
if __name__ == "__main__":
main(sys.argv[1:])
| 34.866667
| 461
| 0.652008
| 1,069
| 0.113554
| 0
| 0
| 0
| 0
| 0
| 0
| 5,759
| 0.611748
|
fb99379467ad51c39cd5405a13aedf9d925212e0
| 40
|
py
|
Python
|
test.py
|
probot1511/test_repo
|
9dee2d2eb1c44c09d04d91861b3f9bd2b63c4e0f
|
[
"MIT"
] | null | null | null |
test.py
|
probot1511/test_repo
|
9dee2d2eb1c44c09d04d91861b3f9bd2b63c4e0f
|
[
"MIT"
] | null | null | null |
test.py
|
probot1511/test_repo
|
9dee2d2eb1c44c09d04d91861b3f9bd2b63c4e0f
|
[
"MIT"
] | 1
|
2022-01-31T19:24:49.000Z
|
2022-01-31T19:24:49.000Z
|
print("RUnning!!!")
print("Updated!!!")
| 13.333333
| 19
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 24
| 0.6
|
fb9987081ed710e35f756b48711ebeb1fdc7fbe0
| 2,309
|
py
|
Python
|
tests/parser/choice.47.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/choice.47.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/choice.47.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
% This is a synthetic example documenting a bug in an early version of DLV's
% backjumping algorithm.
% The abstract computation tree looks as follows (choice order should be fixed
% by disabling heuristics with -OH-):
%
% o
% a / \ -a
% / \_..._
% o \
% b / \ -b {-a,-b,f}
% / \
% o o
% incons incons based on a and b
% based
% only
% on b
%
% The backjumping algorithm wrongly determined that in the bottom left
% subtree both inconsistencies are based only on the choice of b and
% therefore stopped the entire search, missing the model on the right.
a | -a.
b | -b.
% taking b causes inconsistency
x :- b.
y :- b.
:- x,y.
% taking -b causes m1 to be MBT, but only with a
% taking -b unconditionally causes d to be false
:- -b, a, not m1.
:- -b, d.
% the constraint is violated if m1 is MBT and d is false
% the reasons are obviously the choice for b and the choice for a
:- m1, not d.
% give m1 a chance to be true
% if not allow a model with f
m1 | f.
% avoid d to be always false
% and allow a model with f
d | f.
"""
output = """
% This is a synthetic example documenting a bug in an early version of DLV's
% backjumping algorithm.
% The abstract computation tree looks as follows (choice order should be fixed
% by disabling heuristics with -OH-):
%
% o
% a / \ -a
% / \_..._
% o \
% b / \ -b {-a,-b,f}
% / \
% o o
% incons incons based on a and b
% based
% only
% on b
%
% The backjumping algorithm wrongly determined that in the bottom left
% subtree both inconsistencies are based only on the choice of b and
% therefore stopped the entire search, missing the model on the right.
a | -a.
b | -b.
% taking b causes inconsistency
x :- b.
y :- b.
:- x,y.
% taking -b causes m1 to be MBT, but only with a
% taking -b unconditionally causes d to be false
:- -b, a, not m1.
:- -b, d.
% the constraint is violated if m1 is MBT and d is false
% the reasons are obviously the choice for b and the choice for a
:- m1, not d.
% give m1 a chance to be true
% if not allow a model with f
m1 | f.
% avoid d to be always false
% and allow a model with f
d | f.
"""
| 23.323232
| 79
| 0.60589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,290
| 0.991771
|
fb9ad71d95d0e5101ca350ea6c907e1e31dc4b55
| 7,189
|
py
|
Python
|
functions.py
|
brupoon/blackfork
|
8acf49907b7140894f72255f5ccd3e3e7cd638a0
|
[
"MIT"
] | null | null | null |
functions.py
|
brupoon/blackfork
|
8acf49907b7140894f72255f5ccd3e3e7cd638a0
|
[
"MIT"
] | null | null | null |
functions.py
|
brupoon/blackfork
|
8acf49907b7140894f72255f5ccd3e3e7cd638a0
|
[
"MIT"
] | null | null | null |
from __future__ import division
from random import randrange
#...(former location of probability as a FN GLOBAL)
#OUR SUPERCOOL GENETIC MUTANT NINJA TURTALGORITHM
def genetics(sampleList):
winLoss = [] #win/loss arrays for each sample
    for sample in range(len(sampleList)):
winLoss.append(isWinSample(sampleList[sample]))
#The algorithm which dictates what our hand does
#bustThreshold is the determinant for whether we hit or stay
def lettuce(game, bustThreshold=0.5):
print("Pile given to lettuce", game[0])
# value = calculateValue(hand)
# if value < threshold:
# newCard = randomCard(pile)
# deal(pile, newCard, hand)
# if value >= threshold:
i = 0
dealer = dealerLikely(game, 1)
    while dealer[0] < 17:
print("in dealerLikely loop in lettuce")
i += 1
dealer = dealerLikely(game, i)
#Used to make sure that we get accurate numbers for when the dealer
#algo actually stays.
print("Pile after dealerLikely loop" , game[0])
handValue = calculateValue(game[1][1]) #our hand
equation = ((handValue - dealer[0]))
if equation <= 0: #dealer likely is higher
#deal the hand
if bustChance(game, 1, 1) <= bustThreshold:
print("attempting to deal to me")
deal(game[0],randomCard(game[0]),game[1][1])
print("Game before lettuce round", game)
            lettuce(game) #recurse: keep hitting until the bust chance passes bustThreshold
print("pile after recursion", game[0])
dealerValue = calculateValue(game[1][0])
print("right before deathloop")
while dealerValue < 17:
print("in dealerValue/algorithm loop in lettuce")
print("pile prior to algorithm", game[0])
algorithm(game[1][0],game[0])
print("dealer hand:", game[1][0])
dealerValue = calculateValue(game[1][0])
#things we need
#a 'goodness equation" which can be run for each sample in our simulation
#we can use a function which creates an array of True/False for wins or losses
#a semi-random generator which selects what hands to hit on and stay
#beginning with simple hit/stay on 15,16,17 etc
#continuing onto dealing with percentages given certain
#returns a list with [highest probable dealer hand value, percentage of getting that value]
def dealerLikely(game, cards, handNum = 0):
probabilityList = callEstimate(game[0],game[1][handNum], cards)
print("pile in dealerLikely", game[0])
    highestProbableValue = 0
    bestCount = 0
    for value, count in enumerate(probabilityList):
        if count > bestCount:
            highestProbableValue = value
            bestCount = count
    totalOutcomes = sum(probabilityList)
    percentage = bestCount / totalOutcomes if totalOutcomes else 0
    return [highestProbableValue, percentage]
#Returns a float that is the chance of busting
def bustChance(game, handNum, cards):
bustList = callEstimate(game[0], game[1][handNum], cards)
print("This is bust list ", bustList)
bust = 0
notBust = 0
for i in range(len(bustList)):
if i < 21:
notBust = notBust + bustList[i]
else:
bust = bust + bustList[i]
print(bust)
print(notBust)
return bust/(bust + notBust)
#returns the total number of cards in the pile
def total(pile):
total = 0
for i in range(len(pile)):
total = total + pile[i]
return total
#creates a list of hands incl dealer and initializes the non-dealer hands
def handList(dealer, pile, numhands):
handList = []
handList.append(dealer)
for i in range(numhands):
handList.append(initHand(pile))
return handList
#Give it a pile, hand, and the amount of cards to deal
#Returns an array indexed by hand value; each entry is an outcome count (normalize to get chances)
def callEstimate(pile, hand, numberOfCards):
probability = []
print("pile in callestimate", pile)
for i in range((len(hand) + 1) * 11):
probability.append(0)
    pileEstimate = pile[:] #copy so estimate() cannot consume the caller's pile
estimate(calculateValue(hand), pileEstimate, numberOfCards, probability)
print("pile after estimate / for loop in callestimate", pile)
return probability
def estimate(value, pile, cards, probability):
    newpile = pile[:] #work on a copy; the recursion below mutates it
print("pile in estimate", cards, "newpile:", newpile)
if cards == 0:
probability[value] = probability[value] + 1
return probability
else:
for i in range(0,13):
while newpile[i] > 0:
newpile[i] = newpile[i] - 1
                probability = estimate(value + calculateValue([i]), newpile, cards - 1, probability)
        return probability
#Changeable dealer decision algorithm (updates the dealer's hand): by default hit until reaching 17
def algorithm(hand, pile, threshold = 17):
print("algorithm start pile", pile)
value = calculateValue(hand)
while value < threshold:
value = calculateValue(hand)
print("pile before randomCard", pile)
newCard = randomCard(pile)
deal(pile, newCard, hand)
#chooses a random card from the pile, value 0-12
#DON'T TOUCH
def randomCard(pile):
print("totalpile:",total(pile))
goal = randrange(0,total(pile))
card = 0
if goal < pile[0]:
return 0
while goal >= pile[card]:
goal = goal - pile[card]
card = card + 1
return card
#removes a card from the pile, value 0-12
def deal(pile, card, hand):
if pile[card] > 0:
pile[card] = pile[card] - 1
addToHand(hand, card)
else:
return None
#adds a card to hand
def addToHand(hand, card):
hand.append(card)
return hand
#calculates value of a hand
#aces count as 1, and one ace is promoted to 11 when that does not bust
def calculateValue(hand):
total = 0
aces = 0
for i in hand:
if i == 10: #jack
total = total + 10
elif i == 11: #queen
total = total + 10
elif i == 12: #king
total = total + 10
        elif i == 0: #ace
            aces = aces + 1
            total = total + 1
        else:
            total = total + (i+1)
    if aces > 0 and total + 10 <= 21:
        total = total + 10 #promote one ace from 1 to 11 when it fits
    return total
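#e.g. calculateValue([12, 0]) == 21 (king plus an ace promoted to 11), while
#calculateValue([0, 0, 9]) == 12 (two aces as 1 each; promotion would bust)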
#Given threshold, returns True to hit, False to stay
#need to make it possible to get probability of going over
def hitStay(threshold, pile, card):
probability = chance(pile, card)
if probability >= threshold:
return True #hit
else:
return False #stay
#calculates probability of drawing a card from the pile
def chance(pile, card):
probability = amount(pile, card) / total(pile)
return probability
#returns the number of a specific kind of card in the pile
def amount(pile, card):
return pile[card]
#checks each hand in handList to see if it has busted, returns true if over.
def checkList(handList):
busted = []
for hand in handList:
if calculateValue(hand) <= 21:
busted.append(False)
else:
busted.append(True)
return busted
def isWin(game):
    if calculateValue(game[1][1]) > calculateValue(game[1][0]): #player beats dealer
        return True
    else:
        return False
def isWinSample(sample):
    winList = []
    for game in sample:
        winList.append(isWin(game))
    numberWon = 0
    numberLost = 0
    for won in winList:
        if won:
            numberWon += 1
        else:
            numberLost += 1
    return [numberWon, numberLost]
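#A minimal usage sketch. `initHand` and the game layout (game = [pile, hands],
#with the dealer's hand first) come from earlier in this module; the 52-card
#pile below (four of each of the 13 ranks) is a demo assumption.
if __name__ == '__main__':
    pile = [4] * 13
    game = [pile, handList([], pile, 1)]
    algorithm(game[1][0], pile) #play out the dealer's hand to at least 17
    print("dealer:", calculateValue(game[1][0]), "player:", calculateValue(game[1][1]))
    print("player bust chance on next hit:", bustChance(game, 1, 1))
    print("player wins:", isWin(game))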
| 31.669604
| 100
| 0.640979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,381
| 0.3312
|
fb9c65b9797d0529bc740e716a16f8507c95db85
| 1,567
|
py
|
Python
|
testframework/checkers/spanner_checker.py
|
emartech/ems-dataflow-testframework
|
c70b0768573e9c4af98173bb0b444dee442de53a
|
[
"MIT"
] | null | null | null |
testframework/checkers/spanner_checker.py
|
emartech/ems-dataflow-testframework
|
c70b0768573e9c4af98173bb0b444dee442de53a
|
[
"MIT"
] | null | null | null |
testframework/checkers/spanner_checker.py
|
emartech/ems-dataflow-testframework
|
c70b0768573e9c4af98173bb0b444dee442de53a
|
[
"MIT"
] | 1
|
2022-02-17T19:56:44.000Z
|
2022-02-17T19:56:44.000Z
|
import logging
from collections.abc import Generator
from typing import Dict
from spanner import ems_spanner_client
from tenacity import retry, stop_after_attempt, wait_fixed
class SpannerChecker:
    STOP_AFTER_ATTEMPTS = 15 #retry attempts, with WAIT_FIXED seconds between them
WAIT_FIXED = 3
def __init__(self, project_id: str, instance_id: str, db_name: str) -> None:
self._client = ems_spanner_client.EmsSpannerClient(project_id, instance_id, db_name)
def execute_sql(self, query: str) -> Generator:
logging.info(f"Executing query: {query}")
return self._client.execute_sql(query)
def execute_update(self, query: str):
logging.info(f"Executing update: {query}")
self._client.execute_update(query)
def has_row_for(self, table_name: str, conditions: Dict):
        @retry(stop=stop_after_attempt(self.STOP_AFTER_ATTEMPTS), wait=wait_fixed(self.WAIT_FIXED))
def is_found(query: str):
if list(self.execute_sql(query))[0][0] == 0:
raise ValueError("Spanner table row not found.")
return True
query = self._compose_query(table_name, conditions)
return is_found(query)
@staticmethod
def _compose_query(table_name, conditions) -> str:
normalized_conditions = []
for key, value in conditions.items():
quoted_value = f"'{value}'" if isinstance(value, str) else value
normalized_conditions.append(f'{key} = {quoted_value}')
where = ' AND '.join(normalized_conditions)
return f'SELECT COUNT(*) FROM {table_name} WHERE {where}'
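# A minimal usage sketch; the project/instance/database identifiers and the
# table and column names below are placeholders, not part of this framework:
#
# checker = SpannerChecker('my-project', 'my-instance', 'my-db')
# # retries until a matching row appears or the retry budget is exhausted
# assert checker.has_row_for('events', {'event_id': 42, 'status': 'processed'})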
| 37.309524
| 103
| 0.685386
| 1,392
| 0.888322
| 0
| 0
| 687
| 0.438417
| 0
| 0
| 179
| 0.114231
|
fb9c6e6bdafc518cc8754d80b7344aed59410824
| 458
|
py
|
Python
|
src/sniptly/output.py
|
jjaakko/sniptly
|
c8190294f75a7b3db26af40e4b3592b5c5971b91
|
[
"MIT"
] | null | null | null |
src/sniptly/output.py
|
jjaakko/sniptly
|
c8190294f75a7b3db26af40e4b3592b5c5971b91
|
[
"MIT"
] | null | null | null |
src/sniptly/output.py
|
jjaakko/sniptly
|
c8190294f75a7b3db26af40e4b3592b5c5971b91
|
[
"MIT"
] | null | null | null |
from typing import Any
from click import echo, style
def out(message: str, new_line: bool = True, **styles: Any) -> None:
if "bold" not in styles:
styles["bold"] = True
message = style(message, **styles)
echo(message, nl=new_line)
def err(message: str, new_line: bool = True, **styles: Any) -> None:
if "fg" not in styles:
styles["fg"] = "red"
message = style(message, **styles)
echo(message, nl=new_line)
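# A small usage sketch of the two helpers above:
if __name__ == "__main__":
    out("Snippet created")             # bold by default
    out("Skipping file", fg="yellow")  # any click.style kwargs pass through
    err("Could not parse config")      # red by default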
| 26.941176
| 68
| 0.615721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.054585
|
fb9d867e25a8da5be0e4b33fa5b6bfbaf98a5fde
| 298
|
py
|
Python
|
bookmarks/images/signals.py
|
hiteshgarg14/Django-Social-Website
|
750f3b6e457a0da84e3fe4eaa56f54cb007d9e1e
|
[
"MIT"
] | 1
|
2020-11-19T19:33:10.000Z
|
2020-11-19T19:33:10.000Z
|
bookmarks/images/signals.py
|
hiteshgarg14/Django-Social-Website
|
750f3b6e457a0da84e3fe4eaa56f54cb007d9e1e
|
[
"MIT"
] | null | null | null |
bookmarks/images/signals.py
|
hiteshgarg14/Django-Social-Website
|
750f3b6e457a0da84e3fe4eaa56f54cb007d9e1e
|
[
"MIT"
] | null | null | null |
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from .models import Image
@receiver(m2m_changed, sender=Image.users_likes.through)
def users_like_changed(sender, instance, **kwargs):
instance.total_likes = instance.users_likes.count()
instance.save()
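# For the receiver above to fire, this module must be imported at startup,
# conventionally from the app config's ready() hook. A minimal sketch (the
# "images" app label and the ImagesConfig name are assumptions):
#
# # images/apps.py
# from django.apps import AppConfig
#
# class ImagesConfig(AppConfig):
#     name = 'images'
#
#     def ready(self):
#         import images.signals  # noqa: F401  -- registers the m2m_changed receiver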
| 33.111111
| 56
| 0.802013
| 0
| 0
| 0
| 0
| 184
| 0.61745
| 0
| 0
| 0
| 0
|
fb9daa303c7186ca7833c4c97259f8015245fe48
| 4,579
|
py
|
Python
|
src/nemo/transforms.py
|
thomasjo/nemo-redux
|
c4196c0d99633dca011d60008be0cb7667c348b7
|
[
"MIT"
] | null | null | null |
src/nemo/transforms.py
|
thomasjo/nemo-redux
|
c4196c0d99633dca011d60008be0cb7667c348b7
|
[
"MIT"
] | null | null | null |
src/nemo/transforms.py
|
thomasjo/nemo-redux
|
c4196c0d99633dca011d60008be0cb7667c348b7
|
[
"MIT"
] | null | null | null |
import random
from typing import List, Union
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image
class RandomDiscreteRotation():
def __init__(self, angles, resample=0, expand=False):
self.angles = angles
self.resample = resample
self.expand = expand
def __call__(self, image, target=None):
if target is not None:
raise NotImplementedError("target transformation not implemented")
angle = random.choice(self.angles)
image = F.rotate(image, angle, self.resample, self.expand)
return image
def __repr__(self):
return f"{self.__class__.__name__}(angles={self.angles})"
class Compose():
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomHorizontalFlip():
def __init__(self, p=0.5):
self.p = p
def __call__(self, image, target):
if torch.rand(1) < self.p:
image = F.hflip(image)
width, _ = _get_image_size(image)
boxes = target["boxes"]
boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
target["boxes"] = boxes
if "masks" in target:
target["masks"] = F.hflip(target["masks"])
return image, target
class RandomVerticalFlip():
def __init__(self, p=0.5):
self.p = p
def __call__(self, image, target):
if torch.rand(1) < self.p:
image = F.vflip(image)
_, height = _get_image_size(image)
boxes = target["boxes"]
boxes[:, [1, 3]] = height - boxes[:, [3, 1]]
target["boxes"] = boxes
if "masks" in target:
target["masks"] = F.vflip(target["masks"])
return image, target
class RandomFlip():
def __init__(self, p=0.5):
self.p = p
self.transforms = [
RandomHorizontalFlip(p),
RandomVerticalFlip(p),
]
def __call__(self, image, target):
t = random.choice(self.transforms)
return t(image, target)
class GammaJitter():
def __init__(self, gamma=0):
self.gamma = self._check_input(gamma, "gamma")
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, (int, float)):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - float(value), center + float(value)]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
            raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))
if value[0] == value[1] == center:
value = None
return value
def __call__(self, image, target):
gamma = torch.tensor(1.0).uniform_(self.gamma[0], self.gamma[1]).item()
image = F.adjust_gamma(image, gamma)
return image, target
class ColorJitter():
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.color_jitter = T.ColorJitter(brightness, contrast, saturation, hue)
def __call__(self, image, target):
image = self.color_jitter(image)
return image, target
class RandomChoice():
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
t = random.choice(self.transforms)
return t(image, target)
class ToTensor():
def __call__(self, image, target):
image = F.to_tensor(image)
return image, target
def _get_image_size(img: Union[Image.Image, torch.Tensor]):
if isinstance(img, torch.Tensor):
return _get_tensor_image_size(img)
elif isinstance(img, Image.Image):
return img.size
raise TypeError("Unexpected input type")
def _is_tensor_a_torch_image(x: torch.Tensor) -> bool:
return x.ndim >= 2
def _get_tensor_image_size(img: torch.Tensor) -> List[int]:
"""Returns (w, h) of tensor image"""
if _is_tensor_a_torch_image(img):
return [img.shape[-1], img.shape[-2]]
raise TypeError("Unexpected input type")
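# A minimal usage sketch, assuming a detection-style target dict with "boxes"
# (and a "labels" entry) shaped as the transforms above expect:
if __name__ == "__main__":
    transform = Compose([
        RandomFlip(p=0.5),
        ColorJitter(brightness=0.2, contrast=0.2),
        ToTensor(),
    ])
    image = Image.new("RGB", (64, 48))
    target = {"boxes": torch.tensor([[4.0, 4.0, 20.0, 20.0]]), "labels": torch.tensor([1])}
    image, target = transform(image, target)
    print(image.shape, target["boxes"])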
| 28.798742
| 103
| 0.602752
| 3,825
| 0.835335
| 0
| 0
| 0
| 0
| 0
| 0
| 398
| 0.086919
|
fb9fdfc27bb90a2635e9ed5a41c5798497074c0d
| 154
|
py
|
Python
|
zadania-python/zadanie#8.01-03/zadanie_8_01.py
|
Qeentissue/wizualizacja-danych
|
36914230ff1c28d8a5cd05a2d4dfd5d3f4ddc1b0
|
[
"MIT"
] | null | null | null |
zadania-python/zadanie#8.01-03/zadanie_8_01.py
|
Qeentissue/wizualizacja-danych
|
36914230ff1c28d8a5cd05a2d4dfd5d3f4ddc1b0
|
[
"MIT"
] | null | null | null |
zadania-python/zadanie#8.01-03/zadanie_8_01.py
|
Qeentissue/wizualizacja-danych
|
36914230ff1c28d8a5cd05a2d4dfd5d3f4ddc1b0
|
[
"MIT"
] | null | null | null |
import pandas as pd
# Load into a DataFrame the sheet of children born
# in Poland, available at the address
df = pd.read_csv('Imiona_dzieci_2000-2019.csv')
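# A quick sanity check on the loaded frame; the CSV's column names are not
# shown here, so this sketch only inspects the data rather than assuming them:
print(df.head())
print(df.dtypes)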
| 22
| 50
| 0.779221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.722581
|
fba184b6f53f7d77cbaf5e8e08d7ed47d47fd543
| 5,950
|
py
|
Python
|
vgg16_imagenet.py
|
jamccomb92/TransferLearningPneuomia
|
d476cb89dc75e51ea7bbbea3542590fe0e74dfaa
|
[
"MIT"
] | null | null | null |
vgg16_imagenet.py
|
jamccomb92/TransferLearningPneuomia
|
d476cb89dc75e51ea7bbbea3542590fe0e74dfaa
|
[
"MIT"
] | null | null | null |
vgg16_imagenet.py
|
jamccomb92/TransferLearningPneuomia
|
d476cb89dc75e51ea7bbbea3542590fe0e74dfaa
|
[
"MIT"
] | null | null | null |
import os
from keras import applications
import keras
import tensorflow as tf
import time
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam,SGD
from keras.callbacks import ModelCheckpoint,CSVLogger
from keras import backend as k
DATASET_PATH = '/deepLearning/jamccomb/chest_xray/'
IMAGE_SIZE = (150,150)
NUM_CLASSES = 2
BATCH_SIZE = 32 # try reducing batch size or freeze more layers if your GPU runs out of memory
NUM_EPOCHS = 35
WEIGHTS_FINAL = 'model-transfer-Chest-MobileNet-000001--final.h5'
train_datagen = ImageDataGenerator( rescale=1.0 / 255.0,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
channel_shift_range=10,
horizontal_flip=True,
fill_mode='nearest')
train_batches = train_datagen.flow_from_directory(DATASET_PATH + '/train',
target_size=IMAGE_SIZE,
interpolation='bicubic',
class_mode='categorical',
shuffle=True,
batch_size=BATCH_SIZE)
valid_datagen = ImageDataGenerator(rescale=1.0/255.0)
valid_batches = valid_datagen.flow_from_directory(DATASET_PATH + '/test',
target_size=IMAGE_SIZE,
interpolation='bicubic',
class_mode='categorical',
shuffle=False,
batch_size=BATCH_SIZE)
lrelu = lambda x: tf.keras.activations.relu(x, alpha=0.1) #leaky ReLU helper (not used below)
# Load VGG16 model architecture with the ImageNet weights
model = applications.VGG16(weights = "imagenet", include_top=False, input_shape=[150,150,3])
# Freeze the layers which you don't want to train. Here I am freezing the first 14 layers.
for layer in model.layers[:14]:
layer.trainable = False
# Build classifier
x = model.output
x = Flatten()(x)
x = Dense(32, activation="sigmoid")(x)
predictions = Dense(2, activation="softmax")(x)
#Use Adam optimizer (instead of plain SGD), set learning rate to explore.
adam = Adam(lr=.00001)
#instantiate model
model = Model(inputs=model.input, outputs=predictions)
#Compile model
model.compile(optimizer = adam, loss='categorical_crossentropy', metrics=['accuracy'])
#Print layers for resulting model
model.summary()
#Log training data into csv file
csv_logger = CSVLogger(filename="vgg16-imagenet-log.csv")
checkpointer = ModelCheckpoint(filepath='MobileNet/000001/weights.{epoch:02d}-{val_acc:.2f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
cblist = [csv_logger, checkpointer]
# train the model
model.fit_generator(train_batches,
steps_per_epoch = train_batches.samples // BATCH_SIZE,
validation_data = valid_batches,
validation_steps = valid_batches.samples // BATCH_SIZE,
epochs = NUM_EPOCHS,
callbacks=cblist)
# save trained model and weights
model.save(WEIGHTS_FINAL)
| 58.910891
| 271
| 0.381849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,809
| 0.472101
|
fba2be86b846eae4b6b694478e685649917f0dba
| 7,761
|
py
|
Python
|
mvpa2/tests/test_procrust.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
mvpa2/tests/test_procrust.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
mvpa2/tests/test_procrust.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA Procrustean mapper"""
import unittest
import numpy as np
import itertools
from numpy.linalg import norm
from mvpa2.base import externals
from mvpa2.datasets.base import dataset_wizard
from mvpa2.testing import *
from mvpa2.testing.datasets import *
from mvpa2.mappers.procrustean import ProcrusteanMapper
svds = ["numpy"]
if externals.exists("liblapack.so"):
svds += ["dgesvd"]
if externals.exists("scipy"):
svds += ["scipy"]
class ProcrusteanMapperTests(unittest.TestCase):
@sweepargs(oblique=(False, True))
@sweepargs(svd=svds)
@reseed_rng()
def test_simple(self, svd, oblique):
d_orig = datasets["uni2large"].samples
d_orig2 = datasets["uni4large"].samples
for sdim, nf_s, nf_t, full_test in (
("Same 2D", 2, 2, True),
("Same 10D", 10, 10, True),
("2D -> 3D", 2, 3, True),
("3D -> 2D", 3, 2, False),
):
# figure out some "random" rotation
d = max(nf_s, nf_t)
R = get_random_rotation(nf_s, nf_t, d_orig)
if nf_s == nf_t:
adR = np.abs(1.0 - np.linalg.det(R))
self.assertTrue(
adR < 1e-10,
"Determinant of rotation matrix should " "be 1. Got it 1+%g" % adR,
)
self.assertTrue(norm(np.dot(R, R.T) - np.eye(R.shape[0])) < 1e-10)
for (s, scaling), demean in itertools.product(
((0.3, True), (1.0, False)), (False, True)
):
pm = ProcrusteanMapper(
scaling=scaling, oblique=oblique, svd=svd, demean=demean
)
# pm2 = ProcrusteanMapper(scaling=scaling, oblique=oblique)
if demean:
t1, t2 = d_orig[23, 1], d_orig[22, 1]
else:
t1, t2 = 0, 0
full_test = False # although runs, not intended to perform properly
# Create source/target data
d = d_orig[:, :nf_s]
d_s = d + t1
d_t = np.dot(s * d, R) + t2
# train bloody mapper(s)
ds = dataset_wizard(samples=d_s, targets=d_t)
pm.train(ds)
## not possible with new interface
# pm2.train(d_s, d_t)
## verify that both created the same transformation
# npm2proj = norm(pm.proj - pm2.proj)
# self.assertTrue(npm2proj <= 1e-10,
# msg="Got transformation different by norm %g."
# " Had to be less than 1e-10" % npm2proj)
# self.assertTrue(norm(pm._offset_in - pm2._offset_in) <= 1e-10)
# self.assertTrue(norm(pm._offset_out - pm2._offset_out) <= 1e-10)
# do forward transformation on the same source data
d_s_f = pm.forward(d_s)
self.assertEqual(
d_s_f.shape,
d_t.shape,
msg="Mapped shape should be identical to the d_t",
)
dsf = d_s_f - d_t
ndsf = norm(dsf) / norm(d_t)
if full_test:
dsR = norm(s * R - pm.proj)
if not oblique:
self.assertTrue(
dsR <= 1e-12,
msg="We should have got reconstructed rotation+scaling "
"perfectly. Now got d scale*R=%g" % dsR,
)
self.assertTrue(
np.abs(s - pm._scale) < 1e-12,
msg="We should have got reconstructed scale "
"perfectly. Now got %g for %g" % (pm._scale, s),
)
self.assertTrue(
ndsf <= 1e-12,
msg="%s: Failed to get to the target space correctly."
" normed error=%g" % (sdim, ndsf),
)
# Test if we get back
d_s_f_r = pm.reverse(d_s_f)
# Test if recon proj is true inverse except for high->low projection
if nf_s <= nf_t:
assert_almost_equal(
np.dot(pm._proj, pm._recon),
np.eye(pm._proj.shape[0]),
err_msg="Deviation from identity matrix is too large",
)
dsfr = d_s_f_r - d_s
ndsfr = norm(dsfr) / norm(d_s)
if full_test:
self.assertTrue(
ndsfr <= 1e-12,
msg="%s: Failed to reconstruct into source space correctly."
" normed error=%g" % (sdim, ndsfr),
)
@reseed_rng()
def test_reflection(self, rep=10):
for i in range(rep):
from mvpa2.testing.datasets import get_random_rotation
d = np.random.random((100, 2))
T = get_random_rotation(d.shape[1])
d2 = np.dot(d, T)
# scale it up a bit
d2 *= 1.2
# add a reflection by flipping the first dimension
d2[:, 0] *= -1
ds = dataset_wizard(samples=d, targets=d2)
norm0 = np.linalg.norm(d - d2)
mapper = ProcrusteanMapper(scaling=False, reflection=False)
mapper.train(ds)
norm1 = np.linalg.norm(d2 - mapper.forward(ds).samples)
eps = 1e-7
self.assertLess(
norm1,
norm0 + eps,
msg="Procrustes should reduce difference, "
"but %f > %f" % (norm1, norm0),
)
mapper = ProcrusteanMapper(scaling=True, reflection=False)
mapper.train(ds)
norm2 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm2,
norm1 + eps,
msg="Procrustes with scaling should work better, "
"but %f > %f" % (norm2, norm1),
)
mapper = ProcrusteanMapper(scaling=False, reflection=True)
mapper.train(ds)
norm3 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm3,
norm1 + eps,
msg="Procrustes with reflection should work better, "
"but %f > %f" % (norm3, norm1),
)
mapper = ProcrusteanMapper(scaling=True, reflection=True)
mapper.train(ds)
norm4 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm4,
norm3 + eps,
msg="Procrustes with scaling should work better, "
"but %f > %f" % (norm4, norm3),
)
self.assertLess(
norm4,
norm2 + eps,
msg="Procrustes with reflection should work better, "
"but %f > %f" % (norm4, norm2),
)
def suite(): # pragma: no cover
return unittest.makeSuite(ProcrusteanMapperTests)
if __name__ == "__main__": # pragma: no cover
from . import runner
runner.run()
| 37.492754
| 88
| 0.47365
| 6,735
| 0.867801
| 0
| 0
| 6,676
| 0.860198
| 0
| 0
| 2,141
| 0.275867
|
fba413cbbac04e4578ce84a8676b8bf632b9cb46
| 431
|
py
|
Python
|
configs/production.py
|
syz247179876/Flask-Sports
|
ed2d21c5a6172e7b6f3fc479bd5114fdb171896d
|
[
"Apache-2.0"
] | 2
|
2020-12-02T14:20:44.000Z
|
2020-12-08T15:36:51.000Z
|
configs/production.py
|
syz247179876/Flask-Sports
|
ed2d21c5a6172e7b6f3fc479bd5114fdb171896d
|
[
"Apache-2.0"
] | 1
|
2020-12-05T13:44:14.000Z
|
2020-12-05T13:44:14.000Z
|
configs/production.py
|
syz247179876/Flask-Sports
|
ed2d21c5a6172e7b6f3fc479bd5114fdb171896d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/12/1 11:24 PM
# @Author : 司云中
# @File : production.py
# @Software: Pycharm
from configs.default import DefaultConfig
class ProductionConfig(DefaultConfig):
"""the config of production env"""
DEBUG = False
TESTING = False
MONGODB_DB = ''
MONGODB_HOST = ''
MONGODB_PORT = ''
MONGODB_USERNAME = ''
MONGODB_PASSWORD = ''
production_config = ProductionConfig()
| 20.52381
| 41
| 0.656613
| 231
| 0.52381
| 0
| 0
| 0
| 0
| 0
| 0
| 163
| 0.369615
|
fba528d6b9c993f8745f860d915d34b0353a4f4d
| 426
|
py
|
Python
|
glider/test/test_gliderRadio.py
|
ezeakeal/glider_drone
|
f0d5bb973d38245351a0fe1f4833827d94d0b0e4
|
[
"Apache-2.0"
] | null | null | null |
glider/test/test_gliderRadio.py
|
ezeakeal/glider_drone
|
f0d5bb973d38245351a0fe1f4833827d94d0b0e4
|
[
"Apache-2.0"
] | null | null | null |
glider/test/test_gliderRadio.py
|
ezeakeal/glider_drone
|
f0d5bb973d38245351a0fe1f4833827d94d0b0e4
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from glider.modules.glider_radio import GliderRadio
class TestGliderRadio(TestCase):
def setUp(self):
        self.radio = GliderRadio(self.radio_callback)
self.radio.start()
def tearDown(self):
self.radio.stop()
    def radio_callback(self, msgdict): #not test_-prefixed, so unittest will not run it as a test
print("Received message: %s" % msgdict)
def test_send_data(self):
self.radio.send_data(["test"])
| 23.666667
| 52
| 0.678404
| 341
| 0.800469
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.065728
|
fba67a228cffbfee38985da067132482c7b8a08a
| 1,052
|
py
|
Python
|
apps/warframes/migrations/0001_initial.py
|
tufbel/wFocus
|
ee0f02053b8a5bc9c40dd862306fc5df1a063b9d
|
[
"Apache-2.0"
] | null | null | null |
apps/warframes/migrations/0001_initial.py
|
tufbel/wFocus
|
ee0f02053b8a5bc9c40dd862306fc5df1a063b9d
|
[
"Apache-2.0"
] | 11
|
2020-06-06T01:51:51.000Z
|
2022-02-10T14:31:21.000Z
|
apps/warframes/migrations/0001_initial.py
|
tufbel/wFocus
|
ee0f02053b8a5bc9c40dd862306fc5df1a063b9d
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-12-15 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Warframe',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
('health', models.IntegerField()),
('shield', models.IntegerField()),
('armor', models.IntegerField()),
('power', models.IntegerField()),
('sprint_speed', models.FloatField()),
('power_strength', models.FloatField(default=1)),
('power_duration', models.FloatField(default=1)),
('power_range', models.FloatField(default=1)),
('power_efficiency', models.FloatField(default=1)),
('img', models.URLField(max_length=255, null=True)),
],
),
]
| 32.875
| 76
| 0.543726
| 959
| 0.911597
| 0
| 0
| 0
| 0
| 0
| 0
| 179
| 0.170152
|
fba6946e547a329b3ac4d404e2ef31baf20b094f
| 5,290
|
py
|
Python
|
pxr/base/tf/testenv/testTfStringUtils.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 3,680
|
2016-07-26T18:28:11.000Z
|
2022-03-31T09:55:05.000Z
|
pxr/base/tf/testenv/testTfStringUtils.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 1,759
|
2016-07-26T19:19:59.000Z
|
2022-03-31T21:24:00.000Z
|
pxr/base/tf/testenv/testTfStringUtils.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 904
|
2016-07-26T18:33:40.000Z
|
2022-03-31T09:55:16.000Z
|
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import Tf
import logging
import unittest
class TestStringUtils(unittest.TestCase):
"""
Test Tf String Utils (The python wrapped porting of the utility functions).
"""
def setUp(self):
self.log = logging.getLogger()
def test_StringSplit(self):
"""Testing StringSplit() function. This function is supposed to behave
like the split method on python string objects."""
self.log.info("Testing string split cases")
self.assertEqual([], Tf.StringSplit("",""))
self.assertEqual([], Tf.StringSplit("abcd",""))
self.assertEqual([], Tf.StringSplit("","ccc"))
s = "abcd"
self.assertEqual(s.split("a"), Tf.StringSplit(s, "a"))
self.assertEqual(s.split("b"), Tf.StringSplit(s, "b"))
self.assertEqual(s.split("c"), Tf.StringSplit(s, "c"))
self.assertEqual(s.split("d"), Tf.StringSplit(s, "d"))
self.assertEqual(s.split("abcd"), Tf.StringSplit(s, "abcd"))
self.assertEqual(s.split("ab"), Tf.StringSplit(s, "ab"))
s = "a:+b:+c:+d"
self.assertEqual(s.split(":+"), Tf.StringSplit(s, ":+"))
s = "a:+b:+c:d"
self.assertEqual(s.split(":+"), Tf.StringSplit(s, ":+"))
def test_Unicode(self):
"""Testing that we can pass python unicode objects to wrapped
functions expecting std::string"""
self.log.info("Testing unicode calls")
self.assertEqual(Tf.StringSplit('123', '2'), ['1', '3'])
self.assertEqual(Tf.StringSplit('123', u'2'), ['1', '3'])
self.assertEqual(Tf.StringSplit(u'123', '2'), ['1', '3'])
self.assertEqual(Tf.StringSplit(u'123', u'2'), ['1', '3'])
self.assertEqual(Tf.DictionaryStrcmp('apple', 'banana'), -1)
self.assertEqual(Tf.DictionaryStrcmp('apple', u'banana'), -1)
self.assertEqual(Tf.DictionaryStrcmp(u'apple', 'banana'), -1)
self.assertEqual(Tf.DictionaryStrcmp(u'apple', u'banana'), -1)
def test_StringToLong(self):
def checks(val):
self.assertEqual(Tf.StringToLong(repr(val)), val)
def checku(val):
self.assertEqual(Tf.StringToULong(repr(val)), val)
# A range of valid values.
for i in range(1000000):
checku(i)
for i in range(-500000, 500000):
checks(i)
# A wider range of valid values.
for i in range(0, 1000000000, 9337):
checks(i)
for i in range(-500000000, 500000000, 9337):
checks(i)
# Get the max/min values.
ulmax, lmax, lmin = (
Tf._GetULongMax(), Tf._GetLongMax(), Tf._GetLongMin())
# Check the extrema and one before to ensure they work.
for n in [ulmax-1, ulmax]:
checku(n)
for n in [lmin, lmin+1, lmax-1, lmax]:
checks(n)
# Check that some beyond the extrema over/underflow.
#
# Unsigned overflow.
for i in range(1, 1000):
with self.assertRaises(ValueError):
checku(ulmax + i)
with self.assertRaises(ValueError):
checks(lmax + i)
with self.assertRaises(ValueError):
checks(lmin - i)
def test_Identifiers(self):
self.assertFalse(Tf.IsValidIdentifier(''))
self.assertTrue(Tf.IsValidIdentifier('hello9'))
self.assertFalse(Tf.IsValidIdentifier('9hello'))
self.assertTrue(Tf.IsValidIdentifier('hello_world'))
self.assertTrue(Tf.IsValidIdentifier('HELLO_WORLD'))
self.assertTrue(Tf.IsValidIdentifier('hello_world_1234'))
self.assertFalse(Tf.IsValidIdentifier('hello_#world#_1234'))
self.assertFalse(Tf.IsValidIdentifier('h e l l o'))
self.assertEqual(Tf.MakeValidIdentifier(''), '_')
self.assertEqual(Tf.MakeValidIdentifier('hello9'), 'hello9')
self.assertEqual(Tf.MakeValidIdentifier('9hello'), '_hello')
self.assertEqual(
Tf.MakeValidIdentifier('hello_#world#_1234'), 'hello__world__1234')
self.assertFalse(Tf.IsValidIdentifier('h e l l o'), 'h_e_l_l_o')
self.assertFalse(Tf.IsValidIdentifier('!@#$%'), '_____')
if __name__ == '__main__':
unittest.main()
| 38.613139
| 79
| 0.633648
| 4,111
| 0.777127
| 0
| 0
| 0
| 0
| 0
| 0
| 2,087
| 0.394518
|
fba6b995d133300dd22ec22078918d89b609c5b5
| 4,300
|
py
|
Python
|
oui/return_arp.py
|
sukhjinderpalsingh/ansible
|
07669bfc1e072af670f32a6ba037513c470caf8d
|
[
"Unlicense"
] | 4
|
2019-04-17T13:16:58.000Z
|
2020-05-05T23:07:35.000Z
|
oui/return_arp.py
|
sukhjinderpalsingh/ansible
|
07669bfc1e072af670f32a6ba037513c470caf8d
|
[
"Unlicense"
] | null | null | null |
oui/return_arp.py
|
sukhjinderpalsingh/ansible
|
07669bfc1e072af670f32a6ba037513c470caf8d
|
[
"Unlicense"
] | 1
|
2019-05-23T17:24:16.000Z
|
2019-05-23T17:24:16.000Z
|
#!/usr/bin/python
import subprocess
import sys
import cgi
import datetime
import re
import requests
validMac = False
ERROR = False
form = cgi.FieldStorage()
user = "READONLY_USER_HERE"
pwd = "PASSWORD"
OUI = form.getvalue('OUI')
host = form.getvalue('HOST')
def formatOUI(OUI):
ot=OUI[0:2]
tf=OUI[2:4]
fs=OUI[5:7]
fmac = ot+":"+tf+":"+fs+":00:00:00"
return fmac
fOUI = formatOUI(OUI)
webCmd = "show ip arp | i {}".format(OUI[0:7])
def printHeader():
print "Content-type: text/html"
print ""
print "<html><head>"
print "<title>OUI Finder</title></head><body>"
print "<br />Time run: " + str(datetime.datetime.now()) + "<br>"
def checkInput():
    pattern = re.compile(r'[a-fA-F0-9]{4}\.[a-fA-F0-9]{2}')
if re.match(pattern,OUI[0:7]):
return True
else:
return False
def sanitize(outp):
    outp = outp.split('# STATS ')[0]
    outp = outp.split(' * ')
    del outp[0]
    item = []
for i in outp:
entry = []
i = i.replace('changed=False','')
if "Internet" not in i:
entry.append(i.split(' ')[0])
else:
entry.append(i.split(' ')[0])
i = i.split(' Internet ')
del i[0]
for j in i:
j = j.split(' ')
j = [k for k in j if k]
del j[1]
del j[2]
entry.append(j)
item.append(entry)
return item
def displaySanitized(hosts):
totHosts = 0
for i in hosts:
if len(i)>1:
totHosts+=(len(i)-1)
print "<CENTER>"
print "Number of hosts found: " + str(totHosts)
print "<TABLE border='1' cellpadding='10'> "
for item in hosts:
if len(item) == 1:
print "<TR><TH colspan='3'>"
print item[0]
print "</TH></TR>"
print "<TR><TH>IP</TH><TH>MAC</TH><TH>VLAN</TH>"
print "<TR><TD colspan='3'>No hosts found</TD></TR>"
else:
print "<TR><TH colspan='3'>"
print item[0]
print "</TH></TR>"
print "<TR><TH>IP</TH><TH>MAC</TH><TH>VLAN</TH>"
for i in range(1,len(item)):
print "<TR><TD>"
print item[i][0]
print "</TD><TD>"
print item[i][1]
print "</TD><TD>"
print item[i][2]
print "</TD></TR>"
print "</TABLE>"
def executeCmd(host):
cmd = """ansible-playbook /ansible/plays/show_cmd.yml --limit '"""+host+"""' -e 'user="{0}" pass="{1}" cmd="{2}"' | sed 's/\\\\n/\\n/g'""".format(user,pwd,webCmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
outp = str(p.communicate()[0])
if 'Authentication failed.' in outp:
print "<CENTER><H1>***ERROR!***<br>Authentication failed.</H1><h3>Check credentials</h3></CENTER>"
displaySanitized(sanitize(outp))
def lookup(OUI):
MAC_URL = 'http://macvendors.co/api/%s'
r = requests.get(MAC_URL % OUI)
print "<CENTER><h3>Vendor Name: "+(r.json()['result']['company'])+"</h3></CENTER>"
printHeader()
validMac = checkInput()
if validMac == False:
print "<CENTER><h3>{} OUI not formatted correctly, please use xxxx.xx (Cisco format).</h3></CENTER>".format(OUI)
else:
try:
lookup(fOUI)
except:
ERROR = True
print "<CENTER>OUI not found in database!<br>Check and try again</CENTER>"
if ERROR == False:
executeCmd(host)
| 33.858268
| 172
| 0.428837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,121
| 0.260698
|
fba8bbf32782335bea4f2efd30632d40070c98c8
| 1,801
|
py
|
Python
|
scripts/propogate_elab_labels.py
|
dmort27/elab-order
|
5fdca996eea8ab5c6520f9ba565f2fc2cf3e9d0a
|
[
"MIT"
] | 1
|
2021-09-22T00:28:54.000Z
|
2021-09-22T00:28:54.000Z
|
scripts/propogate_elab_labels.py
|
dmort27/elab-order
|
5fdca996eea8ab5c6520f9ba565f2fc2cf3e9d0a
|
[
"MIT"
] | null | null | null |
scripts/propogate_elab_labels.py
|
dmort27/elab-order
|
5fdca996eea8ab5c6520f9ba565f2fc2cf3e9d0a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import csv
import glob
import os.path
from collections import deque
from tqdm import tqdm
def read_csv(fnin):
with open(fnin) as f:
return {tuple(e) for e in csv.reader(f)}
def write_tagged_span(fout, buffer, count):
b_token = buffer.popleft()
print(f'{b_token}\tB', file=fout)
for i_token in buffer:
print(f'{i_token}\tI', file=fout)
buffer.clear()
def write_outside_token(fout, token):
print(f'{token}\tO', file=fout)
def tag_file(fn, fnout, elabs):
buffer = deque()
    count = 0
with open(fn) as fin, open(fnout, 'w') as fout:
for line in fin:
token = line.strip().split()
assert type(token) is list
token = "" if not token else token[0]
buffer.append(token)
if len(buffer) > 4:
token = buffer.popleft()
if token:
write_outside_token(fout, token)
else:
print('', file=fout)
if tuple(buffer) in elabs:
write_tagged_span(fout, buffer, count)
count += 1
for token in buffer:
if token:
write_outside_token(fout, token)
else:
print('', file=fout)
def main(elabs_filename, input_dir, output_dir):
elabs = read_csv(elabs_filename)
input_filenames = glob.glob(os.path.join(input_dir, '*.conll'))
for fn in tqdm(input_filenames):
root, ext = os.path.splitext(os.path.basename(fn))
fnout = os.path.join(output_dir, root + '.conll')
tag_file(fn, fnout, elabs)
if __name__ == '__main__':
main('../data/hmong/extracted_elabs/elabs_extracted.csv',
'../data/hmong/sch_corpus2_conll',
'../data/hmong/sch_corpus2_elab')
| 30.525424
| 67
| 0.583009
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.120489
|
fba8db75cec306c3be997ed165eb4fd61c2a754f
| 1,691
|
py
|
Python
|
python/ml_preproc/pipeline/beam_classes/parse_csv.py
|
gmogr/dataflow-production-ready
|
838dc45d0a01db184ce3f88728303d8ed69361f3
|
[
"Apache-2.0"
] | 1
|
2021-01-26T16:58:20.000Z
|
2021-01-26T16:58:20.000Z
|
python/ml_preproc/pipeline/beam_classes/parse_csv.py
|
gmogr/dataflow-production-ready
|
838dc45d0a01db184ce3f88728303d8ed69361f3
|
[
"Apache-2.0"
] | null | null | null |
python/ml_preproc/pipeline/beam_classes/parse_csv.py
|
gmogr/dataflow-production-ready
|
838dc45d0a01db184ce3f88728303d8ed69361f3
|
[
"Apache-2.0"
] | 1
|
2021-04-15T09:18:27.000Z
|
2021-04-15T09:18:27.000Z
|
# Copyright 2020 Google LLC.
# This software is provided as-is, without warranty or representation for any use or purpose.
# Your use of it is subject to your agreement with Google.
from apache_beam import DoFn, pvalue
from apache_beam.metrics import Metrics
from ..model import data_classes
from ..model.data_classes import Record
class ParseCSVDoFn(DoFn):
CORRECT_OUTPUT_TAG = 'accommodations'
WRONG_OUTPUT_TAG = 'parse_errors'
def __init__(self, header_line: str):
""" Parse the CSV data and create a PCollection of Accommodation.
Args:
header_line: The header line used in the CSV line, it will be ignored by the parser.
"""
self._header_line = header_line
# Metrics to report the number of records
self.input_records_counter = Metrics.counter("ParseCSVDoFn", 'input_records')
self.correct_records_counter = Metrics.counter("ParseCSVDoFn", 'correct_records')
self.wrong_records_counter = Metrics.counter("ParseCSVDoFn", 'wrong_records')
def process(self, element: str):
self.input_records_counter.inc()
# We have two outputs: one for well formed input lines, and another one with potential parsing errors
# (the parsing error output will be written to a different BigQuery table)
try:
# ignore header row
if element != self._header_line:
record: Record = data_classes.line2record(element)
self.correct_records_counter.inc()
yield pvalue.TaggedOutput(ParseCSVDoFn.CORRECT_OUTPUT_TAG, record)
except TypeError as err:
self.wrong_records_counter.inc()
msg = str(err)
yield pvalue.TaggedOutput(ParseCSVDoFn.WRONG_OUTPUT_TAG, {'error': msg, 'line': element})
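# A minimal sketch of wiring this DoFn into a pipeline and splitting its two
# tagged outputs; the header line, input path and variable names below are
# placeholders, not values from this project:
#
# import apache_beam as beam
#
# with beam.Pipeline() as p:
#     results = (
#         p
#         | 'Read' >> beam.io.ReadFromText('gs://bucket/accommodations.csv')
#         | 'Parse' >> beam.ParDo(ParseCSVDoFn('id,name,location')).with_outputs(
#             ParseCSVDoFn.CORRECT_OUTPUT_TAG, ParseCSVDoFn.WRONG_OUTPUT_TAG)
#     )
#     records = results[ParseCSVDoFn.CORRECT_OUTPUT_TAG]
#     parse_errors = results[ParseCSVDoFn.WRONG_OUTPUT_TAG]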
| 39.325581
| 105
| 0.738025
| 1,353
| 0.800118
| 692
| 0.409225
| 0
| 0
| 0
| 0
| 726
| 0.429332
|
fba917b2c0de9b130231e038b70145fe9679fe7d
| 2,285
|
py
|
Python
|
src/sentry/integrations/pagerduty/client.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/integrations/pagerduty/client.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/integrations/pagerduty/client.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from sentry.integrations.client import ApiClient
from sentry.models import EventCommon
from sentry.api.serializers import serialize, ExternalEventSerializer
LEVEL_SEVERITY_MAP = {
"debug": "info",
"info": "info",
"warning": "warning",
"error": "error",
"fatal": "critical",
}
class PagerDutyClient(ApiClient):
allow_redirects = False
integration_name = "pagerduty"
base_url = "https://events.pagerduty.com/v2/enqueue"
def __init__(self, integration_key):
self.integration_key = integration_key
super(PagerDutyClient, self).__init__()
def request(self, method, path, headers=None, data=None, params=None):
if not headers:
headers = {"Content-Type": "application/json"}
return self._request(method, path, headers=headers, data=data, params=params)
def send_trigger(self, data):
# expected payload: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
# for now, only construct the payload if data is an event
if isinstance(data, EventCommon):
source = data.transaction or data.culprit or "<unknown>"
group = data.group
level = data.get_tag("level") or "error"
custom_details = serialize(data, None, ExternalEventSerializer())
payload = {
"routing_key": self.integration_key,
"event_action": "trigger",
"dedup_key": group.qualified_short_id,
"payload": {
"summary": data.message or data.title,
"severity": LEVEL_SEVERITY_MAP[level],
"source": source,
"component": group.project.slug,
"custom_details": custom_details,
},
"links": [
{
"href": group.get_absolute_url(
params={"referrer": "pagerduty_integration"}
),
"text": "Issue Details",
}
],
}
return self.post("/", data=payload)
def send_acknowledge(self, data):
pass
def send_resolve(self, data):
pass
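# A minimal usage sketch; the integration key is a placeholder and `event`
# must be a Sentry EventCommon instance, as send_trigger checks:
#
# client = PagerDutyClient(integration_key='0123456789abcdef0123456789abcdef')
# client.send_trigger(event)  # posts a 'trigger' event to the Events API v2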
| 34.621212
| 95
| 0.569365
| 1,945
| 0.851204
| 0
| 0
| 0
| 0
| 0
| 0
| 507
| 0.221882
|
fbaa2cc659c7ec0bddf4650c7e382079513809ba
| 3,192
|
py
|
Python
|
darling_ansible/python_venv/lib/python3.7/site-packages/oci/monitoring/models/failed_metric_record.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/monitoring/models/failed_metric_record.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/monitoring/models/failed_metric_record.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | 1
|
2020-06-25T03:12:58.000Z
|
2020-06-25T03:12:58.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class FailedMetricRecord(object):
"""
The record of a single metric object that failed input validation and the reason for the failure.
"""
def __init__(self, **kwargs):
"""
Initializes a new FailedMetricRecord object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param message:
The value to assign to the message property of this FailedMetricRecord.
:type message: str
:param metric_data:
The value to assign to the metric_data property of this FailedMetricRecord.
:type metric_data: MetricDataDetails
"""
self.swagger_types = {
'message': 'str',
'metric_data': 'MetricDataDetails'
}
self.attribute_map = {
'message': 'message',
'metric_data': 'metricData'
}
self._message = None
self._metric_data = None
@property
def message(self):
"""
**[Required]** Gets the message of this FailedMetricRecord.
An error message indicating the reason that the indicated metric object failed input validation.
:return: The message of this FailedMetricRecord.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this FailedMetricRecord.
An error message indicating the reason that the indicated metric object failed input validation.
:param message: The message of this FailedMetricRecord.
:type: str
"""
self._message = message
@property
def metric_data(self):
"""
**[Required]** Gets the metric_data of this FailedMetricRecord.
Identifier of a metric object that failed input validation.
:return: The metric_data of this FailedMetricRecord.
:rtype: MetricDataDetails
"""
return self._metric_data
@metric_data.setter
def metric_data(self, metric_data):
"""
Sets the metric_data of this FailedMetricRecord.
Identifier of a metric object that failed input validation.
:param metric_data: The metric_data of this FailedMetricRecord.
:type: MetricDataDetails
"""
self._metric_data = metric_data
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 31.294118
| 245
| 0.658835
| 2,652
| 0.830827
| 0
| 0
| 2,682
| 0.840226
| 0
| 0
| 2,087
| 0.653822
|
fbab99a6802fc429bbfb9a29c754e3ca6d940978
| 3,352
|
py
|
Python
|
PyBlend/pyblend_prism.py
|
nfb2021/PrismPyTrace
|
d4f28cd4156b5543abc3b5634383a81b0663d28e
|
[
"MIT"
] | null | null | null |
PyBlend/pyblend_prism.py
|
nfb2021/PrismPyTrace
|
d4f28cd4156b5543abc3b5634383a81b0663d28e
|
[
"MIT"
] | null | null | null |
PyBlend/pyblend_prism.py
|
nfb2021/PrismPyTrace
|
d4f28cd4156b5543abc3b5634383a81b0663d28e
|
[
"MIT"
] | null | null | null |
import bpy
import numpy as np
import math
import mathutils
import time
import os
class Prism:
""" ^"""
""" / \\"""
""" / ^ \\"""
""" / | \\"""
""" /'alpha'\\ <-- lenght of this side is calculated based on 'width' and 'alpha'"""
"""/ \\"""
"""----------- """
""" ^"""
""" |"""
"""This side is defined via 'width',"""
"""parallel to z-axis of Sigray defined"""
"""The angle opposite to this side is 'alpha'"""
"""'height' defines the distance between the two triangular sides of the prism"""
def __init__(self, width, height, alpha):
self.width = width
self.height = height
self.alpha = math.radians(alpha)
def clear_scene(self):
"""This function clears the whole scene and all objects contained in it"""
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
def define_prism(self, loc = (0, 0, 0), angle = None, base_width = None):
"""The default location assigned is (0, 0, 0). Using the 'update_coordinates'-function allows for reassignment of coordinates"""
x, y, z = loc
name = "prism"
meshes = bpy.data.meshes
if angle == None:
angle = self.alpha
else:
angle = math.radians(angle)
if base_width == None:
base_width = self.width
else:
base_width = base_width
points = [ [x, y, z], [x + base_width, y, z], [x + (base_width / 2), y + (base_width / (2 * np.tan(angle / 2))), z],
[x, y, z + self.height], [x + base_width, y, z + self.height], [x + (base_width / 2), y + (base_width / (2 * np.tan(angle / 2))), z + self.height] ]
faces = [ [4,5,2],[1,0,3],[2,5,3],[4,3,5],[1,2,0],[1,4,2],[4,1,3],[0,2,3] ]
shape_vertices = []
for p in points:
print(p)
shape_vertices.append ( mathutils.Vector((p[0],p[1],p[2])) )
new_mesh = bpy.data.meshes.new ( name + "_mesh" )
new_mesh.from_pydata ( shape_vertices, [], faces )
new_mesh.update()
new_obj = bpy.data.objects.new ( name, new_mesh )
return new_obj
def link_prism(self, object):
"""Any created object in Blender needs to be linked to the scene, in order to be displayed"""
bpy.context.collection.objects.link(object)
def update_coordinates(self, new_location):
"""This function allows for reassignment of coordinates"""
return self.define_prism(loc = new_location)
def update_alpha(self, new_alpha):
"""This function allows for reassignment of the angle alpha"""
return self.define_prism(angle = new_alpha)
def update_width(self, new_width):
"""This function allows for reassignment of the width of the prism"""
return self.define_prism(base_width = new_width)
def make_array(self, x, y, no_of_prisms, separation):
for p in range(no_of_prisms):
if p == 0:
self.link_prism(self.update_coordinates((x, y, 0)))
else:
self.link_prism(self.update_coordinates( (p * (self.width + separation) + x, y, 0)))
| 34.556701
| 171
| 0.545346
| 3,251
| 0.969869
| 0
| 0
| 0
| 0
| 0
| 0
| 923
| 0.275358
|
fbac3a021640dbdfd78f68fea5a2c6021008a044
| 88
|
py
|
Python
|
Source/RainyDay_utilities_Py3/__init__.py
|
Dewberry/RainyDay2
|
ed3206b1d81ca4ffded4ed79bf156e4b8d87d143
|
[
"MIT"
] | 12
|
2019-03-24T02:59:51.000Z
|
2021-11-05T07:45:08.000Z
|
Source/RainyDay_utilities_Py3/__init__.py
|
Dewberry/RainyDay2
|
ed3206b1d81ca4ffded4ed79bf156e4b8d87d143
|
[
"MIT"
] | null | null | null |
Source/RainyDay_utilities_Py3/__init__.py
|
Dewberry/RainyDay2
|
ed3206b1d81ca4ffded4ed79bf156e4b8d87d143
|
[
"MIT"
] | 13
|
2017-08-10T17:18:16.000Z
|
2022-02-10T00:08:47.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 6 17:38:00 2015
@author: dbwrigh3
"""
| 11
| 35
| 0.568182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.965909
|
fbad3dd268d46eacf42426eb3b88f2a9c9f71d9f
| 526
|
py
|
Python
|
setup.py
|
Lewinta/ProcesosLab
|
223ddff1dd8d92403f9ded9f7a42b8f2fa8605f7
|
[
"MIT"
] | null | null | null |
setup.py
|
Lewinta/ProcesosLab
|
223ddff1dd8d92403f9ded9f7a42b8f2fa8605f7
|
[
"MIT"
] | null | null | null |
setup.py
|
Lewinta/ProcesosLab
|
223ddff1dd8d92403f9ded9f7a42b8f2fa8605f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in proceso/__init__.py
from proceso import __version__ as version
setup(
name='proceso',
version=version,
description='A customization app for Proceso',
author='Lewin Villar',
author_email='lewinvillar@tzcode.tech',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 25.047619
| 62
| 0.76616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 188
| 0.357414
|
fbad48a7594776e5f25fe4470488384a1f723e04
| 3,650
|
py
|
Python
|
eye/widgets/misc.py
|
hydrargyrum/eye
|
b4a6994fee74b7a70d4f918bc3a29184fe8d5526
|
[
"WTFPL"
] | 12
|
2015-09-07T18:32:15.000Z
|
2021-02-21T17:29:15.000Z
|
eye/widgets/misc.py
|
hydrargyrum/eye
|
b4a6994fee74b7a70d4f918bc3a29184fe8d5526
|
[
"WTFPL"
] | 20
|
2016-08-01T19:24:43.000Z
|
2020-12-23T21:29:04.000Z
|
eye/widgets/misc.py
|
hydrargyrum/eye
|
b4a6994fee74b7a70d4f918bc3a29184fe8d5526
|
[
"WTFPL"
] | 1
|
2018-09-07T14:26:24.000Z
|
2018-09-07T14:26:24.000Z
|
# this project is licensed under the WTFPLv2, see COPYING.txt for details
import logging
from weakref import ref
from PyQt5.QtCore import QEventLoop
from PyQt5.QtWidgets import QPlainTextEdit, QLabel, QWidget, QRubberBand, QApplication
from ..app import qApp
from ..qt import Slot, Signal
from .helpers import WidgetMixin
__all__ = ('LogWidget', 'PositionIndicator', 'WidgetPicker', 'interactiveWidgetPick')
class LogWidget(QPlainTextEdit):
class LogHandler(logging.Handler):
def __init__(self, widget):
super(LogWidget.LogHandler, self).__init__()
self.widget = widget
self.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
def emit(self, record):
self.widget.appendPlainText(self.format(record))
def __init__(self, parent=None):
super(LogWidget, self).__init__(parent=parent)
self.handler = LogWidget.LogHandler(self)
self.setReadOnly(True)
def install(self):
qApp().logger.addHandler(self.handler)
def uninstall(self):
qApp().logger.removeHandler(self.handler)
class PositionIndicator(QLabel, WidgetMixin):
"""Widget indicating cursor position of currently focused editor
When cursor position changes or focus goes to another editor widget, the text of this label is refreshed.
"""
format = '{percent:3.0f}% {line:5d}:{vcol:3d}'
"""Text format of the label
Uses PEP-3101 string formatting. Usable keys are `line`, `col`, `percent`, `offset`, `path`, `title` and `editor`.
"""
def __init__(self, format=None, **kwargs):
super(PositionIndicator, self).__init__(**kwargs)
if format is not None:
self.format = format
self.lastFocus = lambda: None
qApp().focusChanged.connect(self.focusChanged)
@Slot('QWidget*', 'QWidget*')
def focusChanged(self, _, new):
if not hasattr(new, 'categories'):
return
if 'editor' not in new.categories():
return
if new.window() != self.window():
return
lastFocus = self.lastFocus()
if lastFocus:
lastFocus.cursorPositionChanged.disconnect(self.updateLabel)
lastFocus.linesChanged.disconnect(self.updateLabel)
self.lastFocus = ref(new)
new.cursorPositionChanged.connect(self.updateLabel)
new.linesChanged.connect(self.updateLabel)
self.updateLabel()
@Slot()
def updateLabel(self):
ed = self.lastFocus()
line, col = ed.getCursorPosition()
offset = ed.cursorOffset()
line, col = line + 1, col + 1
lines = ed.lines()
d = {
'line': line,
'col': col,
'vcol': ed.cursorVisualColumn() + 1,
'percent': line * 100. / lines,
'offset': offset,
'path': ed.path,
'title': ed.windowTitle(),
'editor': ed,
}
self.setText(self.format.format(**d))
class WidgetPicker(QWidget):
"""Widget for letting user point at another widget."""
selected = Signal()
def __init__(self):
super(WidgetPicker, self).__init__()
self.band = QRubberBand(QRubberBand.Rectangle)
self.setMouseTracking(True)
self.el = QEventLoop()
def mousePressEvent(self, ev):
self.el.quit()
self.widget = QApplication.widgetAt(ev.globalPos())
self.band.hide()
def mouseMoveEvent(self, ev):
widget = QApplication.widgetAt(ev.globalPos())
if widget:
rect = widget.frameGeometry()
if widget.parent():
rect.moveTo(widget.parent().mapToGlobal(rect.topLeft()))
self.band.setGeometry(rect)
self.band.show()
else:
self.band.hide()
def run(self):
self.grabMouse()
try:
self.el.exec_()
finally:
self.releaseMouse()
return self.widget
def interactiveWidgetPick():
"""Let user peek a widget by clicking on it.
The user can point at open EYE widgets and click on one. Return the widget that was clicked
by the user.
"""
w = WidgetPicker()
return w.run()
| 25
| 115
| 0.712603
| 3,002
| 0.822466
| 0
| 0
| 943
| 0.258356
| 0
| 0
| 834
| 0.228493
|
fbadf16ea0f58eaa3cba965310bc72b10eb1a906
| 10,437
|
py
|
Python
|
envelopes/envelope.py
|
siyaoyao/envelopes
|
8ad190a55d0d8b805b6ae545b896e719467253b7
|
[
"MIT"
] | 202
|
2015-01-04T10:40:04.000Z
|
2022-03-17T16:58:22.000Z
|
envelopes/envelope.py
|
siyaoyao/envelopes
|
8ad190a55d0d8b805b6ae545b896e719467253b7
|
[
"MIT"
] | 12
|
2015-04-29T08:12:36.000Z
|
2021-06-03T01:34:33.000Z
|
envelopes/envelope.py
|
siyaoyao/envelopes
|
8ad190a55d0d8b805b6ae545b896e719467253b7
|
[
"MIT"
] | 48
|
2015-01-04T10:39:52.000Z
|
2022-02-28T03:25:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Tomasz Wójcik <tomek@bthlabs.pl>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
envelopes.envelope
==================
This module contains the Envelope class.
"""
import sys
if sys.version_info[0] == 2:
from email import Encoders as email_encoders
elif sys.version_info[0] == 3:
from email import encoders as email_encoders
basestring = str
def unicode(_str, _charset):
return str(_str.encode(_charset), _charset)
else:
raise RuntimeError('Unsupported Python version: %d.%d.%d' % (
sys.version_info[0], sys.version_info[1], sys.version_info[2]
))
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
import os
import re
from .conn import SMTP
from .compat import encoded
class MessageEncodeError(Exception):
pass
class Envelope(object):
"""
The Envelope class.
**Address formats**
The following formats are supported for e-mail addresses:
* ``"user@server.com"`` - just the e-mail address part as a string,
* ``"Some User <user@server.com>"`` - name and e-mail address parts as a string,
* ``("user@server.com", "Some User")`` - e-mail address and name parts as a tuple.
Whenever you come to manipulate addresses feel free to use any (or all) of
the formats above.
:param to_addr: ``To`` address or list of ``To`` addresses
:param from_addr: ``From`` address
:param subject: message subject
:param html_body: optional HTML part of the message
:param text_body: optional plain text part of the message
:param cc_addr: optional single CC address or list of CC addresses
:param bcc_addr: optional single BCC address or list of BCC addresses
:param headers: optional dictionary of headers
:param charset: message charset
"""
ADDR_FORMAT = '%s <%s>'
ADDR_REGEXP = re.compile(r'^(.*) <([^@]+@[^@]+)>$')
def __init__(self, to_addr=None, from_addr=None, subject=None,
html_body=None, text_body=None, cc_addr=None, bcc_addr=None,
headers=None, charset='utf-8'):
if to_addr:
if isinstance(to_addr, list):
self._to = to_addr
else:
self._to = [to_addr]
else:
self._to = []
self._from = from_addr
self._subject = subject
self._parts = []
if text_body:
self._parts.append(('text/plain', text_body, charset))
if html_body:
self._parts.append(('text/html', html_body, charset))
if cc_addr:
if isinstance(cc_addr, list):
self._cc = cc_addr
else:
self._cc = [cc_addr]
else:
self._cc = []
if bcc_addr:
if isinstance(bcc_addr, list):
self._bcc = bcc_addr
else:
self._bcc = [bcc_addr]
else:
self._bcc = []
if headers:
self._headers = headers
else:
self._headers = {}
self._charset = charset
self._addr_format = unicode(self.ADDR_FORMAT, charset)
def __repr__(self):
return u'<Envelope from="%s" to="%s" subject="%s">' % (
self._addrs_to_header([self._from]),
self._addrs_to_header(self._to),
self._subject
)
@property
def to_addr(self):
"""List of ``To`` addresses."""
return self._to
def add_to_addr(self, to_addr):
"""Adds a ``To`` address."""
self._to.append(to_addr)
def clear_to_addr(self):
"""Clears list of ``To`` addresses."""
self._to = []
@property
def from_addr(self):
return self._from
@from_addr.setter
def from_addr(self, from_addr):
self._from = from_addr
@property
def cc_addr(self):
"""List of CC addresses."""
return self._cc
def add_cc_addr(self, cc_addr):
"""Adds a CC address."""
self._cc.append(cc_addr)
def clear_cc_addr(self):
"""Clears list of CC addresses."""
self._cc = []
@property
def bcc_addr(self):
"""List of BCC addresses."""
return self._bcc
def add_bcc_addr(self, bcc_addr):
"""Adds a BCC address."""
self._bcc.append(bcc_addr)
def clear_bcc_addr(self):
"""Clears list of BCC addresses."""
self._bcc = []
@property
def charset(self):
"""Message charset."""
return self._charset
@charset.setter
def charset(self, charset):
self._charset = charset
self._addr_format = unicode(self.ADDR_FORMAT, charset)
def _addr_tuple_to_addr(self, addr_tuple):
addr = ''
if len(addr_tuple) == 2 and addr_tuple[1]:
addr = self._addr_format % (
self._header(addr_tuple[1] or ''),
addr_tuple[0] or ''
)
elif addr_tuple[0]:
addr = addr_tuple[0]
return addr
@property
def headers(self):
"""Dictionary of custom headers."""
return self._headers
def add_header(self, key, value):
"""Adds a custom header."""
self._headers[key] = value
def clear_headers(self):
"""Clears custom headers."""
self._headers = {}
def _addrs_to_header(self, addrs):
_addrs = []
for addr in addrs:
if not addr:
continue
if isinstance(addr, basestring):
if self._is_ascii(addr):
_addrs.append(self._encoded(addr))
else:
# these headers need special care when encoding, see:
# http://tools.ietf.org/html/rfc2047#section-8
# Need to break apart the name from the address if there are
# non-ascii chars
m = self.ADDR_REGEXP.match(addr)
if m:
t = (m.group(2), m.group(1))
_addrs.append(self._addr_tuple_to_addr(t))
else:
# What can we do? Just pass along what the user gave us and hope they did it right
_addrs.append(self._encoded(addr))
elif isinstance(addr, tuple):
_addrs.append(self._addr_tuple_to_addr(addr))
else:
self._raise(MessageEncodeError,
'%s is not a valid address' % str(addr))
_header = ','.join(_addrs)
return _header
def _raise(self, exc_class, message):
raise exc_class(self._encoded(message))
def _header(self, _str):
if self._is_ascii(_str):
return _str
return Header(_str, self._charset).encode()
def _is_ascii(self, _str):
return all(ord(c) < 128 for c in _str)
def _encoded(self, _str):
return encoded(_str, self._charset)
def to_mime_message(self):
"""Returns the envelope as
:py:class:`email.mime.multipart.MIMEMultipart`."""
msg = MIMEMultipart('alternative')
msg['Subject'] = self._header(self._subject or '')
msg['From'] = self._encoded(self._addrs_to_header([self._from]))
msg['To'] = self._encoded(self._addrs_to_header(self._to))
if self._cc:
msg['CC'] = self._addrs_to_header(self._cc)
if self._headers:
for key, value in self._headers.items():
msg[key] = self._header(value)
for part in self._parts:
type_maj, type_min = part[0].split('/')
if type_maj == 'text' and type_min in ('html', 'plain'):
msg.attach(MIMEText(part[1], type_min, self._charset))
else:
msg.attach(part[1])
return msg
def add_attachment(self, file_path, mimetype=None):
"""Attaches a file located at *file_path* to the envelope. If
*mimetype* is not specified an attempt to guess it is made. If nothing
is guessed then `application/octet-stream` is used."""
if not mimetype:
mimetype, _ = mimetypes.guess_type(file_path)
if mimetype is None:
mimetype = 'application/octet-stream'
type_maj, type_min = mimetype.split('/')
with open(file_path, 'rb') as fh:
part_data = fh.read()
part = MIMEBase(type_maj, type_min)
part.set_payload(part_data)
email_encoders.encode_base64(part)
part_filename = os.path.basename(self._encoded(file_path))
part.add_header('Content-Disposition', 'attachment; filename="%s"'
% part_filename)
self._parts.append((mimetype, part))
def send(self, *args, **kwargs):
"""Sends the envelope using a freshly created SMTP connection. *args*
and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP`
constructor.
Returns a tuple of SMTP object and whatever its send method returns."""
conn = SMTP(*args, **kwargs)
send_result = conn.send(self)
return conn, send_result
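# Usage sketch (illustrative): the addresses and host below are made up, and
# the send() call assumes envelopes.conn.SMTP accepts a host argument, as the
# send() docstring suggests.
#
#     envelope = Envelope(
#         to_addr=(u'to@example.com', u'Recipient'),
#         from_addr=(u'from@example.com', u'Sender'),
#         subject=u'Hello',
#         text_body=u'Plain text body')
#     envelope.add_attachment('/tmp/report.pdf')
#     conn, result = envelope.send('smtp.example.com')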
| 31.531722
| 106
| 0.598927
| 8,407
| 0.805422
| 0
| 0
| 767
| 0.073482
| 0
| 0
| 3,664
| 0.351025
|
fbaea2b0a2ec669b63e49046a42524be78db8577
| 4,929
|
py
|
Python
|
dependencies/panda/Pmw/Pmw_2_0_1/lib/PmwOptionMenu.py
|
SuperM0use24/Project-Altis
|
8dec7518a4d3f902cee261fd522ebebc3c171a42
|
[
"Apache-2.0"
] | null | null | null |
dependencies/panda/Pmw/Pmw_2_0_1/lib/PmwOptionMenu.py
|
SuperM0use24/Project-Altis
|
8dec7518a4d3f902cee261fd522ebebc3c171a42
|
[
"Apache-2.0"
] | null | null | null |
dependencies/panda/Pmw/Pmw_2_0_1/lib/PmwOptionMenu.py
|
SuperM0use24/Project-Altis
|
8dec7518a4d3f902cee261fd522ebebc3c171a42
|
[
"Apache-2.0"
] | null | null | null |
import types
import tkinter
import Pmw
import sys
import collections
class OptionMenu(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
# Define the megawidget options.
INITOPT = Pmw.INITOPT
optiondefs = (
('command', None, None),
('items', (), INITOPT),
('initialitem', None, INITOPT),
('labelmargin', 0, INITOPT),
('labelpos', None, INITOPT),
('sticky', 'ew', INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
Pmw.MegaWidget.__init__(self, parent)
# Create the components.
interior = self.interior()
self._menubutton = self.createcomponent('menubutton',
(), None,
tkinter.Menubutton, (interior,),
borderwidth = 2,
indicatoron = 1,
relief = 'raised',
anchor = 'c',
highlightthickness = 2,
direction = 'flush',
takefocus = 1,
)
self._menubutton.grid(column = 2, row = 2, sticky = self['sticky'])
self._menu = self.createcomponent('menu',
(), None,
tkinter.Menu, (self._menubutton,),
tearoff=0
)
self._menubutton.configure(menu = self._menu)
interior.grid_columnconfigure(2, weight = 1)
interior.grid_rowconfigure(2, weight = 1)
# Create the label.
self.createlabel(interior)
# Add the items specified by the initialisation option.
self._itemList = []
self.setitems(self['items'], self['initialitem'])
# Check keywords and initialise options.
self.initialiseoptions()
def setitems(self, items, index = None):
#cleaning up old items only required for Python < 2.5.4
if sys.version_info < (2, 5, 4):
# Clean up old items and callback commands.
for oldIndex in range(len(self._itemList)):
tclCommandName = str(self._menu.entrycget(oldIndex, 'command'))
if tclCommandName != '':
self._menu.deletecommand(tclCommandName)
self._menu.delete(0, 'end')
self._itemList = list(items)
# Set the items in the menu component.
for item in items:
self._menu.add_command(label = item,
command = lambda self = self, item = item: self._invoke(item))
# Set the currently selected value.
if index is None:
var = str(self._menubutton.cget('textvariable'))
if var != '':
# None means do not change text variable.
return
if len(items) == 0:
text = ''
elif str(self._menubutton.cget('text')) in items:
# Do not change selection if it is still valid
return
else:
text = items[0]
else:
index = self.index(index)
text = self._itemList[index]
self.setvalue(text)
def getcurselection(self):
var = str(self._menubutton.cget('textvariable'))
if var == '':
return str(self._menubutton.cget('text'))
else:
return self._menu.tk.globalgetvar(var)
def getvalue(self):
return self.getcurselection()
def setvalue(self, text):
var = str(self._menubutton.cget('textvariable'))
if var == '':
self._menubutton.configure(text = text)
else:
self._menu.tk.globalsetvar(var, text)
def index(self, index):
listLength = len(self._itemList)
if type(index) == int:
if index < listLength:
return index
else:
raise ValueError('index "%s" is out of range' % index)
elif index is Pmw.END:
if listLength > 0:
return listLength - 1
else:
raise ValueError('OptionMenu has no items')
else:
if index is Pmw.SELECT:
if listLength > 0:
index = self.getcurselection()
else:
raise ValueError('OptionMenu has no items')
if index in self._itemList:
return self._itemList.index(index)
raise ValueError('bad index "%s": must be a ' \
'name, a number, Pmw.END or Pmw.SELECT' % (index,))
def invoke(self, index = Pmw.SELECT):
index = self.index(index)
text = self._itemList[index]
return self._invoke(text)
def _invoke(self, text):
self.setvalue(text)
command = self['command']
if isinstance(command, collections.Callable):
return command(text)
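# Minimal usage sketch (assumes a Tk root and Pmw.initialise(), neither of
# which is part of this module; option values are illustrative):
#
#     root = tkinter.Tk()
#     Pmw.initialise(root)
#     menu = OptionMenu(root, labelpos='w', label_text='Colour:',
#                       items=('red', 'green', 'blue'), initialitem='green',
#                       command=lambda item: print(item))
#     menu.pack()
#     root.mainloop()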
| 33.080537
| 79
| 0.529113
| 4,858
| 0.985595
| 0
| 0
| 0
| 0
| 0
| 0
| 845
| 0.171434
|
fbaf22f54791d4657c17a86b0e49e13fd65f1463
| 7,614
|
py
|
Python
|
options/train_options.py
|
fatalfeel/DeblurGAN
|
cc4ccf09d23b91389dbea70a34797cb80331819c
|
[
"BSD-3-Clause"
] | 3
|
2021-07-12T07:38:32.000Z
|
2021-11-16T04:56:00.000Z
|
options/train_options.py
|
fatalfeel/DeblurGAN
|
cc4ccf09d23b91389dbea70a34797cb80331819c
|
[
"BSD-3-Clause"
] | 1
|
2021-11-03T09:57:31.000Z
|
2021-11-04T03:00:49.000Z
|
options/train_options.py
|
fatalfeel/DeblurGAN
|
cc4ccf09d23b91389dbea70a34797cb80331819c
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import torch
import argparse
from util import util
def str2bool(b_str):
    if b_str.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif b_str.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        # Fail loudly instead of silently returning None for bad input.
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % b_str)
class TrainOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument('--dataroot', type=str, default='./data/combined', help='path to images (should have subfolders train, val, test)')
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSizeX', type=int, default=640, help='scale images to this size')
self.parser.add_argument('--loadSizeY', type=int, default=360, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--which_model_netG', type=str, default='RESNET', help='RESNET, FPN50, FPN101, FPN152')
self.parser.add_argument('--learn_residual', type=str2bool, default=True, help='if specified, model would learn only the residual to the input')
self.parser.add_argument('--resume', type=str2bool, default=False, help='continue training')
self.parser.add_argument('--gan_type', type=str, default='gan', help='gan is faster, wgan-gp is stable')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--n_layers_G', type=int, default=3, help='2 layers features 2^6~2^8, 3 layers features 2^6~2^9')
self.parser.add_argument('--n_blocks_G', type=int, default=12, help='ResnetBlocks at 6, 9, 12...')
#self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--cuda', type=str2bool, default=False, help='using gpu training')
self.parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
self.parser.add_argument('--model', type=str, default='content_gan', help='chooses which model to use. content_gan, pix2pix, test')
self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
self.parser.add_argument('--nThreads', type=int, default=1, help='# threads for loading data')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoint', help='models are saved here')
self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=-1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
self.parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
self.parser.add_argument('--resize_or_crop', type=str, default='crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
self.parser.add_argument('--print_freq', type=int, default=20, help='frequency of showing training results on console')
self.parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
self.parser.add_argument('--e_epoch', type=int, default=2000, help='number repeat to train')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.00001, help='initial learning rate for adam')
self.parser.add_argument('--content_weight', type=float, default=100.0, help='fast-neural-style content weight')
self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
self.isTrain = True
def GetOption(self):
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain #train or test
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
#print('-------------- End ----------------')
# save to the disk
#expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
expr_dir = self.opt.checkpoints_dir
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
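# Usage sketch (illustrative): GetOption() parses sys.argv, so flags come
# from the command line, e.g. `python train.py --batchSize 4 --cuda true`
# (train.py is a hypothetical entry point).
#
#     opt = TrainOptions().GetOption()
#     print(opt.batchSize, opt.lr, opt.cuda)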
| 89.576471
| 237
| 0.582217
| 7,374
| 0.968479
| 0
| 0
| 0
| 0
| 0
| 0
| 3,036
| 0.398739
|
fbb096d17208c7f5930144a24371f1c241611091
| 1,991
|
py
|
Python
|
bin/tabletest.py
|
tjoneslo/pypdflite
|
ac2501f30d6619eae9dea5644717575ca9263d0a
|
[
"MIT"
] | 7
|
2016-05-19T02:23:42.000Z
|
2020-04-16T16:19:13.000Z
|
bin/tabletest.py
|
tjoneslo/pypdflite
|
ac2501f30d6619eae9dea5644717575ca9263d0a
|
[
"MIT"
] | 5
|
2016-11-29T19:21:39.000Z
|
2019-08-18T09:44:25.000Z
|
bin/tabletest.py
|
tjoneslo/pypdflite
|
ac2501f30d6619eae9dea5644717575ca9263d0a
|
[
"MIT"
] | 6
|
2017-01-23T02:12:52.000Z
|
2020-07-07T22:34:44.000Z
|
import os
from pypdflite.pdflite import PDFLite
from pypdflite.pdfobjects.pdfcolor import PDFColor
def TableTest(test_dir):
""" Functional test for text, paragraph, and page
splitting.
"""
data = [["Heading1", "Heading2", "Heading3"],
["Cell a2", "Cell b2", "Cell c2"],
["Cell a3", "Cell b3", "Cell c3"]]
#Create PDFLITE object, initialize with path & filename.
writer = PDFLite(os.path.join(test_dir, "tests/TableTest.pdf"))
# If desired (in production code), set compression
# writer.setCompression(True)
# Set general information metadata
writer.set_information(title="Testing Table") # set optional information
# Use get_document method to get the generated document object.
document = writer.get_document()
document.set_cursor(100, 100)
document.set_font(family='arial', style='UB', size=12)
underline = document.get_font()
document.set_font(family='arial', size=12)
default_font = document.get_font()
# Example for adding short and long text and whitespaces
mytable = document.add_table(3, 3)
green = PDFColor(name='green')
default = document.add_cell_format({'font': default_font, 'align': 'left', 'border': (0, 1)})
justleft = document.add_cell_format({'left': (0, 1)})
header_format = document.add_cell_format({'font': underline, 'align': 'right', 'border': (0, 1)})
green_format = document.add_cell_format({'font': default_font, 'border': (0, 1), 'fill_color': green})
#mytable.set_column_width(1, 200)
#mytable.set_row_height(2, 200)
mytable.write_row(0, 0, data[0], header_format)
mytable.write_row(1, 0, data[1], justleft)
mytable.write_row(2, 0, data[2], green_format)
document.draw_table(mytable)
document.add_newline(4)
document.add_text("Testing followup text")
# Close writer
writer.close()
if __name__ == "__main__":
    # TableTest requires an output directory; default to the current one.
    TableTest(os.getcwd())
| 32.639344
| 107
| 0.656454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 743
| 0.373179
|
fbb17de1e27a39093c5016a288d1b4d494da72ba
| 2,227
|
py
|
Python
|
backend/app/literature/schemas/cross_reference_schemas.py
|
alliance-genome/agr_literature_service
|
2278316422d5c3ab65e21bb97d91e861e48853c5
|
[
"MIT"
] | null | null | null |
backend/app/literature/schemas/cross_reference_schemas.py
|
alliance-genome/agr_literature_service
|
2278316422d5c3ab65e21bb97d91e861e48853c5
|
[
"MIT"
] | 39
|
2021-10-18T17:02:49.000Z
|
2022-03-28T20:56:24.000Z
|
backend/app/literature/schemas/cross_reference_schemas.py
|
alliance-genome/agr_literature_service
|
2278316422d5c3ab65e21bb97d91e861e48853c5
|
[
"MIT"
] | 1
|
2021-10-21T00:11:18.000Z
|
2021-10-21T00:11:18.000Z
|
from typing import List, Optional
from pydantic import BaseModel
from pydantic import validator
class CrossReferenceSchemaRelated(BaseModel):
curie: str
pages: Optional[List[str]] = None
is_obsolete: Optional[bool] = None
@validator('curie')
    def curie_must_contain_single_colon(cls, v):
if v.count(":") != 1 and not v.startswith("DOI:"):
raise ValueError('must contain a single colon')
return v
class Config():
orm_mode = True
extra = "forbid"
schema_extra = {
"example": {
"curie": "MOD:curie",
"pages": [
"reference"
]
}
}
class CrossReferenceSchemaPost(CrossReferenceSchemaRelated):
resource_curie: Optional[str] = None
reference_curie: Optional[str] = None
class Config():
        orm_mode = True
extra = "forbid"
schema_extra = {
"example": {
"curie": "MOD:curie",
"pages": [
"reference"
],
"reference_curie": "AGR:AGRReference<number>"
}
}
class CrossReferencePageSchemaShow(BaseModel):
name: Optional[str] = None
url: Optional[str] = None
class Config():
orm_mode = True
extra = "forbid"
class CrossReferenceSchemaShow(BaseModel):
curie: str
url: Optional[str] = None
pages: Optional[List[CrossReferencePageSchemaShow]] = None
is_obsolete: bool
class CrossReferenceSchema(BaseModel):
curie: str
pages: Optional[List[CrossReferencePageSchemaShow]] = None
url: Optional[str] = None
is_obsolete: Optional[bool] = False
resource_curie: Optional[str] = None
reference_curie: Optional[str] = None
author_ids: Optional[List[int]] = None
editor_ids: Optional[List[int]] = None
class Config():
orm_mode = True
extra = "forbid"
class CrossReferenceSchemaUpdate(BaseModel):
pages: Optional[List[str]] = None
resource_curie: Optional[str] = None
reference_curie: Optional[str] = None
is_obsolete: Optional[bool] = None
class Config():
orm_mode = True
extra = "forbid"
| 24.472527
| 62
| 0.594073
| 2,112
| 0.948361
| 0
| 0
| 196
| 0.088011
| 0
| 0
| 218
| 0.09789
|
fbb2c4cd37463a15b46ecf098fc46612a9561fb6
| 325
|
py
|
Python
|
leetcode/53.py
|
jasonlmfong/leetcode
|
490764b4212735915fa73e1a1bdfd40b8c8ad9ea
|
[
"MIT"
] | null | null | null |
leetcode/53.py
|
jasonlmfong/leetcode
|
490764b4212735915fa73e1a1bdfd40b8c8ad9ea
|
[
"MIT"
] | null | null | null |
leetcode/53.py
|
jasonlmfong/leetcode
|
490764b4212735915fa73e1a1bdfd40b8c8ad9ea
|
[
"MIT"
] | null | null | null |
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
Max = -float("inf")
currMax = -float("inf")
for num in nums:
currMax = max(num, num + currMax)
Max = max(Max, currMax)
return Max
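# Worked example of the scan above (Kadane's algorithm, O(n) time, O(1) space):
#   nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
#   currMax is the best sum of a subarray ending at the current element,
#   Max the best sum seen anywhere; the answer here is 6, from [4, -1, 2, 1].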
| 25
| 45
| 0.461538
| 325
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.230769
|
fbb2df16b7c104eb5bf6d5b9289bada959a6f3e9
| 967
|
py
|
Python
|
tests/test_resolver.py
|
manz/a816
|
2338ebf87039d6a4a4db8014c48c1d0d2488ceca
|
[
"MIT"
] | 2
|
2018-06-11T23:37:02.000Z
|
2018-09-06T04:02:19.000Z
|
tests/test_resolver.py
|
manz/a816
|
2338ebf87039d6a4a4db8014c48c1d0d2488ceca
|
[
"MIT"
] | 8
|
2015-10-30T11:20:45.000Z
|
2021-11-21T12:59:33.000Z
|
tests/test_resolver.py
|
manz/a816
|
2338ebf87039d6a4a4db8014c48c1d0d2488ceca
|
[
"MIT"
] | 1
|
2021-03-29T03:21:54.000Z
|
2021-03-29T03:21:54.000Z
|
import unittest
from a816.parse.ast.expression import eval_expression_str
from a816.symbols import Resolver
class ResolverTest(unittest.TestCase):
def test_math_expr_eval(self) -> None:
expr = "0x100+toto & 0xFFFF"
resolver = Resolver()
resolver.current_scope.add_symbol("toto", 0x108000)
self.assertEqual(eval_expression_str(expr, resolver), 0x8100)
def test_symbols_resolved_through_eval(self) -> None:
expr = "toto"
resolver = Resolver()
resolver.current_scope.add_symbol("toto", 0x1234)
self.assertEqual(eval_expression_str(expr, resolver), 0x1234)
def test_eval(self) -> None:
r = Resolver()
r.current_scope.add_symbol("name.data", 4)
value = eval_expression_str("name.data", r)
self.assertEqual(value, 4)
def test_unary(self) -> None:
r = Resolver()
value = eval_expression_str("-1", r)
self.assertEqual(value, -1)
| 28.441176
| 69
| 0.663909
| 855
| 0.884178
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 0.067218
|
fbb43010f529aa881832d163b127b8c90dbb0317
| 4,445
|
py
|
Python
|
syft_proto/frameworks/crypten/onnx_model_pb2.py
|
vkkhare/syft-proto
|
513b4af50d7476bd5b1ff9dfb6da8528100f961d
|
[
"Apache-2.0"
] | null | null | null |
syft_proto/frameworks/crypten/onnx_model_pb2.py
|
vkkhare/syft-proto
|
513b4af50d7476bd5b1ff9dfb6da8528100f961d
|
[
"Apache-2.0"
] | null | null | null |
syft_proto/frameworks/crypten/onnx_model_pb2.py
|
vkkhare/syft-proto
|
513b4af50d7476bd5b1ff9dfb6da8528100f961d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: syft_proto/frameworks/crypten/onnx_model.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from syft_proto.types.syft.v1 import id_pb2 as syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='syft_proto/frameworks/crypten/onnx_model.proto',
package='syft_proto.frameworks.torch.tensors.interpreters.v1',
syntax='proto3',
serialized_options=b'\n@org.openmined.syftproto.frameworks.torch.tensors.interpreters.v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n.syft_proto/frameworks/crypten/onnx_model.proto\x12\x33syft_proto.frameworks.torch.tensors.interpreters.v1\x1a!syft_proto/types/syft/v1/id.proto\"\x9a\x01\n\tOnnxModel\x12,\n\x02id\x18\x01 \x01(\x0b\x32\x1c.syft_proto.types.syft.v1.IdR\x02id\x12)\n\x10serialized_model\x18\x02 \x01(\x0cR\x0fserializedModel\x12\x12\n\x04tags\x18\x03 \x03(\tR\x04tags\x12 \n\x0b\x64\x65scription\x18\x04 \x01(\tR\x0b\x64\x65scriptionBB\n@org.openmined.syftproto.frameworks.torch.tensors.interpreters.v1b\x06proto3'
,
dependencies=[syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2.DESCRIPTOR,])
_ONNXMODEL = _descriptor.Descriptor(
name='OnnxModel',
full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.OnnxModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.OnnxModel.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='id', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='serialized_model', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.OnnxModel.serialized_model', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='serializedModel', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.OnnxModel.tags', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tags', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.OnnxModel.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='description', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=293,
)
_ONNXMODEL.fields_by_name['id'].message_type = syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2._ID
DESCRIPTOR.message_types_by_name['OnnxModel'] = _ONNXMODEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OnnxModel = _reflection.GeneratedProtocolMessageType('OnnxModel', (_message.Message,), {
'DESCRIPTOR' : _ONNXMODEL,
'__module__' : 'syft_proto.frameworks.crypten.onnx_model_pb2'
# @@protoc_insertion_point(class_scope:syft_proto.frameworks.torch.tensors.interpreters.v1.OnnxModel)
})
_sym_db.RegisterMessage(OnnxModel)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.302083
| 516
| 0.783127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,553
| 0.349381
|
fbb5a43012f90f186d93e303de06bb99a2be6844
| 3,333
|
py
|
Python
|
Actor_CriticPointer_Network-TSP/knapsack_env.py
|
GeoffNN/DLforCombin
|
02553a50491420ab0d51860faff4f9d5aee59616
|
[
"MIT"
] | 5
|
2017-12-29T12:16:37.000Z
|
2020-05-24T22:53:56.000Z
|
Actor_CriticPointer_Network-TSP/knapsack_env.py
|
GeoffNN/DLforCombin
|
02553a50491420ab0d51860faff4f9d5aee59616
|
[
"MIT"
] | 1
|
2018-01-28T20:09:44.000Z
|
2018-01-28T20:09:44.000Z
|
Actor_CriticPointer_Network-TSP/knapsack_env.py
|
GeoffNN/DLforCombin
|
02553a50491420ab0d51860faff4f9d5aee59616
|
[
"MIT"
] | 1
|
2020-05-24T22:53:50.000Z
|
2020-05-24T22:53:50.000Z
|
import numpy as np
import knapsack
class Knapsack:
def __init__(self, K, max_weight, state_shape = 'flat', penalize_repeat = False):
self.K = K
self.max_weight = max_weight
self.penalize_repeat = penalize_repeat # Not used for now, have to figure out details
self.env_name = 'Knapsack'
        # state_shape is overwritten with the concrete shape below, so keep
        # the requested layout as a boolean flag for reset() and step().
        self.flat = (state_shape == 'flat')
        if self.flat:
            self.state_shape = [self.K * 3]
        else:
            self.state_shape = [self.K, 3]
self.num_actions = self.K
def reset(self):
self.values = np.random.rand(self.K)
self.weights = np.random.rand(self.K)
self.xs = np.zeros(self.K)
self.episode_rewards = []
        if self.flat:
return np.concatenate([self.values, self.weights, self.xs])
else:
return np.array([self.values, self.weights, self.xs]).T
def optimal_solution(self):
total_reward, choices = knapsack.knapsack(self.weights, self.values).solve(self.max_weight)
xs = np.zeros(self.K)
for i in choices:
xs[i] = 1
return total_reward, xs
def at_random_solution(self):
current_xs = np.zeros(self.K)
next_xs = np.zeros(self.K)
while np.sum(current_xs) < self.K:
next_xs[np.random.randint(self.K)] = 1
if np.sum(self.weights * next_xs) > self.max_weight:
break
current_xs = np.copy(next_xs)
return np.sum(self.values * current_xs), current_xs, \
np.sum(self.weights * current_xs)
def accumulated_reward(self):
return np.sum(self.values * self.xs)
def max_reward_to_go(self):
remaining_weight_capacity = self.max_weight - np.sum(self.weights[self.xs == 1])
max_rtg, _ = knapsack.knapsack(self.weights[self.xs != 1],
self.values[self.xs != 1]).solve(remaining_weight_capacity)
return max_rtg
def step(self, action):
# Action is the index of the next object to add
current_sacks_weight = np.sum(self.weights * self.xs)
if self.xs[action] == 1 or current_sacks_weight + self.weights[action] > self.max_weight: # Do nothing
            if self.flat:
new_state = np.concatenate([self.values, self.weights, self.xs])
else:
new_state = np.array([self.values, self.weights, self.xs]).T
self.episode_rewards.append(0)
return new_state, 0, False
else:
self.xs[action] = 1
current_sacks_weight = np.sum(self.weights * self.xs)
            if self.flat:
new_state = np.concatenate([self.values, self.weights, self.xs])
else:
new_state = np.array([self.values, self.weights, self.xs]).T
reward = self.values[action]
self.episode_rewards.append(reward)
if np.sum(self.xs) == self.K:
return new_state, reward, True
next_lightest_weight = np.min(self.weights[self.xs != 1])
if current_sacks_weight + next_lightest_weight > self.max_weight:
done = True
else:
done = False
return new_state, reward, done
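# Rollout sketch (illustrative values):
#
#     env = Knapsack(K=10, max_weight=2.0)
#     state = env.reset()                            # random values/weights in [0, 1)
#     state, reward, done = env.step(0)              # try to add object 0 to the sack
#     best_value, best_xs = env.optimal_solution()   # exact DP baseline via `knapsack`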
| 38.310345
| 110
| 0.584158
| 3,295
| 0.988599
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.043504
|
fbb6af8f01f84caaa96ba70cd3d046f928150b4b
| 3,063
|
py
|
Python
|
plotmonitor.py
|
mjlosch/python_scripts
|
7e3c81382484a70a598e81da9ca260e45ad85a00
|
[
"MIT"
] | 1
|
2020-11-20T20:07:06.000Z
|
2020-11-20T20:07:06.000Z
|
plotmonitor.py
|
mjlosch/python_scripts
|
7e3c81382484a70a598e81da9ca260e45ad85a00
|
[
"MIT"
] | null | null | null |
plotmonitor.py
|
mjlosch/python_scripts
|
7e3c81382484a70a598e81da9ca260e45ad85a00
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""Usage: plotres.py variable INPUTFILE(S)
"""
import sys
from getopt import gnu_getopt as getopt
import matplotlib.pyplot as plt
import numpy as np
import datetime
# parse command-line arguments
try:
optlist,args = getopt(sys.argv[1:], ':', ['verbose'])
assert len(args) > 1
except AssertionError:
sys.exit(__doc__)
files=[]
mystr=args[0]
if len(args) == 2:
    # A single (possibly quoted) pattern was given; expand it with glob.
    from glob import glob
    for infile in glob(args[1]):
        files.append(infile)
else:
    files = args[1:]
#
def getKey(item):
return item[0]
def get_output (fnames, mystring):
"""parse fname and get some numbers out"""
timev = []
myvar = []
pp = []
for fname in fnames:
try:
f=open(fname)
        except IOError:
print(fname + " does not exist, continuing")
else:
# p = []
for line in f:
if "time_secondsf" in line:
ll = line.split()
# p.append(float(ll[-1].replace('D','e')))
# p.append(np.NaN)
timev.append(float(ll[-1].replace('D','e')))
myvar.append(np.NaN)
if mystring in line:
ll = line.split()
# p[1] = float(ll[-1].replace('D','e'))
# pp.append(p)
# p = []
myvar[-1] = float(ll[-1].replace('D','e'))
f.close()
timevs=np.asarray(timev)
myvars=np.asarray(myvar)
isort = np.argsort(timevs)
timevs=timevs[isort]
myvars=myvars[isort]
# ppp = sorted( pp, key = getKey )
# indx = sorted(range(len(timev)), key=lambda k: timev[k])
# myvars=[]
# timevs=[]
# for k in range(len(pp)):
# myvars.append(ppp[k][1])
# timevs.append(ppp[k][0])
return timevs, myvars
# done
fig = plt.figure(figsize=(12, 4))
ax=fig.add_subplot(111)
refdate = datetime.datetime(1,1,1,0,0)
#refdate = datetime.datetime(1979,1,1,0,0)
#refdate = datetime.datetime(1958,1,1,0,0)
# determine start date
with open(files[0]) as f:
for line in f:
if 'startDate_1' in line:
ll = line.strip().split('=')[-1]
refdate = datetime.datetime(int(ll[0:4]),int(ll[4:6]),int(ll[6:8]))
#refdate = datetime.datetime(2001,1,1)
timesec, h = get_output(files, mystr)
if np.all(np.isnan(h)): sys.exit("only nans in timeseries")
timeday = np.asarray(timesec)/86400.
#xdays = refdate + timeday * datetime.timedelta(days=1)
xdays = np.array([refdate + datetime.timedelta(days=i) for i in timeday])
# now plot everything
#print timesec[0:2], timesec[-3:-1]
#print h[0:2], h[-3:-1]
#print timesec
#print h
ax.plot(xdays, h, '-x', linewidth=1.0)
plt.grid()
plt.title(mystr)
hh=np.ma.masked_array(h,np.isnan(h))
print("mean = "+str(np.mean(hh)))
print("min = "+str(np.min(hh)))
print("max = "+str(np.max(hh)))
print("std = "+str(np.std(hh)))
print("last-first = "+str(h[-1]-h[0]))
plt.show()
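# Example invocation (file names illustrative; the first argument is any
# monitor key that appears in the model's standard output, the rest are the
# output files to scan):
#     ./plotmonitor.py dynstat_theta_mean STDOUT.0000 STDOUT.0001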
| 26.179487
| 79
| 0.555664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,165
| 0.380346
|
fbb7a78d183671e9c2e148567652110d81c620e9
| 14,266
|
py
|
Python
|
src/primaires/communication/contextes/immersion.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
src/primaires/communication/contextes/immersion.py
|
vincent-lg/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
src/primaires/communication/contextes/immersion.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte 'communication:immersion'"""
from primaires.format.constantes import ponctuations_finales
from primaires.interpreteur.contexte import Contexte
from primaires.communication.contextes.invitation import Invitation
class Immersion(Contexte):
"""Contexte d'immersion dans un canal de communication.
"""
def __init__(self, pere):
"""Constructeur du contexte"""
Contexte.__init__(self, pere)
self.opts.prompt_prf = ""
self.opts.prompt_clr = ""
self.canal = None
self.options = {
            # User options
"q" : self.opt_quit,
"w" : self.opt_who,
"h" : self.opt_help,
"i" : self.opt_invite,
"me" : self.opt_emote,
            # Moderator options
"e" : self.opt_eject,
"b" : self.opt_ban,
"a" : self.opt_announce,
            # Admin options
"p" : self.opt_promote,
"ed" : self.opt_edit,
"d" : self.opt_dissolve,
}
def __getstate__(self):
"""Nettoyage des options"""
dico_attr = Contexte.__getstate__(self)
dico_attr["options"] = dico_attr["options"].copy()
for rac, fonction in dico_attr["options"].items():
dico_attr["options"][rac] = fonction.__name__
return dico_attr
def __setstate__(self, dico_attr):
"""Récupération du contexte"""
Contexte.__setstate__(self, dico_attr)
for rac, nom in self.options.items():
fonction = getattr(self, nom)
self.options[rac] = fonction
@property
def u_nom(self):
return "immersion:" + self.canal.nom
def accueil(self):
"""Message d'accueil du contexte"""
canal = self.canal
res = canal.clr + ">|ff| Immersion dans le canal " + canal.nom
res += "\n Entrez |ent|/h|ff| pour afficher l'aide."
return res
def opt_quit(self, arguments):
"""Option quitter : /q"""
canal = self.canal
personnage = self.pere.joueur
canal.immerger_ou_sortir(personnage)
personnage << canal.clr + ">|ff| Retour au jeu."
def opt_who(self, arguments):
"""Option qui : /w"""
personnage = self.pere.joueur
res = self.canal.clr + ">|ff| Joueurs connectés :"
for connecte in self.canal.connectes:
if connecte in type(self).importeur.connex.joueurs_connectes:
if connecte is self.canal.auteur:
statut = "|rgc|@"
elif connecte in self.canal.moderateurs:
statut = "|jn|*"
else:
statut = "|bc|"
res += "\n " + statut + connecte.nom + "|ff|"
if connecte in self.canal.immerges:
res += " (immergé)"
personnage << res
def opt_help(self, arguments):
"""Options d'affichage de l'aide : /h"""
personnage = self.pere.joueur
canal = self.canal
res = canal.clr + ">|ff| Aide du canal |ent|{}|ff| ({}) :\n".format(
canal.nom, canal.resume)
res += str(canal.description)
res += "\n Administrateur : |rgc|"
res += (canal.auteur and canal.auteur.nom or "aucun") + "|ff|"
modos = ""
if len(canal.moderateurs) == 1:
modos = "\n Modérateur : |jn|" + canal.moderateurs[0].nom + "|ff|"
elif len(canal.moderateurs) > 1:
modos = "\n Modérateurs : |jn|" + "|ff|, |jn|".join(
sorted([modo.nom for modo in canal.moderateurs])) + "|ff|"
res += modos
res += "\n Commandes disponibles :"
res += "\n - |cmd|/h|ff| : affiche ce message d'aide"
res += "\n - |cmd|/w|ff| : liste les joueurs connectés au canal"
res += "\n - |cmd|/i <joueur>|ff| : invite un joueur à rejoindre "
res += "le canal"
res += "\n - |cmd|/me <message>|ff| : joue une emote dans le canal"
res += "\n - |cmd|/q|ff| : permet de sortir du mode immersif"
if personnage in canal.moderateurs or personnage is canal.auteur \
or personnage.est_immortel():
res += "\n Commandes de modération :"
res += "\n - |cmd|/e <joueur>|ff| : éjecte un joueur"
res += "\n - |cmd|/b <joueur>|ff| : bannit ou rappelle un joueur"
res += "\n - |cmd|/a <message>|ff| : permet d'envoyer une "
res += "annonce impersonnelle"
if personnage is canal.auteur or personnage.est_immortel():
res += "\n Commandes d'administration :"
res += "\n - |cmd|/p <joueur>|ff| : promeut ou déchoit un joueur "
res += "modérateur"
res += "\n - |cmd|/ed|ff| : ouvre l'éditeur du canal"
res += "\n - |cmd|/d|ff| : dissout le canal"
personnage << res
def opt_invite(self, arguments):
"""Option pour inviter un ami à rejoindre le cana : /i <joueur>"""
canal = self.canal
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez spécifier un joueur.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for t_joueur in type(self).importeur.connex.joueurs_connectes:
if nom_joueur == t_joueur.nom.lower():
joueur = t_joueur
break
if joueur is None:
self.pere.joueur << "|err|Le joueur passé en paramètre n'a pu " \
"être trouvé.|ff|"
return
if joueur in canal.connectes:
self.pere.joueur << "|err|Ce joueur est déjà connecté au canal.|ff|"
return
contexte = Invitation(joueur.instance_connexion)
contexte.emetteur = self.pere.joueur
contexte.canal = canal
contexte.actualiser()
self.pere.joueur << "|att|Vous venez d'inviter {} à rejoindre le " \
"canal {}.|ff|".format(joueur.nom, canal.nom)
def opt_emote(self, arguments):
"""Option d'emote dans le contexte immersif"""
canal = self.canal
joueur = self.pere.joueur
if not arguments or arguments.isspace():
joueur << "|err|Vous devez préciser une action.|ff|"
return
message = arguments.rstrip(" \n")
if not message[-1] in ponctuations_finales:
message += "."
im = canal.clr + "<" + joueur.nom + " " + message + ">|ff|"
ex = canal.clr + "[" + canal.nom + "] " + joueur.nom + " "
ex += message + "|ff|"
for connecte in canal.connectes:
if connecte in type(self).importeur.connex.joueurs_connectes:
if connecte in canal.immerges:
connecte << im
else:
connecte << ex
def opt_eject(self, arguments):
"""Option permettant d'éjecter un joueur connecté : /e <joueur>"""
canal = self.canal
if not self.pere.joueur in canal.moderateurs and \
self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez spécifier un joueur.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for connecte in canal.connectes:
if nom_joueur == connecte.nom.lower():
joueur = connecte
break
if joueur is None:
self.pere.joueur << "|err|Ce joueur n'est pas connecté au " \
"canal.|ff|"
return
if joueur is self.pere.joueur:
self.pere.joueur << "|err|Vous ne pouvez vous éjecter " \
"vous-même.|ff|"
return
if joueur in canal.moderateurs or joueur is canal.auteur:
self.pere.joueur << "|err|Vous ne pouvez éjecter ce joueur.|ff|"
return
canal.ejecter(joueur)
def opt_ban(self, arguments):
"""Option permettant de bannir un joueur connecté : /b <joueur>"""
canal = self.canal
if not self.pere.joueur in canal.moderateurs and \
self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for t_joueur in type(self).importeur.connex.joueurs:
if nom_joueur == t_joueur.nom.lower():
joueur = t_joueur
break
if joueur is None:
self.pere.joueur << "|err|Le joueur passé en paramètre n'a pu " \
"être trouvé.|ff|"
return
if joueur is self.pere.joueur:
self.pere.joueur << "|err|Vous ne pouvez vous bannir vous-même.|ff|"
return
if joueur in canal.moderateurs or joueur is canal.auteur:
self.pere.joueur << "|err|Vous ne pouvez éjecter ce joueur.|ff|"
return
canal.bannir(joueur)
def opt_announce(self, arguments):
"""Option permettant d'envoyer une annonce : /a <message>"""
canal = self.canal
if not self.pere.joueur in canal.moderateurs and \
self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
message = arguments.rstrip(" \n")
canal.envoyer_imp(message)
def opt_promote(self, arguments):
"""Option permettant de promouvoir un joueur connecté : /p <joueur>"""
canal = self.canal
if self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for connecte in canal.connectes:
if nom_joueur == connecte.nom.lower():
joueur = connecte
break
if joueur is None:
self.pere.joueur << "|err|Ce joueur n'est pas connecté au " \
"canal.|ff|"
return
if joueur is self.pere.joueur:
self.pere.joueur << "|err|Vous ne pouvez vous promouvoir " \
"vous-même.|ff|"
return
if joueur is canal.auteur:
self.pere.joueur << "|err|Ce joueur est déjà administrateur.|ff|"
return
canal.promouvoir_ou_dechoir(joueur)
def opt_edit(self, arguments):
"""Option ouvrant un éditeur du canal"""
canal = self.canal
if self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
editeur = type(self).importeur.interpreteur.construire_editeur(
"chedit", self.pere.joueur, canal)
self.pere.joueur.contextes.ajouter(editeur)
editeur.actualiser()
def opt_dissolve(self, arguments):
"""Option permettant de dissoudre le canal"""
canal = self.canal
if self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
joueur = self.pere.joueur
canal.immerger_ou_sortir(joueur, False)
canal.rejoindre_ou_quitter(joueur, False)
joueur << "|err|Le canal {} a été dissous.|ff|".format(canal.nom)
canal.dissoudre()
def interpreter(self, msg):
"""Méthode d'interprétation du contexte"""
if msg.startswith("/"):
            # This is an option
            # Extract the option name
mots = msg.split(" ")
option = mots[0][1:]
arguments = " ".join(mots[1:])
if option not in self.options.keys():
self.pere << "|err|Option invalide ({}).|ff|".format(option)
            else: # Call the function matching the option
fonction = self.options[option]
fonction(arguments)
else:
self.canal.envoyer(self.pere.joueur, msg)
| 42.207101
| 80
| 0.570798
| 12,509
| 0.873107
| 0
| 0
| 75
| 0.005235
| 0
| 0
| 4,913
| 0.342919
|
fbb9ce8232b91bd4820115cf24c512ee8d3b9a6c
| 2,104
|
py
|
Python
|
Projects/ABM_DA/experiments/ukf_experiments/tests/arc_test.py
|
RobertClay/DUST-RC
|
09f7ec9d8d093021d068dff8a7a48c15ea318b86
|
[
"MIT"
] | 15
|
2018-11-21T14:57:24.000Z
|
2022-03-04T15:42:09.000Z
|
Projects/ABM_DA/experiments/ukf_experiments/tests/arc_test.py
|
RobertClay/DUST-RC
|
09f7ec9d8d093021d068dff8a7a48c15ea318b86
|
[
"MIT"
] | 125
|
2019-11-06T13:03:35.000Z
|
2022-03-07T13:38:33.000Z
|
Projects/ABM_DA/experiments/ukf_experiments/tests/arc_test.py
|
RobertClay/DUST-RC
|
09f7ec9d8d093021d068dff8a7a48c15ea318b86
|
[
"MIT"
] | 6
|
2018-11-20T15:56:49.000Z
|
2021-10-08T10:21:06.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 12:07:22 2020
@author: medrclaa
Stand alone script for testing arc in ARC. simply run
pytest arc_test.py
To ensure the working environment is suitable for running experiments.
If you only wish to run a single experiment, you can simply comment out the
other two for quicker testing.
"""
import unittest
import os
"""
run file in ukf_experiments. putting test at top level allows the
large number of
"""
"if running this file on its own. this will move cwd up to ukf_experiments."
if os.path.split(os.getcwd())[1] != "ukf_experiments":
os.chdir("..")
import arc.arc as arc
from modules.ukf_fx import HiddenPrints
class Test_arc(unittest.TestCase):
"""test the ukf runs for all 3 experiments in arc
this is a fairly long test but tests vitually everything runs bar the
plotting.
"""
@classmethod
def setUpClass(cls):
pass
def test_ex0(self):
"""run the arc test for the experiment 0 module
pass the test if the whole arc test completes.
Note that arc_test.py does similar but is actually runnable in
arc to check the environment is suitable there.
"""
with HiddenPrints():
arc.main(arc.ex0_input, arc.ex0_save, test=True)
def test_ex1(self):
"""another arc module for experiment 1
We choose n =5 and proportion observed prop = 0.5
"""
with HiddenPrints():
arc.main(arc.ex1_input, test=True)
def test_ex2(self):
"""another arc module test for experiment 2
We choose n = 5 and aggregate square size bin_size = 50
"""
with HiddenPrints():
arc.main(arc.ex2_input, test=True)
if __name__ == '__main__':
"test the three experiments arc functions are working"
" each test uses 5 agents and some arbitrary parameters for the sake of speed"
    arc_tests = Test_arc.setUpClass()
unittest.main()
| 25.047619
| 82
| 0.63308
| 1,163
| 0.552757
| 0
| 0
| 50
| 0.023764
| 0
| 0
| 1,330
| 0.632129
|
fbbb3304e214d67619ec0cdb7ec6c61b33484d73
| 646
|
py
|
Python
|
setup.py
|
dunkgray/nomics-python
|
1e19647522f62e32218fa4cf859db68d26696d10
|
[
"MIT"
] | null | null | null |
setup.py
|
dunkgray/nomics-python
|
1e19647522f62e32218fa4cf859db68d26696d10
|
[
"MIT"
] | null | null | null |
setup.py
|
dunkgray/nomics-python
|
1e19647522f62e32218fa4cf859db68d26696d10
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name = "nomics-python",
version = "3.1.0",
author = "Taylor Facen",
author_email = "taylor.facen@gmail.com",
description = "A python wrapper for the Nomics API",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/TaylorFacen/nomics-python",
packages = setuptools.find_packages(),
install_requires = ['requests>=2'],
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License"
]
)
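# Packaging sketch (run from the project root; the wheel file name is
# illustrative):
#     python setup.py sdist bdist_wheel
#     pip install dist/nomics_python-3.1.0-py3-none-any.whl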
| 30.761905
| 57
| 0.653251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 262
| 0.405573
|
fbbbe5438f901d187d2db29f2722a9a76b8e6ff6
| 2,177
|
py
|
Python
|
tests/test_day3.py
|
ullumullu/adventofcode2020
|
0ad0e6ac7af7d3c21fe2cb42cbb8d29a992ae6d0
|
[
"MIT"
] | null | null | null |
tests/test_day3.py
|
ullumullu/adventofcode2020
|
0ad0e6ac7af7d3c21fe2cb42cbb8d29a992ae6d0
|
[
"MIT"
] | null | null | null |
tests/test_day3.py
|
ullumullu/adventofcode2020
|
0ad0e6ac7af7d3c21fe2cb42cbb8d29a992ae6d0
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from typing import List
from challenges.day3 import frequency_character
def _read_input() -> List[str]:
"""Read the input file."""
travel_map = []
current_path = Path(os.path.dirname(os.path.realpath(__file__)))
image_path = current_path / "resources" / "day3_puzzle_input.txt"
with image_path.open("r", encoding="utf-8") as input_file:
for line in input_file:
travel_map.append(str(line.strip()))
return travel_map
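# For reference, a minimal sketch of what frequency_character is assumed to
# do (the real implementation lives in challenges.day3; this is not its source):
#
#     def frequency_character(grid, right, down, char):
#         hits, col = 0, 0
#         for row in grid[::down]:                # step `down` rows at a time
#             if row[col % len(row)] == char:     # the map repeats to the right
#                 hits += 1
#             col += right
#         return hits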
def test_sample_input_part1():
sample_input = ["..##.......", "#...#...#..", ".#....#..#.", "..#.#...#.#", ".#...##..#.",
"..#.##.....", ".#.#.#....#", ".#........#", "#.##...#...", "#...##....#",
".#..#...#.#"]
expected_trees_hit = 7
hit_trees = frequency_character(
sample_input, right=3, down=1, char="#")
assert hit_trees == expected_trees_hit
def test_puzzle_input_part1():
input_map = _read_input()
result = frequency_character(
input_map, right=3, down=1, char="#")
print(f"Result: {result}")
assert result == 276
def test_sample_input_part2():
sample_input = ["..##.......", "#...#...#..", ".#....#..#.", "..#.#...#.#", ".#...##..#.",
"..#.##.....", ".#.#.#....#", ".#........#", "#.##...#...", "#...##....#",
".#..#...#.#"]
expected_trees_multiplier = 336
# right, down, expected
test_paths = [(1, 1, 2), (3, 1, 7), (5, 1, 3), (7, 1, 4), (1, 2, 2)]
result = 1
for test_path in test_paths:
hit_trees = frequency_character(
sample_input, right=test_path[0], down=test_path[1], char="#")
assert hit_trees == test_path[2]
result *= hit_trees
assert result == expected_trees_multiplier
def test_puzzle_input_part2():
input_map = _read_input()
test_paths = [(1, 1, 2), (3, 1, 7), (5, 1, 3), (7, 1, 4), (1, 2, 2)]
result = 1
for test_path in test_paths:
hit_trees = frequency_character(
input_map, right=test_path[0], down=test_path[1], char="#")
result *= hit_trees
print(f"Result: {result}")
assert result == 7812180000
| 32.984848
| 94
| 0.521819
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 429
| 0.19706
|
fbbc3f98679b551d2bd048c8773e0364748a4e51
| 2,333
|
py
|
Python
|
test/test_format.py
|
GuyTuval/msgpack-python
|
8fb709f2e0438862020d8810fa70a81fb5dac7d4
|
[
"Apache-2.0"
] | 1,252
|
2015-01-05T18:18:10.000Z
|
2022-03-27T16:40:44.000Z
|
test/test_format.py
|
GuyTuval/msgpack-python
|
8fb709f2e0438862020d8810fa70a81fb5dac7d4
|
[
"Apache-2.0"
] | 298
|
2015-01-06T12:21:09.000Z
|
2022-03-11T23:57:58.000Z
|
test/test_format.py
|
GuyTuval/msgpack-python
|
8fb709f2e0438862020d8810fa70a81fb5dac7d4
|
[
"Apache-2.0"
] | 199
|
2015-01-09T04:33:00.000Z
|
2022-03-30T15:04:37.000Z
|
#!/usr/bin/env python
# coding: utf-8
from msgpack import unpackb
def check(src, should, use_list=0, raw=True):
assert unpackb(src, use_list=use_list, raw=raw, strict_map_key=False) == should
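# For orientation, the byte strings below follow the msgpack spec: in
# b"\x93\xc0\xc2\xc3", 0x93 is a fixarray of length 3, and 0xc0, 0xc2, 0xc3
# are nil, false and true, hence the expected (None, False, True).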
def testSimpleValue():
check(b"\x93\xc0\xc2\xc3", (None, False, True))
def testFixnum():
check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff", ((0, 64, 127), (-32, -16, -1)))
def testFixArray():
check(b"\x92\x90\x91\x91\xc0", ((), ((None,),)))
def testFixRaw():
check(b"\x94\xa0\xa1a\xa2bc\xa3def", (b"", b"a", b"bc", b"def"))
def testFixMap():
check(
b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80", {False: {None: None}, True: {None: {}}}
)
def testUnsignedInt():
check(
b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
b"\xce\xff\xff\xff\xff",
(0, 128, 255, 0, 32768, 65535, 0, 2147483648, 4294967295),
)
def testSignedInt():
check(
b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
b"\xd2\xff\xff\xff\xff",
(0, -128, -1, 0, -32768, -1, 0, -2147483648, -1),
)
def testRaw():
check(
b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
(b"", b"a", b"ab", b"", b"a", b"ab"),
)
check(
b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
("", "a", "ab", "", "a", "ab"),
raw=False,
)
def testArray():
check(
b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
b"\xc2\xc3",
((), (None,), (False, True), (), (None,), (False, True)),
)
def testMap():
check(
b"\x96"
b"\xde\x00\x00"
b"\xde\x00\x01\xc0\xc2"
b"\xde\x00\x02\xc0\xc2\xc3\xc2"
b"\xdf\x00\x00\x00\x00"
b"\xdf\x00\x00\x00\x01\xc0\xc2"
b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2",
(
{},
{None: False},
{True: False, None: False},
{},
{None: False},
{True: False, None: False},
),
)
| 25.358696
| 88
| 0.533648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,037
| 0.444492
|
fbbc5a0d2746e83d3a087caa18124913a0952155
| 519
|
py
|
Python
|
src/main/resources/pytz/zoneinfo/Asia/Brunei.py
|
TheEin/swagger-maven-plugin
|
cf93dce2d5c8d3534f4cf8c612b11e2d2313871b
|
[
"Apache-2.0"
] | 65
|
2015-11-14T13:46:01.000Z
|
2021-08-14T05:54:04.000Z
|
lib/pytz/zoneinfo/Asia/Brunei.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 13
|
2016-03-31T20:00:17.000Z
|
2021-08-20T14:52:31.000Z
|
lib/pytz/zoneinfo/Asia/Brunei.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 20
|
2015-03-18T08:41:37.000Z
|
2020-12-18T02:58:30.000Z
|
'''tzinfo timezone information for Asia/Brunei.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Brunei(DstTzInfo):
'''Asia/Brunei timezone definition. See datetime.tzinfo for details'''
zone = 'Asia/Brunei'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1926,2,28,16,20,20),
d(1932,12,31,16,30,0),
]
_transition_info = [
i(27600,0,'LMT'),
i(27000,0,'BNT'),
i(28800,0,'BNT'),
]
Brunei = Brunei()
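# Usage sketch (illustrative): attach this zone to a naive datetime.
#
#     from datetime import datetime
#     Brunei.localize(datetime(2015, 6, 1, 12, 0)).strftime('%Z %z')  # 'BNT +0800'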
| 20.76
| 74
| 0.674374
| 318
| 0.612717
| 0
| 0
| 0
| 0
| 0
| 0
| 148
| 0.285164
|
fbbc6e1e7a5fe37234c1f6cec6987abfae3a501e
| 4,184
|
py
|
Python
|
raiden/tests/unit/test_messages.py
|
ConsenSysMesh/raiden
|
76510e5535fa0a1ceb26107b560f805f3d7d26d9
|
[
"MIT"
] | 3
|
2017-04-24T01:09:28.000Z
|
2017-05-26T18:32:34.000Z
|
raiden/tests/unit/test_messages.py
|
ConsenSysMesh/raiden
|
76510e5535fa0a1ceb26107b560f805f3d7d26d9
|
[
"MIT"
] | 1
|
2021-10-31T12:41:15.000Z
|
2021-10-31T12:41:15.000Z
|
raiden/tests/unit/test_messages.py
|
isabella232/raiden
|
76510e5535fa0a1ceb26107b560f805f3d7d26d9
|
[
"MIT"
] | 1
|
2021-10-31T12:05:52.000Z
|
2021-10-31T12:05:52.000Z
|
# -*- coding: utf-8 -*-
import pytest
from raiden.messages import Ping, Ack, decode, Lock, MediatedTransfer
from raiden.utils import make_privkey_address, sha3
PRIVKEY, ADDRESS = make_privkey_address()
def test_signature():
ping = Ping(nonce=0)
ping.sign(PRIVKEY, ADDRESS)
assert ping.sender == ADDRESS
def test_encoding():
ping = Ping(nonce=0)
ping.sign(PRIVKEY, ADDRESS)
decoded_ping = decode(ping.encode())
assert isinstance(decoded_ping, Ping)
assert decoded_ping.sender == ADDRESS == ping.sender
assert ping.nonce == decoded_ping.nonce
assert ping.signature == decoded_ping.signature
assert ping.cmdid == decoded_ping.cmdid
assert ping.hash == decoded_ping.hash
def test_hash():
ping = Ping(nonce=0)
ping.sign(PRIVKEY, ADDRESS)
data = ping.encode()
msghash = sha3(data)
decoded_ping = decode(data)
assert sha3(decoded_ping.encode()) == msghash
def test_ack():
echo = sha3(PRIVKEY)
ack = Ack(ADDRESS, echo)
assert ack.echo == echo
data = ack.encode()
msghash = sha3(data)
decoded_ack = decode(data)
assert decoded_ack.echo == ack.echo
assert decoded_ack.sender == ack.sender
assert sha3(decoded_ack.encode()) == msghash
def test_mediated_transfer():
nonce = balance = 1
token = recipient = target = initiator = ADDRESS
hashlock = locksroot = sha3(ADDRESS)
amount = expiration = 1
fee = 0
lock = Lock(amount, expiration, hashlock)
mediated_transfer = MediatedTransfer(
1, # TODO: fill in identifier
nonce,
token,
balance,
recipient,
locksroot,
lock,
target,
initiator,
fee,
)
assert roundtrip_serialize_mediated_transfer(mediated_transfer)
def make_lock_with_amount(amount):
return Lock(amount, 1, "a" * 32)
def make_mediated_transfer_with_amount(amount):
return MediatedTransfer(
0,
1,
ADDRESS,
amount,
ADDRESS,
"",
make_lock_with_amount(amount),
ADDRESS,
ADDRESS,
0
)
def make_mediated_transfer_with_nonce(nonce):
return MediatedTransfer(
0,
nonce,
ADDRESS,
1,
ADDRESS,
"",
make_lock_with_amount(1),
ADDRESS,
ADDRESS,
0
)
def make_mediated_transfer_with_fee(fee):
return MediatedTransfer(
0,
1,
ADDRESS,
1,
ADDRESS,
"",
make_lock_with_amount(1),
ADDRESS,
ADDRESS,
fee
)
def roundtrip_serialize_mediated_transfer(mediated_transfer):
mediated_transfer.sign(PRIVKEY, ADDRESS)
decoded_mediated_transfer = decode(mediated_transfer.encode())
assert decoded_mediated_transfer == mediated_transfer
return True
@pytest.mark.parametrize("amount", [-1, 2 ** 256])
@pytest.mark.parametrize("make", [make_lock_with_amount,
make_mediated_transfer_with_amount])
def test_amount_out_of_bounds(amount, make):
with pytest.raises(ValueError):
make(amount)
@pytest.mark.parametrize("amount", [0, 2 ** 256 - 1])
def test_mediated_transfer_amount_min_max(amount):
mediated_transfer = make_mediated_transfer_with_amount(amount)
assert roundtrip_serialize_mediated_transfer(mediated_transfer)
@pytest.mark.parametrize("nonce", [2 ** 64])
def test_mediated_transfer_nonce_out_of_bounds(nonce):
with pytest.raises(ValueError):
make_mediated_transfer_with_nonce(nonce)
@pytest.mark.parametrize("nonce", [2 ** 64 - 1])
def test_mediated_transfer_nonce_max(nonce):
mediated_transfer = make_mediated_transfer_with_nonce(nonce)
assert roundtrip_serialize_mediated_transfer(mediated_transfer)
@pytest.mark.parametrize("fee", [2 ** 256])
def test_mediated_transfer_fee_out_of_bounds(fee):
with pytest.raises(ValueError):
make_mediated_transfer_with_fee(fee)
@pytest.mark.parametrize("fee", [0, 2 ** 256 - 1])
def test_mediated_transfer_fee_min_max(fee):
mediated_transfer = make_mediated_transfer_with_fee(fee)
assert roundtrip_serialize_mediated_transfer(mediated_transfer)
| 25.668712
| 70
| 0.675908
| 0
| 0
| 0
| 0
| 1,328
| 0.3174
| 0
| 0
| 104
| 0.024857
|
fbbdd496f48c965142da201326e11323ba150849
| 6,428
|
py
|
Python
|
python/helpful_scripts/circle_packing.py
|
Oilgrim/ivs_sim
|
95dc017ef2aec32173e73dc397ba00177d4f92ce
|
[
"MIT"
] | null | null | null |
python/helpful_scripts/circle_packing.py
|
Oilgrim/ivs_sim
|
95dc017ef2aec32173e73dc397ba00177d4f92ce
|
[
"MIT"
] | null | null | null |
python/helpful_scripts/circle_packing.py
|
Oilgrim/ivs_sim
|
95dc017ef2aec32173e73dc397ba00177d4f92ce
|
[
"MIT"
] | 1
|
2019-08-07T03:16:47.000Z
|
2019-08-07T03:16:47.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 12:41:09 2017
@author: lracuna
"""
#!/usr/bin/env python
"""
This program uses a simple implementation of the ADMM algorithm to solve
the circle packing problem.
We solve
minimize 1
subject to |x_i - x_j| > 2R,
R < x_i, y_i < L - R
We put a bunch of equal radius balls inside a square.
Type --help to see the options of the program.
Frames are written to a .figs directory, which is created automatically.
Guilherme Franca
guifranca@gmail.com
November 2015
"""
import sys, os, optparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle
def nonoverlap(a, i, omega, R):
"""No overlap constraint.
This function receives a 1D array which is the row of a matrix.
Each element is a vector. i is which row we are passing.
"""
nonzeroi = np.nonzero(omega[i])[0]
x = a
n1, n2 = a[nonzeroi]
vec = n1 - n2
norm = np.linalg.norm(vec)
    if norm < 2*R: # push the balls apart
disp = R - norm/2
x[nonzeroi] = n1 + (disp/norm)*vec, n2 - (disp/norm)*vec
return x
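# A minimal sketch of what nonoverlap does (hypothetical values): with R = 1
# and two centers at distance 1, the pair is pushed out to distance 2R = 2.
# Note that x aliases a, so a is also updated in place.
#
#   >>> omega = np.array([[1, 1]])
#   >>> a = np.array([[0.0, 0.0], [1.0, 0.0]])
#   >>> nonoverlap(a, 0, omega, 1.0)
#   array([[-0.5,  0. ],
#          [ 1.5,  0. ]])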
def insidebox(a, i, omega, R, L):
"""Keep the balls inside the box."""
j = np.nonzero(omega[i])[0][0]
x = a
n = a[j]
if n[0] < R:
x[j,0] = R
elif n[0] > L-R:
x[j,0] = L-R
if n[1] < R:
x[j,1] = R
elif n[1] > L-R:
x[j,1] = L-R
return x
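# A minimal sketch of what insidebox does (hypothetical values): with R = 1
# and L = 10, a center outside the box is clamped to the band [R, L - R] in
# each coordinate.
#
#   >>> omega = np.array([[1]])
#   >>> a = np.array([[-0.3, 11.0]])
#   >>> insidebox(a, 0, omega, 1.0, 10.0)
#   array([[1., 9.]])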
def make_graph(t, z, imgpath, R, L):
"""Create a plot of a given time.
z contains a list of vectors with the position of the center of
each ball. t is the iteration time.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('t=%i' % t)
ax.add_patch(Rectangle((0,0), L, L, fill=False,
linestyle='solid', linewidth=2, color='blue'))
plt.xlim(-0.5, L+0.5)
plt.ylim(-0.5, L+0.5)
plt.axes().set_aspect('equal')
colors = iter(plt.cm.prism_r(np.linspace(0,1,N)))
for x in z:
c = next(colors)
ax.add_patch(Circle(x, radius=R, color=c, alpha=.6))
plt.axis('off')
fig.tight_layout()
fig.savefig(imgpath % t, format='png')
print imgpath
plt.close(fig)
def make_omega(N):
"""Topology matrix
Columns label variables, and rows the functions.
You must order all the "nonoverlap" functions first
and the "inside box" function last.
We also create a vectorized version of omega.
"""
o1 = []
o2 = []
one = np.array([1,1])
zero = np.array([0,0])
# TODO: this is the most expensive way of creating these matrices.
# Maybe improve this.
for i in range(N):
for j in range(i+1, N):
row1 = [0]*N
row1[i], row1[j] = 1, 1
o1.append(row1)
row2 = [zero]*N
row2[i], row2[j] = one, one
o2.append(row2)
for i in range(N):
row = [0]*N
row[i] = 1
o1.append(row)
row2 = [zero]*N
row2[i] = one
o2.append(row2)
o1 = np.array(o1)
o2 = np.array(o2)
return o1, o2
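# For example (hypothetical), make_omega(2) yields the topology matrix
#   o1 = [[1, 1],   # one nonoverlap row for the pair (0, 1)
#         [1, 0],   # one insidebox row per ball
#         [0, 1]]
# and o2 is the same pattern with each 1 replaced by the vector [1, 1].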
###############################################################################
if __name__ == '__main__':
usg = "%prog -L box -R radius -N balls -M iter [-r rate -o output]"
dsc = "Use ADMM optimization algorithm to fit balls into a box."
parser = optparse.OptionParser(usage=usg, description=dsc)
parser.add_option('-L', '--box_size', action='store', dest='L',
type='float', help='size of the box')
parser.add_option('-R', '--radius', action='store', dest='R',
type='float', help='radius of the balls')
parser.add_option('-N', '--num_balls', action='store', dest='N',
type='int', help='number of balls')
parser.add_option('-M', '--iter', action='store', dest='M',
type='int', help='number of iterations')
parser.add_option('-r', '--rate', action='store', dest='rate',
default=10, type='float', help='frame rate for the movie')
parser.add_option('-o', '--output', action='store', dest='out',
default='out.mp4', type='str', help='movie output file')
parser.add_option('-a', '--alpha', action='store', dest='alpha',
default=0.05, type='float', help='alpha parameter')
parser.add_option('-p', '--rho', action='store', dest='rho',
default=0.5, type='float', help='rho parameter')
options, args = parser.parse_args()
if not options.L:
parser.error("-L option is mandatory")
if not options.R:
parser.error("-R option is mandatory")
if not options.N:
parser.error("-N option is mandatory")
if not options.M:
parser.error("-M option is mandatory")
# initialization
L = options.L
R = options.R
N = options.N
max_iter = options.M
rate = options.rate
output = options.out
omega, omega_vec = make_omega(N)
num_funcs = len(omega)
num_vars = len(omega[0])
s = (num_funcs, num_vars, 2)
alpha = float(options.alpha)
x = np.ones(s)*omega_vec
z = np.random.random_sample(size=(num_vars, 2))+\
(L/2.)*np.ones((num_vars, 2))
zz = np.array([z]*num_funcs)*omega_vec
u = np.ones(s)*omega_vec
n = np.ones(s)*omega_vec
rho = float(options.rho)*omega_vec
# performing optimization
if not os.path.exists('.figs'):
os.makedirs('.figs')
os.system("rm -rf .figs/*")
imgpath = '.figs/fig%04d.png'
for k in range(max_iter):
n = zz - u
# proximal operator
for i in range(num_funcs):
if i < num_funcs - num_vars:
x[i] = nonoverlap(n[i], i, omega, R)
else:
x[i] = insidebox(n[i], i, omega, R, L)
m = x + u
z = np.sum(rho*m, axis=0)/np.sum(rho, axis=0)
zz = np.array([z]*num_funcs)*omega_vec
u = u + alpha*(x-zz)
if k == (max_iter-1):
make_graph(k, z, imgpath, R, L)
print "doing %i/%i" % (k, max_iter)
print "Generating animation '%s' ..." % (output)
os.system("ffmpeg -y -r %f -sameq -i %s %s > /dev/null 2>&1" % \
(rate, imgpath, output))
#os.system("rm -rf .figs/*")
#os.rmdir('.figs')
print "Done!"
print "Playing ..."
os.system("mplayer %s > /dev/null 2>&1" % output)
| 31.665025
| 79
| 0.549471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,245
| 0.349253
|
fbc1e336b5068fcf3a34a0e6490251bfd7d85954
| 7,658
|
py
|
Python
|
models/train/train_seq2seq.py
|
Chucooleg/alfred
|
250cdc8b1e75dd6acb9e20d3c616beec63307a46
|
[
"MIT"
] | 1
|
2021-07-19T01:58:51.000Z
|
2021-07-19T01:58:51.000Z
|
models/train/train_seq2seq.py
|
Chucooleg/alfred
|
250cdc8b1e75dd6acb9e20d3c616beec63307a46
|
[
"MIT"
] | null | null | null |
models/train/train_seq2seq.py
|
Chucooleg/alfred
|
250cdc8b1e75dd6acb9e20d3c616beec63307a46
|
[
"MIT"
] | null | null | null |
import os
import sys
import random
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
sys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'models'))
import torch
import pprint
import json
from data.preprocess import Dataset
from importlib import import_module
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from models.utils.helper_utils import optimizer_to
if __name__ == '__main__':
# parser
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
# settings
parser.add_argument('--seed', help='random seed', default=123, type=int)
parser.add_argument('--data', help='dataset folder', default='data/json_feat_2.1.0')
parser.add_argument('--splits', help='json file containing train/dev/test splits', default='data/splits/may17.json')
parser.add_argument('--preprocess', help='store preprocessed data to json files', action='store_true')
parser.add_argument('--pp_folder', help='folder name for preprocessed data')
parser.add_argument('--object_vocab', help='object_vocab version, should be file with .object_vocab ending. default is none', default='none')
parser.add_argument('--save_every_epoch', help='save model after every epoch (warning: consumes a lot of space)', action='store_true')
parser.add_argument('--model', help='model to use', required=True)
parser.add_argument('--gpu', help='use gpu', action='store_true')
parser.add_argument('--dout', help='where to save model', default='exp/model:{model}')
parser.add_argument('--resume', help='load a checkpoint')
# hyper parameters
parser.add_argument('--batch', help='batch size', default=8, type=int)
parser.add_argument('--epoch', help='number of epochs', default=20, type=int)
parser.add_argument('--lr', help='optimizer learning rate', default=1e-4, type=float)
parser.add_argument('--decay_epoch', help='num epoch to adjust learning rate', default=10, type=int)
parser.add_argument('--dhid', help='hidden layer size', default=512, type=int)
parser.add_argument('--dframe', help='image feature vec size', default=2500, type=int)
parser.add_argument('--demb', help='language embedding size', default=100, type=int)
parser.add_argument('--pframe', help='image pixel size (assuming square shape eg: 300x300)', default=300, type=int)
parser.add_argument('--mask_loss_wt', help='weight of mask loss', default=1., type=float)
parser.add_argument('--action_loss_wt', help='weight of action loss', default=1., type=float)
parser.add_argument('--subgoal_aux_loss_wt', help='weight of subgoal completion predictor', default=0., type=float)
parser.add_argument('--pm_aux_loss_wt', help='weight of progress monitor', default=0., type=float)
# architecture ablations
parser.add_argument('--encoder_addons', type=str, default='none', choices=['none', 'max_pool_obj', 'biattn_obj'])
parser.add_argument('--decoder_addons', type=str, default='none', choices=['none', 'aux_loss'])
parser.add_argument('--object_repr', type=str, default='type', choices=['none', 'type', 'instance'])
parser.add_argument('--reweight_aux_bce', help='reweight binary CE for auxiliary tasks', action='store_true')
# target
parser.add_argument('--predict_goal_level_instruction', help='predict abstract single goal level instruction for entire task.', action='store_true')
# dropouts
parser.add_argument('--zero_goal', help='zero out goal language', action='store_true')
parser.add_argument('--zero_instr', help='zero out step-by-step instr language', action='store_true')
parser.add_argument('--act_dropout', help='dropout rate for action input sequence', default=0., type=float)
parser.add_argument('--lang_dropout', help='dropout rate for language (goal + instr)', default=0., type=float)
parser.add_argument('--input_dropout', help='dropout rate for concatted input feats', default=0., type=float)
parser.add_argument('--vis_dropout', help='dropout rate for Resnet feats', default=0.3, type=float)
parser.add_argument('--hstate_dropout', help='dropout rate for LSTM hidden states during unrolling', default=0.3, type=float)
parser.add_argument('--attn_dropout', help='dropout rate for attention', default=0., type=float)
parser.add_argument('--actor_dropout', help='dropout rate for actor fc', default=0., type=float)
parser.add_argument('--word_dropout', help='dropout rate for word fc', default=0., type=float)
# other settings
    parser.add_argument('--train_teacher_forcing', help='use teacher forcing during training', action='store_true')
parser.add_argument('--train_student_forcing_prob', help='bernoulli probability', default=0.1, type=float)
    parser.add_argument('--temp_no_history', help='temporarily disable history inputs', action='store_true')
# debugging
parser.add_argument('--fast_epoch', help='fast epoch during debugging', action='store_true')
parser.add_argument('--dataset_fraction', help='use fraction of the dataset for debugging (0 indicates full size)', default=0, type=int)
# args and init
args = parser.parse_args()
args.dout = args.dout.format(**vars(args))
torch.manual_seed(args.seed)
# check if dataset has been preprocessed
if not os.path.exists(os.path.join(args.data, "%s.vocab" % args.pp_folder)) and not args.preprocess:
raise Exception("Dataset not processed; run with --preprocess")
# make output dir
pprint.pprint(args)
if not os.path.isdir(args.dout):
os.makedirs(args.dout)
# load train/valid/tests splits
with open(args.splits) as f:
splits = json.load(f)
# create sanity check split as a small sample of train set
    if 'train_sanity' not in splits:
print('Creating train_sanity split. Will save an updated split file.')
splits['train_sanity'] = random.sample(splits['train'], k=len(splits['valid_seen']))
with open(args.splits, 'w') as f:
json.dump(splits, f)
pprint.pprint({k: len(v) for k, v in splits.items()})
# preprocess and save
if args.preprocess:
print("\nPreprocessing dataset and saving to %s folders ... This will take a while. Do this once as required." % args.pp_folder)
dataset = Dataset(args, None)
dataset.preprocess_splits(splits, args.pp_folder)
vocab = torch.load(os.path.join(args.dout, "%s.vocab" % args.pp_folder))
else:
vocab = torch.load(os.path.join(args.data, "%s.vocab" % args.pp_folder))
# load object vocab
if args.object_vocab != 'none':
object_vocab = torch.load(os.path.join(args.data, '%s' % args.object_vocab))
else:
object_vocab = None
# load model
M = import_module('model.{}'.format(args.model))
if args.resume:
print("Loading: " + args.resume)
model, optimizer, start_epoch, start_iters = M.Module.load(args.resume)
end_epoch = args.epoch
if start_epoch >= end_epoch:
print('Checkpoint already finished {}/{} epochs.'.format(start_epoch, end_epoch))
sys.exit(0)
else:
print("Restarting at epoch {}/{}".format(start_epoch, end_epoch-1))
else:
model = M.Module(args, vocab, object_vocab)
optimizer = None
start_epoch = 0
start_iters = None
end_epoch = args.epoch
# to gpu
if args.gpu:
model = model.to(torch.device('cuda'))
model.demo_mode = False
        if optimizer is not None:
optimizer_to(optimizer, torch.device('cuda'))
# start train loop
model.run_train(splits, optimizer=optimizer, start_epoch=start_epoch, end_epoch=end_epoch, start_iters=start_iters)
| 53.180556
| 152
| 0.69953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,988
| 0.39018
|
fbc201d1881ba8593f71b1f223ddd8ebc3cad88f
| 474
|
py
|
Python
|
tests/search_test.py
|
martingaston/billy-search
|
60bdfa0cf740675c3afd86ad68f83755c9cd6596
|
[
"MIT"
] | null | null | null |
tests/search_test.py
|
martingaston/billy-search
|
60bdfa0cf740675c3afd86ad68f83755c9cd6596
|
[
"MIT"
] | 17
|
2018-11-28T19:20:01.000Z
|
2019-01-06T18:00:58.000Z
|
tests/search_test.py
|
martingaston/billy-search
|
60bdfa0cf740675c3afd86ad68f83755c9cd6596
|
[
"MIT"
] | null | null | null |
import pytest
from billy.utils.search import google_book_search
class TestGoogleBookSearch(object):
def test_search_returns_200(self, mock):
"""Ensure a basic search returns a 200 request"""
assert google_book_search("Harry Potter")["status"] == 200
def test_search_body_returns_dict(self, mock):
"""Ensure we're getting a JSON dict back from google_book_search()"""
assert type(google_book_search("Harry Potter")["body"]) is dict
| 36.461538
| 77
| 0.721519
| 407
| 0.85865
| 0
| 0
| 0
| 0
| 0
| 0
| 160
| 0.337553
|
fbc22fb24183b91ed5d90aa53daf5acd378bad49
| 2,981
|
py
|
Python
|
src/config.py
|
Clloud/MostPopularRoute
|
fd89c103b1635e4028913263fb667949d35c3986
|
[
"MIT"
] | 7
|
2019-08-22T06:34:02.000Z
|
2021-12-20T00:00:36.000Z
|
src/config.py
|
Clloud/MostPopularRoute
|
fd89c103b1635e4028913263fb667949d35c3986
|
[
"MIT"
] | null | null | null |
src/config.py
|
Clloud/MostPopularRoute
|
fd89c103b1635e4028913263fb667949d35c3986
|
[
"MIT"
] | 2
|
2022-01-15T11:48:57.000Z
|
2022-02-10T05:24:38.000Z
|
import math
class Config_1:
DATASET_ROOT_DIR = '../data/test1/Data' # The data set root directory
    DATASET_SCALE = 0 # How many users' trajectory data are chosen
    TRAJACTORY_SCALE = 20 # How many trajectories are chosen per user
RANGE = { # To pick trajectory points within the range
'status': False
}
GROUP_SIZE_THRESHOLD = 3 # group size threshold φ
COHERENCE_THRESHOLD = 0.4 # coherence threshold τ
SCALING_FACTOR = 1.5 # scaling factor δ
TURNING_ALPHA = 5 # tuning parameter α
TURNING_BETA = 2 # tuning parameter β
RADIUS = SCALING_FACTOR * \
((-math.log(COHERENCE_THRESHOLD)) ** (1 / TURNING_ALPHA))
class Config_2:
DATASET_ROOT_DIR = '../data/test2/Data' # The data set root directory
    DATASET_SCALE = 3 # How many users' trajectory data are chosen
    TRAJACTORY_SCALE = 4 # How many trajectories are chosen per user
RANGE = { # To pick trajectory points within the range
'status': True,
'longitude_upper_bound': 116.32,
'longitude_lower_bound': 116.304,
'latitude_upper_bound': 40.018,
'latitude_lower_bound': 40.004,
}
GROUP_SIZE_THRESHOLD = 3 # group size threshold φ
COHERENCE_THRESHOLD = 0.99 # coherence threshold τ
SCALING_FACTOR = 15e-4 # scaling factor δ
TURNING_ALPHA = 5 # tuning parameter α
TURNING_BETA = 2 # tuning parameter β
RADIUS = SCALING_FACTOR * \
((-math.log(COHERENCE_THRESHOLD)) ** (1 / TURNING_ALPHA))
class Config_3:
DATASET_ROOT_DIR = '../data/test3/Data' # The data set root directory
    DATASET_SCALE = 0 # How many users' trajectory data are chosen
    TRAJACTORY_SCALE = 20 # How many trajectories are chosen per user
RANGE = { # To pick trajectory points within the range
'status': False
}
GROUP_SIZE_THRESHOLD = 3 # group size threshold φ
COHERENCE_THRESHOLD = 0.49 # coherence threshold τ
SCALING_FACTOR = 1.1 # scaling factor δ
TURNING_ALPHA = 5 # tuning parameter α
TURNING_BETA = 2 # tuning parameter β
RADIUS = SCALING_FACTOR * \
((-math.log(COHERENCE_THRESHOLD)) ** (1 / TURNING_ALPHA))
class Config(Config_3):
__attr__ = ['DATASET_ROOT_DIR', 'DATASET_SCALE', 'TRAJACTORY_SCALE', 'RANGE',
'GROUP_SIZE_THRESHOLD', 'COHERENCE_THRESHOLD', 'SCALING_FACTOR',
'TURNING_ALPHA', 'TURNING_BETA', 'RADIUS']
def __str__(self):
s = ""
for attr in self.__attr__:
s += attr + ' ' + str(getattr(self, attr)) + '\n'
return s
def __repr__(self):
return self.__str__()
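# A note on the RADIUS formula above (my reading, not stated in this file):
# it appears to invert a coherence function of the form exp(-(d / δ)**α),
# giving the distance at which coherence drops to the threshold τ:
#   RADIUS = δ * (-ln τ) ** (1 / α)
# e.g. with Config_3's values: 1.1 * (-math.log(0.49)) ** (1 / 5) ≈ 1.028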
| 40.835616
| 85
| 0.578665
| 2,974
| 0.992657
| 0
| 0
| 0
| 0
| 0
| 0
| 1,153
| 0.384846
|
fbc2e6fec230818b054f8a5d8e0894e49655314a
| 766
|
py
|
Python
|
Python/first01.py
|
praseedpai/WhetYourApettite
|
d71780f5b52401eea71e631ba030270fca5d6005
|
[
"MIT"
] | null | null | null |
Python/first01.py
|
praseedpai/WhetYourApettite
|
d71780f5b52401eea71e631ba030270fca5d6005
|
[
"MIT"
] | null | null | null |
Python/first01.py
|
praseedpai/WhetYourApettite
|
d71780f5b52401eea71e631ba030270fca5d6005
|
[
"MIT"
] | null | null | null |
import sys
from sys import exit
if len(sys.argv) == 1 :
print ("No command line argument" )
sys.exit()
#else :
# print ("rest of the program ")
#numbers = sys.argv[1:]
#print (sorted(numbers, key=lambda x: float(x)))
numbers = []
i = 1
n = len(sys.argv)
while i < n:
    numbers.append(float(sys.argv[i]))  # convert to float so the sort is numeric, not lexicographic
    i = i + 1
# bubbleSort(numbers)
n = len(numbers)
# Traverse through all array elements
for i in range(n):
# Last i elements are already in place
for j in range(0, n-i-1):
# traverse the array from 0 to n-i-1
# Swap if the element found is greater
# than the next element
if numbers[j] > numbers[j+1] :
numbers[j], numbers[j+1] = numbers[j+1], numbers[j]
print(numbers)
| 15.632653
| 67
| 0.590078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 336
| 0.438642
|
fbc2ffbc7159afa22106299ea24d3e4ca2b28846
| 4,553
|
py
|
Python
|
library/gui.py
|
bwbryant1/Library
|
be8f9bb4fef448ca8630cdae36136bf16b691412
|
[
"MIT"
] | null | null | null |
library/gui.py
|
bwbryant1/Library
|
be8f9bb4fef448ca8630cdae36136bf16b691412
|
[
"MIT"
] | null | null | null |
library/gui.py
|
bwbryant1/Library
|
be8f9bb4fef448ca8630cdae36136bf16b691412
|
[
"MIT"
] | null | null | null |
from . import dbFuncs
import sys, os
import pkg_resources
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, qApp, QHBoxLayout, QMainWindow, QAction, QMessageBox, QFileDialog, QPushButton
from PyQt5.QtGui import QIcon
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow,self).__init__()
self.resource_package = __name__
self.iconsFolder = 'icons'
self.setWindowTitle('Library Manager 0.1')
self.setMinimumSize(800,400)
self.createActions()
self.createMenuBar()
self.createToolBar()
def getFileResource(self,name,type):
if type == 'icon':
self.resourceFolder = self.iconsFolder
resource_path = '/'.join((self.resourceFolder, name))
return pkg_resources.resource_filename(self.resource_package, resource_path)
def createActions(self):
self.newAct = QAction(QIcon(self.getFileResource('sync.svg','icon')),'&New Library', self)
self.newAct.setShortcut('Ctrl+n')
self.newAct.setStatusTip('New Library')
self.newAct.triggered.connect(self.newDialog)
self.openAct = QAction(QIcon(self.getFileResource('gear.svg','icon')),'&Open Library', self)
self.openAct.setShortcut('Ctrl+o')
self.openAct.setStatusTip('Open Library')
self.openAct.triggered.connect(self.openDialog)
self.exitAct = QAction(QIcon(self.getFileResource('x.svg','icon')),'&Exit LibMan', self)
self.exitAct.setShortcut('Ctrl+Q')
self.exitAct.setStatusTip('Exit application')
self.exitAct.triggered.connect(qApp.quit)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
def createMenuBar(self):
menu = self.menuBar()
menu.setNativeMenuBar(False)
fileMenu = menu.addMenu('File')
fileMenu.addAction(self.newAct)
fileMenu.addAction('Recent')
fileMenu.addSeparator()
fileMenu.addAction('Import items')
fileMenu.addAction('Export items')
fileMenu.addAction(self.openAct)
fileMenu.addSeparator()
fileMenu.addAction(self.exitAct)
editMenu = menu.addMenu('Edit')
editMenu.addAction('Undo')
editMenu.addAction('Redo')
editMenu.addAction('Add Selected to Reading List')
editMenu.addAction('Delete Selected')
editMenu.addAction('Preferences')
viewMenu = menu.addMenu('View')
viewMenu.addAction('Hide Sidebar')
viewMenu.addAction('Increase List Size')
viewMenu.addAction('Decrease List Size')
viewMenu.addAction('Go Fullscreen')
aboutMenu = menu.addMenu('About')
aboutMenu.addAction(self.aboutAct)
def createToolBar(self):
self.toolbar = self.addToolBar("Toolbar")
self.toolbar.addAction(self.openAct)
self.toolbar.addSeparator()
self.toolbar.addAction(self.exitAct)
def setStatusBar(self,msg):
self.statusBar().showMessage(str(msg))
def about(self):
QMessageBox.about(self, "About Library Manager",
"The <b>Library Manger</b> app was made for CYEN 481"
"<br>Its Authors are: Brandon Bryant, Caroline Fontenot, and Sai Spurthy")
def openDialog(self):
fileName = QFileDialog.getOpenFileName(self, 'Open file')
libPath = fileName[0]
msg = QMessageBox.information(self, 'Open Library', "Opening your library located at:" + libPath , QMessageBox.Ok | QMessageBox.Cancel)
if msg == QMessageBox.Ok:
self.listLibrary(libPath)
else:
print('Cancel clicked.')
def newDialog(self):
fileName, filter = QFileDialog.getSaveFileName(self, 'Save file', '', filter ="Allfiles (*)")
locationOf = os.getcwd()
nameOf = os.path.basename(fileName)
dbFuncs.makeNewLibrary(nameOf,locationOf)
def listLibrary(self,libPath):
bookListTuple = dbFuncs.listBooksInLibrary(libPath)['bookListTuple']
listLen = len(bookListTuple)
        # For testing: print each book title and its bookmark
for _book,_bookmark in bookListTuple:
print("Title: "+ _book + " | Bookmark: "+ str(_bookmark))
def launch():
app = None
if ( not QApplication.instance() ):
# create a new application
app = QApplication(sys.argv)
window = MainWindow()
window.setStatusBar("Welcome Back")
window.showMaximized()
window.show()
app.exec_()
| 36.717742
| 143
| 0.648144
| 4,038
| 0.886888
| 0
| 0
| 0
| 0
| 0
| 0
| 795
| 0.17461
|
fbc4f59dc823de1070c620320ec7ff2dee6fbd35
| 135
|
py
|
Python
|
du/ps_utils.py
|
diogo149/doo
|
d83a1715fb9d4e5eac9f5d3d384a45cfc26fec2f
|
[
"MIT"
] | 1
|
2016-11-17T06:34:39.000Z
|
2016-11-17T06:34:39.000Z
|
du/ps_utils.py
|
diogo149/doo
|
d83a1715fb9d4e5eac9f5d3d384a45cfc26fec2f
|
[
"MIT"
] | null | null | null |
du/ps_utils.py
|
diogo149/doo
|
d83a1715fb9d4e5eac9f5d3d384a45cfc26fec2f
|
[
"MIT"
] | null | null | null |
import os
import psutil
import time
def process_time():
    """Return the wall-clock seconds elapsed since the current process was created."""
    p = psutil.Process(os.getpid())
    return time.time() - p.create_time()
| 15
| 40
| 0.688889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
fbc4fa09de1f509b411b286d8439548aa1647a45
| 544
|
py
|
Python
|
config.py
|
Laikos38/rockopy
|
3816ebb8466a27c65e76a387abc36c96df688ef7
|
[
"CC0-1.0"
] | null | null | null |
config.py
|
Laikos38/rockopy
|
3816ebb8466a27c65e76a387abc36c96df688ef7
|
[
"CC0-1.0"
] | null | null | null |
config.py
|
Laikos38/rockopy
|
3816ebb8466a27c65e76a387abc36c96df688ef7
|
[
"CC0-1.0"
] | null | null | null |
# =================================================
# CLIENT CONFIGURATIONS
# =================================================
CLIENT_ID=''
CLIENT_SECRET=''
REDIRECT_URI='http://ROCKOPY/'
# =================================================
# SERVER CONFIGURATIONS
# =================================================
SERVER_IP = "127.0.0.1"
SERVER_PORT = 5043
# =================================================
# OTHER OPTIONS
# =================================================
# how many track search results to show:
TRACKS_TO_SEARCH = 5
| 24.727273
| 51
| 0.318015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 436
| 0.801471
|
fbc9208f2d120f0ad2e9b2264fc8cd7812726bef
| 1,356
|
py
|
Python
|
upcfcardsearch/c269.py
|
ProfessorSean/Kasutamaiza
|
7a69a69258f67bbb88bebbac6da4e6e1434947e6
|
[
"MIT"
] | null | null | null |
upcfcardsearch/c269.py
|
ProfessorSean/Kasutamaiza
|
7a69a69258f67bbb88bebbac6da4e6e1434947e6
|
[
"MIT"
] | null | null | null |
upcfcardsearch/c269.py
|
ProfessorSean/Kasutamaiza
|
7a69a69258f67bbb88bebbac6da4e6e1434947e6
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from discord.utils import get
class c269(commands.Cog, name="c269"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Vir_the_True_Elementalist', aliases=['c269', 'Elementalist_1', 'Distasta_Master_1'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Vir the True Elementalist',
color=0xFDE68A)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2360695.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3 (Elementalist/Distasta Master)', inline=True)
embed.add_field(name='Type (Attribute)', value='Spellcaster/Normal (LIGHT)', inline=False)
embed.add_field(name='Level (ATK/DEF)', value='3 (1200/950)', inline=False)
embed.add_field(name='Lore Text', value='Some say that whenever a disaster occurs, Vir is near by practicing his magic. However, if Vir ever learns the secrets of the Book of Natural Disasters, with knowledge of the ancient scriptures, he will be able to tame the Distasta Masters and bring the world into a new age of doom.', inline=False)
embed.set_footer(text='Set Code: GMMP')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(c269(bot))
| 56.5
| 348
| 0.702802
| 1,219
| 0.898968
| 0
| 0
| 1,109
| 0.817847
| 992
| 0.731563
| 632
| 0.466077
|
fbc9265e10993b34830de5472d64bcc90ad75783
| 6,116
|
py
|
Python
|
test/method_comparison.py
|
kiyami/stad
|
492f5d4467553159ba11a17e46bae43e19fd7b6a
|
[
"MIT"
] | 2
|
2020-03-21T20:36:20.000Z
|
2021-09-02T20:02:17.000Z
|
test/method_comparison.py
|
kiyami/stad
|
492f5d4467553159ba11a17e46bae43e19fd7b6a
|
[
"MIT"
] | null | null | null |
test/method_comparison.py
|
kiyami/stad
|
492f5d4467553159ba11a17e46bae43e19fd7b6a
|
[
"MIT"
] | null | null | null |
from soad import AsymmetricData as asyd
import matplotlib.pyplot as plt
# This script demonstrates the difference between methods of handling asymmetric errors.
class Data:
control_variable_parameters = [10.0, 1.0, 1.0]
control_variable = []
variable_list = []
def __init__(self, mu, sigma_n, sigma_p):
self.mu = float(mu)
self.sigma_n = float(sigma_n)
self.sigma_p = float(sigma_p)
self.avg_std = (self.sigma_n + self.sigma_p) * 0.5
def get_params(self):
return [float(self.mu), float(self.sigma_n), float(self.sigma_p)]
@classmethod
def set_control_variable(cls):
cls.control_variable = Data(*cls.control_variable_parameters)
@classmethod
def print_variables(cls):
for variable in cls.variable_list:
print(variable.get_params())
@staticmethod
def calculate_asym_index(sigma_n, sigma_p):
sigma_n = float(sigma_n)
sigma_p = float(sigma_p)
return float((sigma_p - sigma_n) / (sigma_p + sigma_n))
@staticmethod
def calculate_sigma_p(sigma_n, asym_index):
sigma_n = float(sigma_n)
asym_index = float(asym_index)
return float(sigma_n * (1.0 + asym_index) / (1.0 - asym_index))
@staticmethod
def calculate_sigma_n(sigma_p, asym_index):
sigma_p = float(sigma_p)
asym_index = float(asym_index)
return float(sigma_p * (1.0 - asym_index) / (1.0 + asym_index))
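# Worked example of the asymmetry-index helpers above (hypothetical values):
#   Data.calculate_asym_index(1.0, 1.5) == (1.5 - 1.0) / (1.5 + 1.0) == 0.2
#   Data.calculate_sigma_p(1.0, 0.2)    == 1.0 * 1.2 / 0.8 == 1.5
#   Data.calculate_sigma_n(1.5, 0.2)    == 1.5 * 0.8 / 1.2 == 1.0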
def generate_single_variable(*args):
Data.variable_list.append(Data(*args))
def generate_control_variable(*args):
Data.control_variable = Data(*args)
def generate_multiple_variable():
n = 15
asym_index = 0.2
mu, sigma_n, sigma_p = Data.control_variable.get_params()
start = float(sigma_p)
stop = float(Data.calculate_sigma_p(sigma_n, asym_index))
step = (stop - start) / float(n)
for i in range(n+1):
temp_sigma_p = float(sigma_p) + (float(i)*float(step))
print("###### New sigma_p: ", temp_sigma_p)
generate_single_variable(mu, sigma_n, temp_sigma_p)
class AverageMethod:
result_list = []
@classmethod
def sum(cls, val_1):
val_2 = Data.control_variable
mu_result = val_1.mu + val_2.mu
std_result = (val_1.avg_std**2.0 + val_2.avg_std**2.0)**0.5
cls.result_list.append(Data(mu_result, std_result, std_result))
@classmethod
def mul(cls, val_1):
val_2 = Data.control_variable
mu_result = val_1.mu * val_2.mu
std_result = mu_result * ((val_1.avg_std / val_1.mu)**2.0 + (val_2.avg_std / val_2.mu)**2.0)**0.5
cls.result_list.append(Data(mu_result, std_result, std_result))
@classmethod
def print_results(cls):
print("Results for AverageMethod")
for result in cls.result_list:
print(result.get_params())
class MonteCarloMethod:
N = 50000
result_list = []
control_variable = []
@classmethod
def generate_control_variable(cls):
mu, sigma_n, sigma_p = Data.control_variable.get_params()
cls.control_variable = asyd(mu, sigma_n, sigma_p, N=cls.N)
@classmethod
def sum(cls, val):
if not cls.control_variable:
cls.generate_control_variable()
mu, sigma_n, sigma_p = val.get_params()
asym_val = asyd(mu, sigma_n, sigma_p, N=cls.N)
result = cls.control_variable + asym_val
cls.result_list.append(result)
@classmethod
def mul(cls, val):
if not cls.control_variable:
cls.generate_control_variable()
mu, sigma_n, sigma_p = val.get_params()
asym_val = asyd(mu, sigma_n, sigma_p, N=cls.N)
result = cls.control_variable * asym_val
cls.result_list.append(result)
@classmethod
def print_results(cls):
print("Results for MonteCarloMethod")
for result in cls.result_list:
print([result.mu, result.sigma_n, result.sigma_p])
class CompareMethods:
methods = [AverageMethod, MonteCarloMethod]
@classmethod
def calculate_sum(cls):
for variable in Data.variable_list:
for method in cls.methods:
method.sum(variable)
@classmethod
def calculate_mul(cls):
for variable in Data.variable_list:
for method in cls.methods:
method.mul(variable)
@classmethod
def print_results(cls):
print("Result Comparison")
for method in cls.methods:
method.print_results()
    # TODO: fix the plot
@classmethod
def plot_results(cls, save=True):
plt.clf()
fig, ax = plt.subplots(figsize=(6, 3))
plot_counter = 0
colors = ["deepskyblue", "tomato"]
ecolors = ["lightskyblue", "salmon"]
plot_shift_delta = 0.002
for method in cls.methods:
plot_shift = plot_counter * plot_shift_delta
print(plot_counter, plot_shift)
x = [(Data.calculate_asym_index(x.sigma_n, x.sigma_p)+plot_shift) for x in Data.variable_list]
print("x", x)
y = [x.mu for x in method.result_list]
yerr_neg = [x.sigma_n for x in method.result_list]
yerr_poz = [x.sigma_p for x in method.result_list]
plt.errorbar(x, y, yerr=[yerr_neg, yerr_poz], fmt='o', color=colors[plot_counter], ecolor=ecolors[plot_counter], elinewidth=3, capsize=0)
plot_counter += 1
plt.axhline(y=AverageMethod.result_list[0].mu, color="black", linewidth=2)
plt.title("Method Comparison")
plt.xlabel("Asymmetry Index")
plt.ylabel("Result")
plt.grid("True")
ax.set_facecolor('whitesmoke')
if save:
plt.savefig("comparison.png", dpi=100)
plt.show()
if __name__ == "__main__":
Data.set_control_variable()
generate_multiple_variable()
Data.print_variables()
CompareMethods.calculate_sum()
#CompareMethods.calculate_mul()
CompareMethods.print_results()
CompareMethods.plot_results(save=True)
| 29.980392
| 149
| 0.641269
| 5,040
| 0.823933
| 0
| 0
| 4,307
| 0.704103
| 0
| 0
| 387
| 0.063266
|
fbca8cfcb196a659d097cf5eeb8837d15ab42525
| 3,211
|
py
|
Python
|
python/ex4/ex4.py
|
SHIMengjie/Machine-Learning-Andrew-Ng-Matlab
|
2f54790e33dc538aea1534f40342791fb7c3abb1
|
[
"MIT"
] | 6
|
2017-12-27T04:47:18.000Z
|
2018-03-02T14:28:38.000Z
|
python/ex4/ex4.py
|
SHIMengjie/Machine-Learning-Andrew-Ng-Matlab
|
2f54790e33dc538aea1534f40342791fb7c3abb1
|
[
"MIT"
] | null | null | null |
python/ex4/ex4.py
|
SHIMengjie/Machine-Learning-Andrew-Ng-Matlab
|
2f54790e33dc538aea1534f40342791fb7c3abb1
|
[
"MIT"
] | 2
|
2018-05-31T08:04:40.000Z
|
2018-08-26T13:37:21.000Z
|
import scipy.io as scio
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
from displayData import display_data
from costFunction import nn_cost_function
from sigmoid import sigmoid_gradient
from randInitializeWeights import rand_init_weights
from checkNNGradients import check_nn_gradients
from predict import predict_nn
# ==================== 1. Load the data and display random samples ==============================
# Read the .mat file with scipy.io; data is a dict
data = scio.loadmat('ex4data1.mat')
# Fetch the input data and the ground-truth outputs by key
# print(type(Y), type(X))  # both X and Y are numpy.ndarray
X = data['X']
Y = data['y'].flatten()
# Randomly pick 100 of the samples and display them
m = X.shape[0]
# Generate a random permutation of [0, m-1]
rand_indices = np.random.permutation(range(m))
selected = X[rand_indices[0:100], :]
# Display sample handwritten digits
display_data(selected)
# plt.show()
# ==================== 2. Load parameters and compute the cost ==================================
weights = scio.loadmat('ex4weights.mat')
theta1 = weights['Theta1'] # 25*401
theta2 = weights['Theta2'] # 10*26
# theta1.flatten() turns the array into a 1-D vector, equivalent to theta1.reshape(theta1.size)
# Concatenate the two flattened vectors; nn_paramters.shape == (10285,)
nn_paramters = np.concatenate([theta1.flatten(),theta2.flatten()],axis =0)
# Set the network layer sizes
input_layer = 400
hidden_layer = 25
out_layer = 10
# Compute the cost
lmd = 0
cost,grad = nn_cost_function(X,Y,nn_paramters,input_layer,hidden_layer,out_layer,lmd)
print('Cost at parameters (loaded from ex4weights): {:0.6f}\n(This value should be about 0.287629)'.format(cost))
# Include the regularization term
lmd = 1
cost,grad = nn_cost_function(X,Y,nn_paramters,input_layer,hidden_layer,out_layer,lmd)
print('Cost at parameters (loaded from ex4weights): {:0.6f}\n(This value should be about 0.383770)'.format(cost))
# Check the sigmoid gradient
g = sigmoid_gradient(np.array([-1, -0.5, 0, 0.5, 1]))
print('Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n{}'.format(g))
# =========================== 3. Initialize network parameters =================================
random_theta1 = rand_init_weights(input_layer,hidden_layer)
random_theta2 = rand_init_weights(hidden_layer,out_layer)
rand_nn_parameters = np.concatenate([random_theta1.flatten(),random_theta2.flatten()])
# Check the backpropagation implementation
lmd = 3
check_nn_gradients(lmd)
debug_cost, _ = nn_cost_function(X,Y,nn_paramters,input_layer,hidden_layer,out_layer,lmd)
print('Cost at (fixed) debugging parameters (w/ lambda = {}): {:0.6f}\n(for lambda = 3, this value should be about 0.576051)'.format(lmd, debug_cost))
# ========================== 4. Train the NN ==========================================
lmd = 1
def cost_func(p):
return nn_cost_function(X,Y,p,input_layer,hidden_layer,out_layer,lmd)[0]
def grad_func(p):
return nn_cost_function(X,Y,p,input_layer,hidden_layer,out_layer,lmd)[1]
nn_params, *unused = opt.fmin_cg(cost_func, fprime=grad_func, x0=rand_nn_parameters, maxiter=400, disp=True, full_output=True)
# Obtain theta1 and theta2 back from nn_params
theta1 = nn_params[:hidden_layer * (input_layer + 1)].reshape(hidden_layer, input_layer + 1)
theta2 = nn_params[hidden_layer * (input_layer + 1):].reshape(out_layer, hidden_layer + 1)
# ======================= 5. Visualize the weights and predict ===================================
display_data(theta1[:, 1:])
plt.show()
pred = predict_nn(X,theta1, theta2)
print('Training set accuracy: {}'.format(np.mean(pred == Y)*100))
| 39.158537
| 150
| 0.690439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,531
| 0.429935
|
fbca957948e0eda8e87f337b852c488037b3df59
| 2,432
|
py
|
Python
|
examples/complex_filtering.py
|
ITgladiator/tortoise-orm
|
9a2bd0edd078ae12e5837c22f88c19f8cc84e7d7
|
[
"Apache-2.0"
] | null | null | null |
examples/complex_filtering.py
|
ITgladiator/tortoise-orm
|
9a2bd0edd078ae12e5837c22f88c19f8cc84e7d7
|
[
"Apache-2.0"
] | 5
|
2020-03-24T17:23:14.000Z
|
2021-12-13T20:12:49.000Z
|
examples/complex_filtering.py
|
ITgladiator/tortoise-orm
|
9a2bd0edd078ae12e5837c22f88c19f8cc84e7d7
|
[
"Apache-2.0"
] | null | null | null |
"""
This example shows some more complex querying
Key points are filtering by related names and using Q objects
"""
import asyncio
from tortoise import Tortoise, fields
from tortoise.models import Model
from tortoise.query_utils import Q
class Tournament(Model):
id = fields.IntField(pk=True)
name = fields.TextField()
def __str__(self):
return self.name
class Event(Model):
id = fields.IntField(pk=True)
name = fields.TextField()
tournament = fields.ForeignKeyField("models.Tournament", related_name="events")
participants = fields.ManyToManyField(
"models.Team", related_name="events", through="event_team"
)
def __str__(self):
return self.name
class Team(Model):
id = fields.IntField(pk=True)
name = fields.TextField()
def __str__(self):
return self.name
async def run():
await Tortoise.init(config_file="config.json")
await Tortoise.generate_schemas()
tournament = Tournament(name="Tournament")
await tournament.save()
second_tournament = Tournament(name="Tournament 2")
await second_tournament.save()
event_first = Event(name="1", tournament=tournament)
await event_first.save()
event_second = await Event.create(name="2", tournament=second_tournament)
await Event.create(name="3", tournament=tournament)
await Event.create(name="4", tournament=second_tournament)
await Event.filter(tournament=tournament)
team_first = Team(name="First")
await team_first.save()
team_second = Team(name="Second")
await team_second.save()
await team_first.events.add(event_first)
await event_second.participants.add(team_second)
print(
await Event.filter(Q(id__in=[event_first.id, event_second.id]) | Q(name="3"))
.filter(participants__not=team_second.id)
.order_by("tournament__id")
.distinct()
)
print(await Team.filter(events__tournament_id=tournament.id).order_by("-events__name"))
print(
await Tournament.filter(events__name__in=["1", "3"])
.order_by("-events__participants__name")
.distinct()
)
print(await Team.filter(name__icontains="CON"))
print(await Tournament.filter(events__participants__name__startswith="Fir"))
print(await Tournament.filter(id__icontains=1).count())
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
| 27.022222
| 91
| 0.701891
| 600
| 0.246711
| 0
| 0
| 0
| 0
| 1,480
| 0.608553
| 331
| 0.136102
|
fbcb4e1d16ab428716daad73e4a82b2e4b6883b2
| 3,012
|
py
|
Python
|
igmp/packet/PacketIpHeader.py
|
pedrofran12/igmp
|
fec8d366536cbe10b0fe1c14f6a82cd03fe0772a
|
[
"MIT"
] | 3
|
2020-08-07T21:26:09.000Z
|
2021-06-12T10:21:41.000Z
|
igmp/packet/PacketIpHeader.py
|
pedrofran12/igmp
|
fec8d366536cbe10b0fe1c14f6a82cd03fe0772a
|
[
"MIT"
] | 2
|
2021-08-25T14:58:54.000Z
|
2022-01-26T12:00:13.000Z
|
igmp/packet/PacketIpHeader.py
|
pedrofran12/igmp
|
fec8d366536cbe10b0fe1c14f6a82cd03fe0772a
|
[
"MIT"
] | 3
|
2022-01-24T12:59:00.000Z
|
2022-03-25T14:28:56.000Z
|
import struct
import socket
class PacketIpHeader:
"""
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version|
+-+-+-+-+
"""
IP_HDR = "! B"
IP_HDR_LEN = struct.calcsize(IP_HDR)
def __init__(self, ver, hdr_len):
self.version = ver
self.hdr_length = hdr_len
def __len__(self):
return self.hdr_length
@staticmethod
def parse_bytes(data: bytes):
(verhlen, ) = struct.unpack(PacketIpHeader.IP_HDR, data[:PacketIpHeader.IP_HDR_LEN])
ver = (verhlen & 0xF0) >> 4
print("ver:", ver)
return PACKET_HEADER.get(ver).parse_bytes(data)
class PacketIpv4Header(PacketIpHeader):
"""
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| IHL |Type of Service| Total Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |Flags| Fragment Offset |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Time to Live | Protocol | Header Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Options | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
IP_HDR = "! BBH HH BBH 4s 4s"
IP_HDR_LEN = struct.calcsize(IP_HDR)
def __init__(self, ver, hdr_len, ttl, proto, ip_src, ip_dst):
super().__init__(ver, hdr_len)
self.ttl = ttl
self.proto = proto
self.ip_src = ip_src
self.ip_dst = ip_dst
def __len__(self):
return self.hdr_length
@staticmethod
def parse_bytes(data: bytes):
(verhlen, tos, iplen, ipid, frag, ttl, proto, cksum, src, dst) = \
struct.unpack(PacketIpv4Header.IP_HDR, data[:PacketIpv4Header.IP_HDR_LEN])
ver = (verhlen & 0xf0) >> 4
hlen = (verhlen & 0x0f) * 4
'''
"VER": ver,
"HLEN": hlen,
"TOS": tos,
"IPLEN": iplen,
"IPID": ipid,
"FRAG": frag,
"TTL": ttl,
"PROTO": proto,
"CKSUM": cksum,
"SRC": socket.inet_ntoa(src),
"DST": socket.inet_ntoa(dst)
'''
src_ip = socket.inet_ntoa(src)
dst_ip = socket.inet_ntoa(dst)
return PacketIpv4Header(ver, hlen, ttl, proto, src_ip, dst_ip)
PACKET_HEADER = {
4: PacketIpv4Header,
}
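# A minimal usage sketch (my assumption, not part of this module): given the
# raw bytes of an IPv4 packet, the version nibble selects the concrete parser:
#
#   raw = sock.recv(4096)                  # 'sock' is a hypothetical raw socket
#   hdr = PacketIpHeader.parse_bytes(raw)  # dispatches to PacketIpv4Header
#   print(hdr.ip_src, hdr.ip_dst, hdr.proto)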
| 33.098901
| 92
| 0.404714
| 2,931
| 0.973108
| 0
| 0
| 988
| 0.328021
| 0
| 0
| 1,621
| 0.538181
|
fbcbce60af2ea40ef9771cd4e2bb6d4016db9a38
| 1,547
|
py
|
Python
|
shopping_mall/shopping_mall/utils/fastdfs/fdfs_storage.py
|
lzy00001/SHOP_CENTER
|
1e26b9694afc89d86f2f3db9c0b0ff1f98ab1369
|
[
"MIT"
] | null | null | null |
shopping_mall/shopping_mall/utils/fastdfs/fdfs_storage.py
|
lzy00001/SHOP_CENTER
|
1e26b9694afc89d86f2f3db9c0b0ff1f98ab1369
|
[
"MIT"
] | null | null | null |
shopping_mall/shopping_mall/utils/fastdfs/fdfs_storage.py
|
lzy00001/SHOP_CENTER
|
1e26b9694afc89d86f2f3db9c0b0ff1f98ab1369
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from fdfs_client.client import Fdfs_client
@deconstructible
class FastDFSStorage(Storage):
def __init__(self, base_url=None, client_conf=None):
"""
初始化
:param base_url: 用于构造图片完整路径使用,图片服务器的域名
:param client_conf: FastDFS客户端配置文件的路径
"""
if base_url is None:
base_url = settings.FDFS_URL
self.base_url = base_url
if client_conf is None:
client_conf = settings.FDFS_CLIENT_CONF
self.client_conf = client_conf
def _open(self, name, mode='rb'):
"""
用不到打开文件,所以省略
"""
pass
def _save(self, name, content):
"""
在FastDFS中保存文件
:param name: 传入的文件名
:param content: 文件内容
:return: 保存到数据库中的FastDFS的文件名
"""
client = Fdfs_client(self.client_conf)
ret = client.upload_by_buffer(content.read())
if ret.get("Status") != "Upload successed.":
raise Exception("upload file failed")
file_name = ret.get("Remote file_id")
return file_name
def url(self, name):
"""
返回文件的完整URL路径
:param name: 数据库中保存的文件名
:return: 完整的URL
"""
return self.base_url + name
def exists(self, name):
"""
判断文件是否存在,FastDFS可以自行解决文件的重名问题
所以此处返回False,告诉Django上传的都是新文件
:param name: 文件名
:return: False
"""
return False
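# A minimal usage sketch (assumed setting names, matching the lookups above;
# the dotted storage path is hypothetical and depends on the project layout):
#
#   # settings.py
#   FDFS_URL = 'http://image.example.com/'          # hypothetical domain
#   FDFS_CLIENT_CONF = 'utils/fastdfs/client.conf'  # hypothetical path
#   DEFAULT_FILE_STORAGE = 'shopping_mall.utils.fastdfs.fdfs_storage.FastDFSStorage'
#
# With DEFAULT_FILE_STORAGE pointed here, Django FileField/ImageField uploads
# go through _save() and the returned FastDFS file id is stored in the database.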
| 26.672414
| 56
| 0.599871
| 1,628
| 0.893033
| 0
| 0
| 1,645
| 0.902359
| 0
| 0
| 861
| 0.472298
|
fbcc3437214daafca043a3fde76d32524788bacf
| 664
|
py
|
Python
|
src/sovereign/server.py
|
bochuxt/envoy-control-plane-python3
|
6d63ad6e1ecff5365bb571f0021951b066f8e270
|
[
"Apache-2.0"
] | 1
|
2020-07-08T19:37:09.000Z
|
2020-07-08T19:37:09.000Z
|
src/sovereign/server.py
|
bochuxt/envoy-control-plane-python3
|
6d63ad6e1ecff5365bb571f0021951b066f8e270
|
[
"Apache-2.0"
] | null | null | null |
src/sovereign/server.py
|
bochuxt/envoy-control-plane-python3
|
6d63ad6e1ecff5365bb571f0021951b066f8e270
|
[
"Apache-2.0"
] | null | null | null |
import gunicorn.app.base
from sovereign import asgi_config
from sovereign.app import app
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, application, options=None):
self.options = options or {}
self.application = application
super().__init__()
def load_config(self):
for key, value in self.options.items():
self.cfg.set(key.lower(), value)
def load(self):
return self.application
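# A minimal sketch of the options this wrapper expects (hypothetical values;
# any standard gunicorn setting key such as 'bind' or 'workers' can be passed):
#
#   StandaloneApplication(app, {'bind': '0.0.0.0:8080', 'workers': 2}).run()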
def main():
asgi = StandaloneApplication(
application=app,
options=asgi_config.as_gunicorn_conf()
)
asgi.run()
if __name__ == '__main__':
main()
| 22.133333
| 63
| 0.661145
| 391
| 0.588855
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.01506
|
fbceb552efb8ef0ad3ab8ba19aa7104619c9f206
| 495
|
py
|
Python
|
Python/CeV/Exercicios/ex71.py
|
WerickL/Learning
|
5a9a488f0422454e612439b89093d5bc11242e65
|
[
"MIT"
] | null | null | null |
Python/CeV/Exercicios/ex71.py
|
WerickL/Learning
|
5a9a488f0422454e612439b89093d5bc11242e65
|
[
"MIT"
] | null | null | null |
Python/CeV/Exercicios/ex71.py
|
WerickL/Learning
|
5a9a488f0422454e612439b89093d5bc11242e65
|
[
"MIT"
] | null | null | null |
Val = int(input('Enter the amount you want to withdraw: '))
c50 = c20 = c10 = c1 = 0
if Val // 50 != 0:
c50 = Val // 50
Val = Val % 50
if Val // 20 != 0:
c20 = Val // 20
Val = Val % 20
if Val // 10 != 0:
c10 = Val // 10
Val = Val % 10
if Val // 1 != 0:
c1 = Val // 1
if c50 != 0:
    print(f'{c50} banknote(s) of R$50.00')
if c20 != 0:
    print(f'{c20} banknote(s) of R$20.00')
if c10 != 0:
    print(f'{c10} banknote(s) of R$10.00')
if c1 != 0:
    print(f'{c1} banknote(s) of R$1.00')
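# Worked example: for an input of 87 the greedy breakdown above prints
# 1 banknote of R$50.00, 1 of R$20.00, 1 of R$10.00 and 7 of R$1.00
# (87 = 50 + 20 + 10 + 7 * 1).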
| 23.571429
| 55
| 0.50303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 148
| 0.296
|
fbd0e3db5f9cf99e22751e706aab58c1843471e9
| 801
|
bzl
|
Python
|
model/oppia_proto_library.bzl
|
bhaktideshmukh/oppia-android
|
94626909570ddbbd06d2cd691b49f357b986db0f
|
[
"Apache-2.0"
] | null | null | null |
model/oppia_proto_library.bzl
|
bhaktideshmukh/oppia-android
|
94626909570ddbbd06d2cd691b49f357b986db0f
|
[
"Apache-2.0"
] | null | null | null |
model/oppia_proto_library.bzl
|
bhaktideshmukh/oppia-android
|
94626909570ddbbd06d2cd691b49f357b986db0f
|
[
"Apache-2.0"
] | null | null | null |
"""
Bazel macros for defining proto libraries.
"""
load("@rules_proto//proto:defs.bzl", "proto_library")
# TODO(#4096): Remove this once it's no longer needed.
def oppia_proto_library(name, **kwargs):
"""
Defines a new proto library.
Note that the library is defined with a stripped import prefix which ensures that protos have a
common import directory (which is needed since Gradle builds protos in the same directory
whereas Bazel doesn't by default). This common import directory is needed for cross-proto
textprotos to work correctly.
Args:
name: str. The name of the proto library.
**kwargs: additional parameters to pass into proto_library.
"""
proto_library(
name = name,
strip_import_prefix = "",
**kwargs
)
| 30.807692
| 99
| 0.689139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 647
| 0.80774
|
fbd1627e659fe085f390ad7f199095412b24f0f3
| 1,533
|
py
|
Python
|
tests/test_cbers_ndvi.py
|
RemotePixel/remotepixel-py
|
bd58db7a394c84651d05c4e6f83da4cd3d4c26f3
|
[
"BSD-2-Clause"
] | 5
|
2017-09-29T15:21:39.000Z
|
2021-02-23T02:03:18.000Z
|
tests/test_cbers_ndvi.py
|
RemotePixel/remotepixel-py
|
bd58db7a394c84651d05c4e6f83da4cd3d4c26f3
|
[
"BSD-2-Clause"
] | 3
|
2017-11-03T13:24:31.000Z
|
2018-09-18T13:55:52.000Z
|
tests/test_cbers_ndvi.py
|
RemotePixel/remotepixel-py
|
bd58db7a394c84651d05c4e6f83da4cd3d4c26f3
|
[
"BSD-2-Clause"
] | 4
|
2017-10-04T10:42:45.000Z
|
2019-06-21T07:49:35.000Z
|
import os
from remotepixel import cbers_ndvi
CBERS_SCENE = "CBERS_4_MUX_20171121_057_094_L2"
CBERS_BUCKET = os.path.join(os.path.dirname(__file__), "fixtures", "cbers-pds")
CBERS_PATH = os.path.join(
CBERS_BUCKET, "CBERS4/MUX/057/094/CBERS_4_MUX_20171121_057_094_L2/"
)
def test_point_valid(monkeypatch):
"""Should work as expected (read data, calculate NDVI and return json info)."""
monkeypatch.setattr(cbers_ndvi, "CBERS_BUCKET", CBERS_BUCKET)
expression = "(b8 - b7) / (b8 + b7)"
coords = [53.9097, 5.3674]
expectedContent = {
"date": "2017-11-21",
"scene": CBERS_SCENE,
"ndvi": -0.1320754716981132,
}
assert cbers_ndvi.point(CBERS_SCENE, coords, expression) == expectedContent
def test_point_invalid(monkeypatch):
"""Should work as expected and retour 0 for outside point."""
monkeypatch.setattr(cbers_ndvi, "CBERS_BUCKET", CBERS_BUCKET)
expression = "(b8 - b7) / (b8 + b7)"
coords = [53.9097, 2.3674]
expectedContent = {"date": "2017-11-21", "scene": CBERS_SCENE, "ndvi": 0.}
assert cbers_ndvi.point(CBERS_SCENE, coords, expression) == expectedContent
def test_area_valid(monkeypatch):
"""Should work as expected (read data, calculate NDVI and return img)."""
monkeypatch.setattr(cbers_ndvi, "CBERS_BUCKET", CBERS_BUCKET)
expression = "(b8 - b7) / (b8 + b7)"
bbox = [53.0859375, 5.266007882805496, 53.4375, 5.615985819155334]
res = cbers_ndvi.area(CBERS_SCENE, bbox, expression)
assert res["date"] == "2017-11-21"
| 36.5
| 83
| 0.692107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 511
| 0.333333
|
fbd2de928f07cf44790d6956008e6625c654e85c
| 2,270
|
py
|
Python
|
test/scons-time/time/no-result.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/scons-time/time/no-result.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/scons-time/time/no-result.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that the time subcommand's --which option doesn't fail, and prints
an appropriate error message, if a log file doesn't have its specific
requested results.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
header = """\
set key bottom left
plot '-' title "Startup" with lines lt 1
# Startup
"""
footer = """\
e
"""
line_fmt = "%s 11.123456\n"
lines = []
for i in range(9):
logfile_name = 'foo-%s-0.log' % i
if i == 5:
test.write(test.workpath(logfile_name), "NO RESULTS HERE!\n")
else:
test.fake_logfile(logfile_name)
lines.append(line_fmt % i)
expect = [header] + lines + [footer]
stderr = "file 'foo-5-0.log' has no results!\n"
test.run(arguments = 'time --fmt gnuplot --which total foo*.log',
stdout = ''.join(expect),
stderr = stderr)
expect = [header] + [footer]
test.run(arguments = 'time --fmt gnuplot foo-5-0.log',
stdout = ''.join(expect),
stderr = stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 27.682927
| 73
| 0.711013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,653
| 0.728194
|
fbd3456398088e022605db823242b0036ee40344
| 25,955
|
py
|
Python
|
alipy/index/index_collections.py
|
Houchaoqun/ALiPy
|
93aff0379db2a1994803d19026c434c2b12a2485
|
[
"BSD-3-Clause"
] | 1
|
2019-07-10T10:55:18.000Z
|
2019-07-10T10:55:18.000Z
|
alipy/index/index_collections.py
|
Houchaoqun/ALiPy
|
93aff0379db2a1994803d19026c434c2b12a2485
|
[
"BSD-3-Clause"
] | null | null | null |
alipy/index/index_collections.py
|
Houchaoqun/ALiPy
|
93aff0379db2a1994803d19026c434c2b12a2485
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The container to store indexes in active learning.
Serve as the basic type of 'set' operation.
"""
# Authors: Ying-Peng Tang
# License: BSD 3 clause
from __future__ import division
import collections
import copy
import numpy as np
from .multi_label_tools import check_index_multilabel, infer_label_size_multilabel, flattern_multilabel_index, \
integrate_multilabel_index
from ..utils.ace_warnings import *
from ..utils.interface import BaseCollection
from ..utils.misc import randperm
class IndexCollection(BaseCollection):
"""Index Collection.
    The IndexCollection class is a basic data type supporting set operations.
    Multiple different element types are supported for active learning,
    and the validity of each operation is checked.
    Note that:
    1. The types of the elements should be the same.
    2. If there are multiple elements to update, they should be given as a list,
    numpy.ndarray or IndexCollection object; otherwise the argument is treated
    as one single element. (If a single element contains multiple values, use a
    tuple as the element type.)
Parameters
----------
data : list or np.ndarray or object, optional (default=None)
shape [n_element]. Element should be int or tuple.
The meaning of elements can be defined by users.
Some examples of elements:
(example_index, label_index) for instance-label pair query.
(example_index, feature_index) for feature query,
(example_index, example_index) for active clustering;
If int, it may be the index of an instance, for example.
Attributes
----------
index: list, shape (1, n_elements)
        A list containing all elements in this container.
Examples
--------
>>> a = IndexCollection([1, 2, 3])
>>> a.update([4,5])
[1, 2, 3, 4, 5]
>>> a.difference_update([1,2])
[3, 4, 5]
"""
def __init__(self, data=None):
if data is None or len(data) == 0:
self._innercontainer = []
else:
if isinstance(data, IndexCollection):
self._innercontainer = copy.deepcopy(data.index)
self._element_type = data.get_elementType()
return
if not isinstance(data, (list, np.ndarray)):
data = [data]
self._innercontainer = list(np.unique([i for i in data], axis=0))
if len(self._innercontainer) != len(data):
warnings.warn("There are %d same elements in the given data" % (len(data) - len(self._innercontainer)),
category=RepeatElementWarning,
stacklevel=3)
datatype = collections.Counter([type(i) for i in self._innercontainer])
if len(datatype) != 1:
raise TypeError("Different types found in the given _indexes.")
tmp_data = self._innercontainer[0]
if isinstance(tmp_data, np.generic):
# self._element_type = type(np.asscalar(tmp_data)) # deprecated in numpy v1.16
self._element_type = type(tmp_data.item())
else:
self._element_type = type(tmp_data)
@property
def index(self):
"""
Get the index of data.
"""
return copy.deepcopy(self._innercontainer)
def __getitem__(self, item):
return self._innercontainer.__getitem__(item)
def get_elementType(self):
"""
Return the type of data.
"""
return self._element_type
def pop(self):
"""
Return the popped value. Raise KeyError if empty.
"""
return self._innercontainer.pop()
def add(self, value):
"""
Add element.
        It will warn if the value to add already exists.
Parameters
----------
value: object
            Same type as the elements already in the set.
            Raises TypeError if a value of a different type is given.
Returns
-------
self: object
return self.
"""
if self._element_type is None:
self._element_type = type(value)
# check validation
if isinstance(value, np.generic):
# value = np.asscalar(value) # deprecated in numpy v1.16
value = value.item()
if not isinstance(value, self._element_type):
raise TypeError(
"A %s parameter is expected, but received: %s" % (str(self._element_type), str(type(value))))
if value in self._innercontainer:
warnings.warn("Adding element %s has already in the collection, skip." % (value.__str__()),
category=RepeatElementWarning,
stacklevel=3)
else:
self._innercontainer.append(value)
return self
def discard(self, value):
"""Remove an element.
        It will warn if the value to discard does not exist.
Parameters
----------
value: object
Value to discard.
Returns
-------
self: object
Return self.
"""
if value not in self._innercontainer:
warnings.warn("Element %s to discard is not in the collection, skip." % (value.__str__()),
category=InexistentElementWarning,
stacklevel=3)
else:
self._innercontainer.remove(value)
return self
def difference_update(self, other):
"""Remove all elements of another array from this container.
Parameters
----------
other: object
Elements to discard. Note that if multiple indexes are contained,
a list, numpy.ndarray or IndexCollection should be given. Otherwise,
it will be treated as a single object.
Returns
-------
self: object
Return self.
"""
if not isinstance(other, (list, np.ndarray, IndexCollection)):
other = [other]
for item in other:
self.discard(item)
return self
def update(self, other):
"""Update self with the union of itself and others.
Parameters
----------
other: object
Elements to add. Note that if multiple indexes are contained,
a list, numpy.ndarray or IndexCollection should be given. Otherwise,
it will be treated as a single object.
Returns
-------
self: object
Return self.
"""
if not isinstance(other, (list, np.ndarray, IndexCollection)):
other = [other]
for item in other:
self.add(item)
return self
def random_sampling(self, rate=0.3):
"""Return a random sampled subset of this collection.
Parameters
----------
rate: float, optional (default=0.3)
The rate of sampling. Must be a number in (0, 1).
Returns
-------
array: IndexCollection
The sampled index collection.
"""
assert (0 < rate < 1)
perm = randperm(len(self) - 1, round(rate * len(self)))
return IndexCollection([self.index[i] for i in perm])
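# A minimal usage sketch for random_sampling (assumption: `randperm(n, k)`
# returns k distinct indexes drawn from range(n + 1), as the call above implies):
#   pool = IndexCollection(list(range(10)))
#   sample = pool.random_sampling(rate=0.3)  # an IndexCollection of round(0.3 * 10) = 3 indexes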
class MultiLabelIndexCollection(IndexCollection):
"""Class for managing multi-label indexes.
This class stores indexes in the multi-label setting. Each element should be a tuple.
A single index should only have 1 element (example_index, ) to query all labels or
2 elements (example_index, [label_indexes]) to query specific labels.
Some examples of valid multi-label indexes include:
queried_index = (1, [3,4])
queried_index = (1, [3])
queried_index = (1, 3)
queried_index = (1, (3))
queried_index = (1, (3,4))
queried_index = (1, ) # query all labels
Several validity checks are implemented in this class,
such as detecting repeated elements and out-of-bound indexes.
Parameters
----------
data : list or np.ndarray of tuples, or a single tuple, optional (default=None)
shape [n_element]. All elements should be tuples.
label_size: int, optional (default=None)
The number of classes. If not provided, it will be inferred from the data; an error is raised if the inference fails.
Attributes
----------
index: list, shape (1, n_elements)
A list contains all elements in this container.
Examples
--------
>>> multi_lab_ind1 = MultiLabelIndexCollection([(0, 1), (0, 2), (0, (3, 4)), (1, (0, 1))], label_size=5)
{(0, 1), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.update((0, 0))
{(0, 1), (0, 0), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.update([(1, 2), (1, (3, 4))])
{(0, 1), (1, 2), (0, 0), (1, 3), (1, 4), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.update([(2,)])
{(0, 1), (1, 2), (0, 0), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.difference_update([(0,)])
{(1, 2), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (1, 0)}
"""
def __init__(self, data=None, label_size=None):
if data is None or len(data) == 0:
self._innercontainer = set()
if label_size is None:
warnings.warn("This collection does not have a label_size value, set it manually or "
"it will raise when decomposing indexes.",
category=ValidityWarning)
self._label_size = label_size
else:
if isinstance(data, MultiLabelIndexCollection):
self._innercontainer = copy.deepcopy(data.index)
self._label_size = data._label_size
return
# check given indexes
data = check_index_multilabel(data)
if label_size is None:
self._label_size = infer_label_size_multilabel(data, check_arr=False)
else:
self._label_size = label_size
# decompose all label queries.
decomposed_data = flattern_multilabel_index(data, self._label_size, check_arr=False)
self._innercontainer = set(decomposed_data)
if len(self._innercontainer) != len(decomposed_data):
warnings.warn(
"There are %d duplicate elements in the given data" % (len(decomposed_data) - len(self._innercontainer)),
category=RepeatElementWarning,
stacklevel=3)
@property
def index(self):
"""
Get the index of data.
"""
return list(self._innercontainer)
def add(self, value):
"""Add element.
It will warn if the value to add already exists. Raises if an
invalid type of value is given.
Parameters
----------
value: tuple
Index to add. Raises if the index is out of bound.
Returns
-------
self: object
return self.
"""
# check validation
assert (isinstance(value, tuple))
if len(value) == 1:
value = [(value[0], i) for i in range(self._label_size)]
return self.update(value)
elif len(value) == 2:
if isinstance(value[1], collections.Iterable):  # note: this alias of collections.abc.Iterable was removed in Python 3.10
for item in value[1]:
if item >= self._label_size:
raise ValueError("Index %s is out of bound %s" % (str(item), str(self._label_size)))
else:
if value[1] >= self._label_size:
raise ValueError("Index %s is out of bound %s" % (str(value[1]), str(self._label_size)))
else:
raise ValueError("A tuple with 1 or 2 elements is expected, but received: %s" % str(value))
if value in self._innercontainer:
warnings.warn("Adding element %s has already in the collection, skip." % (value.__str__()),
category=RepeatElementWarning,
stacklevel=3)
else:
self._innercontainer.add(value)
return self
def discard(self, value):
"""Remove an element.
It will warn if the value to discard does not exist. Raises if an
invalid type of value is given.
Parameters
----------
value: tuple
Index to discard. Raises if the index is out of bound.
Returns
-------
self: object
return self.
"""
assert (isinstance(value, tuple))
if len(value) == 1:
value = [(value[0], i) for i in range(self._label_size)]
return self.difference_update(value)
if value not in self._innercontainer:
warnings.warn("Element %s to discard is not in the collection, skip." % (value.__str__()),
category=InexistentElementWarning,
stacklevel=3)
else:
self._innercontainer.discard(value)
return self
def difference_update(self, other):
"""Remove all elements of another array from this container.
Parameters
----------
other: object
Elements to discard. Note that if multiple indexes are contained,
a list, numpy.ndarray or MultiLabelIndexCollection should be given; otherwise,
a single tuple should be given.
Returns
-------
self: object
Return self.
"""
if isinstance(other, (list, np.ndarray, MultiLabelIndexCollection)):
label_ind = flattern_multilabel_index(other, self._label_size)
for j in label_ind:
self.discard(j)
elif isinstance(other, tuple):
self.discard(other)
else:
raise TypeError(
"A list or np.ndarray is expected if multiple indexes are "
"contained. Otherwise, a tuple should be provided")
return self
def update(self, other):
"""Update self with the union of itself and others.
Parameters
----------
other: object
Elements to add. Note that if multiple indexes are contained,
a list, numpy.ndarray or MultiLabelIndexCollection should be given; otherwise,
a single tuple should be given.
Returns
-------
self: object
Return self.
"""
if isinstance(other, (list, np.ndarray, MultiLabelIndexCollection)):
label_ind = flattern_multilabel_index(other, self._label_size)
for j in label_ind:
self.add(j)
elif isinstance(other, tuple):
self.add(other)
else:
raise TypeError(
"A list or np.ndarray is expected if multiple indexes are "
"contained. Otherwise, a tuple should be provided")
return self
def get_onedim_index(self, order='C', ins_num=None):
"""Get the 1d index.
Parameters
----------
order : {'C', 'F'}, optional (default='C')
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Matlab-style) order.
ins_num: int, optional
The total number of instances. Must be provided if the order is 'F'.
Examples
--------
>>> b = [1, 4, 11]
>>> mi = MultiLabelIndexCollection.construct_by_1d_array(array=b, label_mat_shape=(3, 4))
>>> print(mi)
{(1, 0), (2, 3), (1, 1)}
>>> print('col major:', mi.get_onedim_index(order='F', ins_num=3))
col major: [1, 11, 4]
>>> print('row major:', mi.get_onedim_index(order='C'))
row major: [4, 11, 5]
"""
if order == 'F':
if ins_num is None:
raise ValueError("The ins_num must be provided if the order is 'F'.")
return [tup[0] + tup[1] * ins_num for tup in self._innercontainer]
elif order == 'C':
return [tup[0] * self._label_size + tup[1] for tup in self._innercontainer]
else:
raise ValueError("The value of order must be one of {'C', 'F'}")
def get_instance_index(self):
"""Get the index of instances contained in this object.
If it is a labeled set, it is equivalent to the indexes of fully and partially labeled instances.
Returns
-------
instance_ind: np.ndarray
The indexes of instances that appear in this container (fully or partially labeled).
"""
return np.unique([tp[0] for tp in self._innercontainer])
def _get_cond_instance(self, cond):
"""Return the indexes of instances according to the cond.
cond = 0: return the instances which are unbroken.
cond = 1: return the instances which have missing entries.
"""
tmp = integrate_multilabel_index(self.index, label_size=self._label_size, check_arr=False)
if cond == 0:
return [tp[0] for tp in tmp if len(tp) == 1]
else:
return [tp[0] for tp in tmp if len(tp) > 1]
def get_unbroken_instances(self):
"""Return the indexes of unbroken instances whose entries are all known."""
return self._get_cond_instance(cond=0)
def get_break_instances(self):
"""Return the indexes of break instances which have missing entries."""
return self._get_cond_instance(cond=1)
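# Illustrative sketch (assumption: integrate_multilabel_index merges instances
# whose labels are all known into 1-element tuples (example_index,)):
#   ind = MultiLabelIndexCollection([(0, 0), (0, 1), (1, 0)], label_size=2)
#   ind.get_unbroken_instances()  # -> [0]  (both labels of instance 0 are known)
#   ind.get_break_instances()     # -> [1]  (instance 1 has a missing label entry)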
def get_matrix_mask(self, mat_shape, fill_value=1, sparse=True, sparse_format='lil_matrix'):
"""Return an array which has the same shape with the label matrix.
If an entry is known, the corresponding value in the mask is fill_value (1 by default); otherwise, 0.
Parameters
----------
mat_shape: tuple
The shape of label matrix. [n_samples, n_classes]
fill_value: int
The value filled in the mask when the entry is in the container.
sparse: bool
Whether to return a sparse matrix or a dense matrix (numpy.ndarray).
sparse_format: str
The format of the returned sparse matrix. Only used if sparse==True.
Should be one of [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix, dok_matrix, lil_matrix].
Please refer to https://docs.scipy.org/doc/scipy-0.18.1/reference/sparse.html
for the definition of each sparse format.
Returns
-------
mask: {scipy.sparse matrix, np.ndarray}
The mask of the label matrix, in the requested sparse format or as a dense array.
"""
assert isinstance(mat_shape, tuple)
if sparse:
try:
exec("from scipy.sparse import " + sparse_format)
except Exception:
raise ValueError(
"sparse format " + sparse_format + " is not defined. Valid format should be one of "
"[bsr_matrix, coo_matrix, csc_matrix, csr_matrix, "
"dia_matrix, dok_matrix, lil_matrix].")
mask = eval(sparse_format + '(mat_shape)')
# Fill the known entries; note that formats without item assignment
# (e.g. coo_matrix) would need an intermediate lil_matrix here.
for item in self._innercontainer:
mask[item] = fill_value
else:
if fill_value == 1:
mask = np.zeros(mat_shape, dtype=bool)
for item in self._innercontainer:
mask[item] = True
else:
mask = np.zeros(mat_shape)
for item in self._innercontainer:
mask[item] = fill_value
return mask
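# A minimal usage sketch for the dense case (shapes are illustrative):
#   ind = MultiLabelIndexCollection([(0, 1), (2, 0)], label_size=2)
#   ind.get_matrix_mask(mat_shape=(3, 2), sparse=False)
#   # -> array([[False,  True],
#   #           [False, False],
#   #           [ True, False]])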
@classmethod
def construct_by_1d_array(cls, array, label_mat_shape, order='F'):
"""Construct a MultiLabelIndexCollection object by providing a
1d array, and the number of classes.
Parameters
----------
array: {list, np.ndarray}
An 1d array of indexes.
label_mat_shape: tuple of ints
The shape of label matrix. The 1st element is the number of instances,
and the 2nd element is the total classes.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Matlab-style) order.
Returns
-------
multi_ind: MultiLabelIndexCollection
The MultiLabelIndexCollection object.
Examples
--------
>>> b = [1, 4, 11]
>>> mi = MultiLabelIndexCollection.construct_by_1d_array(array=b, label_mat_shape=(3, 4))
>>> print(mi)
{(1, 0), (2, 3), (1, 1)}
>>> print('col major:', mi.get_onedim_index(order='F', ins_num=3))
col major: [1, 11, 4]
>>> print('row major:', mi.get_onedim_index(order='C'))
row major: [4, 11, 5]
"""
assert len(label_mat_shape) == 2
row, col = np.unravel_index(array, shape=label_mat_shape, order=order)  # 'dims' was renamed to 'shape' in numpy v1.16
return cls(data=[(row[i], col[i]) for i in range(len(row))], label_size=label_mat_shape[1])
@classmethod
def construct_by_element_mask(cls, mask):
"""Construct a MultiLabelIndexCollection object by providing a
2d array whose shape should be the same as the matrix shape.
Parameters
----------
mask: {list, np.ndarray}
The 2d mask matrix of elements.
There must be only 1 and 0 in the matrix, in which,
1 means the corresponding element is known, and will be
added to the MultiLabelIndexCollection container.
Otherwise, it will be treated as an unknown element.
Examples
--------
>>> import numpy as np
>>> mask = np.asarray([
[0, 1],
[1, 0],
[1, 0]
]) # 3 rows, 2 columns
>>> mi = MultiLabelIndexCollection.construct_by_element_mask(mask=mask)
>>> print(mi)
{(0, 1), (2, 0), (1, 0)}
"""
mask = np.asarray(mask)
ue = np.unique(mask)
if not (len(mask.shape) == 2 and len(ue) == 2 and 0 in ue and 1 in ue):
raise ValueError("The mask matrix should be a 2d array, and there must be only "
"1 and 0 in the matrix, in which, 1 means the corresponding "
"element is known, and will be added to the MultiLabelIndexCollection container.")
nz_row, nz_col = np.nonzero(mask)
return cls(data=[(nz_row[i], nz_col[i]) for i in range(len(nz_row))], label_size=mask.shape[1])
class FeatureIndexCollection(MultiLabelIndexCollection):
"""Container to store the indexes in feature querying scenario.
This class stores indexes in the incomplete-feature-matrix setting. Each element should be a tuple.
A single index should only have 1 element (example_index, ) to query all features or
2 elements (example_index, [feature_indexes]) to query specific features.
Some examples of valid indexes include:
queried_index = (1, [3,4])
queried_index = (1, [3])
queried_index = (1, 3)
queried_index = (1, (3))
queried_index = (1, (3,4))
queried_index = (1, ) # query all features
Several validity checks are implemented in this class,
such as detecting repeated elements and out-of-bound indexes.
Parameters
----------
data : list or np.ndarray of a single tuple, optional (default=None)
shape [n_element]. All elements should be tuples.
feature_size: int, optional (default=None)
The number of features. If not provided, it will be inferred from the data; an error is raised if the inference fails.
Attributes
----------
index: list, shape (1, n_elements)
A list contains all elements in this container.
Examples
--------
>>> fea_ind1 = FeatureIndexCollection([(0, 1), (0, 2), (0, (3, 4)), (1, (0, 1))], feature_size=5)
{(0, 1), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.update((0, 0))
{(0, 1), (0, 0), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.update([(1, 2), (1, (3, 4))])
{(0, 1), (1, 2), (0, 0), (1, 3), (1, 4), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.update([(2,)])
{(0, 1), (1, 2), (0, 0), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.difference_update([(0,)])
{(1, 2), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (1, 0)}
"""
def __init__(self, data=None, feature_size=None):
try:
super(FeatureIndexCollection, self).__init__(data=data, label_size=feature_size)
except Exception:
raise Exception("The inference of feature_size failed; please set a specific value.")
def map_whole_index_to_train(train_idx, index_in_whole):
"""Map the indexes from whole dataset to training set.
Parameters
----------
train_idx: {list, numpy.ndarray}
The training indexes.
index_in_whole: {IndexCollection, MultiLabelIndexCollection}
The indexes need to be mapped of the whole data.
Returns
-------
index_in_train: {IndexCollection, MultiLabelIndexCollection}
The mapped indexes.
Examples
--------
>>> train_idx = [231, 333, 423]
>>> index_in_whole = IndexCollection([333, 423])
>>> print(map_whole_index_to_train(train_idx, index_in_whole))
[1, 2]
"""
if isinstance(index_in_whole, MultiLabelIndexCollection):
ind_type = 2
elif isinstance(index_in_whole, IndexCollection):
ind_type = 1
else:
raise TypeError("index_in_whole must be one of {IndexCollection, MultiLabelIndexCollection} type.")
train_idx = np.asarray(train_idx)  # ensure the element-wise comparisons below work for plain lists
tr_ob = []
for entry in index_in_whole:
if ind_type == 2:
assert entry[0] in train_idx
ind_in_train = np.argwhere(train_idx == entry[0])[0][0]
tr_ob.append((ind_in_train, entry[1]))
else:
assert entry in train_idx
tr_ob.append(np.argwhere(train_idx == entry)[0][0])
if ind_type == 2:
return MultiLabelIndexCollection(tr_ob)
else:
return IndexCollection(tr_ob)
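# A multi-label sketch of the same mapping (assumption: entries are
# (instance_index, label_index) tuples and train_idx is an ndarray):
#   train_idx = np.array([231, 333, 423])
#   ml = MultiLabelIndexCollection([(333, 0), (423, 1)], label_size=2)
#   map_whole_index_to_train(train_idx, ml)  # -> {(1, 0), (2, 1)}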
| 36.607898
| 124
| 0.571335
| 24,045
| 0.926411
| 0
| 0
| 3,195
| 0.123098
| 0
| 0
| 15,268
| 0.588249
|
fbd3b3b8ed744c1417f498327a5a9678f19a086e
| 327
|
py
|
Python
|
keycache/util.py
|
psytron/keycache
|
0b69e21719dbe76908476c01e3e487aae2612fd2
|
[
"Apache-2.0"
] | 2
|
2020-04-27T07:48:54.000Z
|
2020-10-21T17:47:54.000Z
|
keycache/util.py
|
psytron/keycache
|
0b69e21719dbe76908476c01e3e487aae2612fd2
|
[
"Apache-2.0"
] | null | null | null |
keycache/util.py
|
psytron/keycache
|
0b69e21719dbe76908476c01e3e487aae2612fd2
|
[
"Apache-2.0"
] | null | null | null |
import platform as p
import uuid
import hashlib
def basic():
sb = []
sb.append(p.node())
sb.append( ''.join([ x for x in p.architecture() ]) )
sb.append(p.machine())
sb.append(p.processor())
sb.append(p.system())
sb.append(str(uuid.getnode())) # MAC address
text = '.'.join(sb)
return text
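# A minimal sketch of a likely use of the otherwise-unused hashlib import:
# derive a stable machine fingerprint by hashing the string from basic().
# The function name `machine_digest` is hypothetical, not part of the original file.
def machine_digest():
return hashlib.sha256(basic().encode('utf-8')).hexdigest()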
| 21.8
| 57
| 0.599388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.055046
|
fbd4db8145e5a07f88303ba81f436838785ffa65
| 995
|
py
|
Python
|
the biggidy back end/rawText.py
|
jlekas/recipe-site
|
e1c54cb0c19e2c28a968abe8988d7b57fdadbb46
|
[
"MIT"
] | 1
|
2019-09-06T00:16:27.000Z
|
2019-09-06T00:16:27.000Z
|
the biggidy back end/rawText.py
|
jlekas/recipe-site
|
e1c54cb0c19e2c28a968abe8988d7b57fdadbb46
|
[
"MIT"
] | 6
|
2021-03-09T17:29:30.000Z
|
2022-02-26T17:43:15.000Z
|
the biggidy back end/rawText.py
|
jlekas/recipe-site
|
e1c54cb0c19e2c28a968abe8988d7b57fdadbb46
|
[
"MIT"
] | null | null | null |
url = "https://www.delish.com/cooking/recipe-ideas/recipes/a53823/easy-pad-thai-recipe/"
url2 = "https://www.allrecipes.com/recipe/92462/slow-cooker-texas-pulled-pork/"
# opener = urllib.URLopener()
# opener.addheader(('User-Agent', 'Mozilla/5.0'))
# f = urllib.urlopen(url)
import requests
import html2text
h = html2text.HTML2Text()
h.ignore_links = True
f = requests.get(url2)
g = h.handle(f.text)
arrayOflines = g.split("\n")
isPrinting = False
chunk = []
chunks = []
for line in arrayOflines:
if(len(line) != 0):
chunk.append(line)
else:
chunks.append(chunk)
chunk = []
if chunk:  # flush the final chunk when the text does not end with a blank line
chunks.append(chunk)
print(chunks)
for c in chunks:
print(c)
print("\n \n")
# if 'ingredients' in line.lower() and len(line) < 15:
# print(line)
# if "ingredients" in line and len(line) < :
# print(len(line))
# isPrinting = True
# if(isPrinting):
# print(line)
# if(len(line) == 0):
# isPrinting = False
# print(arrayOflines)
| 20.729167
| 88
| 0.613065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 532
| 0.534673
|
fbd4f0ccd22a107526cc04a4572d5b45f8b8bf9b
| 21,796
|
py
|
Python
|
tests/test_service_desk.py
|
p-tombez/jira
|
a2d9311aa81384382cb3cbe6c9a6bc8f56387feb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_service_desk.py
|
p-tombez/jira
|
a2d9311aa81384382cb3cbe6c9a6bc8f56387feb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_service_desk.py
|
p-tombez/jira
|
a2d9311aa81384382cb3cbe6c9a6bc8f56387feb
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import inspect
import logging
import os
import platform
import sys
from time import sleep
from flaky import flaky
import pytest
import requests
from jira_test_manager import JiraTestManager
# _non_parallel is used to prevent some tests from failing due to concurrency
# issues because detox, Travis or Jenkins can run tests in parallel for multiple
# python versions.
# The current workaround is to run these problematic tests only on py27
_non_parallel = True
if platform.python_version() < '3':
_non_parallel = False
try:
import unittest2 as unittest
except ImportError:
import pip
if hasattr(sys, 'real_prefix'):
pip.main(['install', '--upgrade', 'unittest2'])
else:
pip.main(['install', '--upgrade', '--user', 'unittest2'])
import unittest2 as unittest
else:
import unittest
cmd_folder = os.path.abspath(os.path.join(os.path.split(inspect.getfile(
inspect.currentframe()))[0], ".."))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import jira # noqa
from jira import Role, Issue, JIRA, JIRAError, Project # noqa
from jira.resources import Resource, cls_for_resource # noqa
TEST_ROOT = os.path.dirname(__file__)
TEST_ICON_PATH = os.path.join(TEST_ROOT, 'icon.png')
TEST_ATTACH_PATH = os.path.join(TEST_ROOT, 'tests.py')
OAUTH = False
CONSUMER_KEY = 'oauth-consumer'
KEY_CERT_FILE = '/home/bspeakmon/src/atlassian-oauth-examples/rsa.pem'
KEY_CERT_DATA = None
try:
with open(KEY_CERT_FILE, 'r') as cert:
KEY_CERT_DATA = cert.read()
OAUTH = True
except Exception:
pass
if 'CI_JIRA_URL' in os.environ:
not_on_custom_jira_instance = pytest.mark.skipif(True, reason="Not applicable for custom JIRA instance")
logging.info('Picked up custom JIRA engine.')
else:
def noop(arg):
return arg
not_on_custom_jira_instance = noop
def jira_servicedesk_detection():
if 'CI_JIRA_URL' in os.environ:
url = os.environ['CI_JIRA_URL']
else:
url = 'https://pycontribs.atlassian.net'
url += '/rest/servicedeskapi/info'
return requests.get(url).status_code != 200
jira_servicedesk = pytest.mark.skipif(jira_servicedesk_detection(), reason="JIRA Service Desk is not available.")
@flaky
@jira_servicedesk
class ServiceDeskTests(unittest.TestCase):
def setUp(self):
self.test_manager = JiraTestManager()
self.jira = self.test_manager.jira_admin
self.desk = self.jira.desk
self.test_fullname_a = "TestCustomerFullName %s" % self.test_manager.project_a
self.test_email_a = "test_customer_%s@example.com" % self.test_manager.project_a
self.test_fullname_b = "TestCustomerFullName %s" % self.test_manager.project_b
self.test_email_b = "test_customer_%s@example.com" % self.test_manager.project_b
self.test_organization_name_a = "test_organization_%s" % self.test_manager.project_a
self.test_organization_name_b = "test_organization_%s" % self.test_manager.project_b
def test_create_and_delete_customer(self):
try:
self.jira.delete_user(self.test_email_a)
except JIRAError:
pass
customer = self.desk.create_customer(self.test_email_a, self.test_fullname_a)
self.assertEqual(customer.emailAddress, self.test_email_a)
self.assertEqual(customer.displayName, self.test_fullname_a)
result = self.jira.delete_user(self.test_email_a)
self.assertTrue(result)
def test_get_servicedesk_info(self):
result = self.desk.servicedesk_info()
self.assertNotEqual(result, False)
def test_create_and_delete_organization(self):
organization = self.desk.create_organization(self.test_organization_name_a)
self.assertEqual(organization.name, self.test_organization_name_a)
result = self.desk.delete_organization(organization.id)
self.assertTrue(result)
def test_get_organization(self):
organization = self.desk.create_organization(self.test_organization_name_a)
self.assertEqual(organization.name, self.test_organization_name_a)
result = self.desk.organization(organization.id)
self.assertEqual(result.id, organization.id)
self.assertEqual(result.name, self.test_organization_name_a)
result = self.desk.delete_organization(organization.id)
self.assertTrue(result)
def test_add_users_to_organization(self):
organization = self.desk.create_organization(self.test_organization_name_a)
self.assertEqual(organization.name, self.test_organization_name_a)
try:
self.jira.delete_user(self.test_email_a)
except JIRAError:
pass
try:
self.jira.delete_user(self.test_email_b)
except JIRAError:
pass
customer_a = self.desk.create_customer(self.test_email_a, self.test_fullname_a)
self.assertEqual(customer_a.emailAddress, self.test_email_a)
self.assertEqual(customer_a.displayName, self.test_fullname_a)
customer_b = self.desk.create_customer(self.test_email_b, self.test_fullname_b)
self.assertEqual(customer_b.emailAddress, self.test_email_b)
self.assertEqual(customer_b.displayName, self.test_fullname_b)
result = self.desk.add_users_to_organization(organization.id, [self.test_email_a, self.test_email_b])
self.assertTrue(result)
result = self.jira.delete_user(self.test_email_a)
self.assertTrue(result)
result = self.jira.delete_user(self.test_email_b)
self.assertTrue(result)
result = self.desk.delete_organization(organization.id)
self.assertTrue(result)
def test_remove_users_from_organization(self):
organization = self.desk.create_organization(self.test_organization_name_a)
self.assertEqual(organization.name, self.test_organization_name_a)
try:
self.jira.delete_user(self.test_email_a)
except JIRAError:
pass
try:
self.jira.delete_user(self.test_email_b)
except JIRAError:
pass
customer_a = self.desk.create_customer(self.test_email_a, self.test_fullname_a)
self.assertEqual(customer_a.emailAddress, self.test_email_a)
self.assertEqual(customer_a.displayName, self.test_fullname_a)
customer_b = self.desk.create_customer(self.test_email_b, self.test_fullname_b)
self.assertEqual(customer_b.emailAddress, self.test_email_b)
self.assertEqual(customer_b.displayName, self.test_fullname_b)
result = self.desk.add_users_to_organization(organization.id, [self.test_email_a, self.test_email_b])
self.assertTrue(result)
result = self.desk.remove_users_from_organization(organization.id, [self.test_email_a, self.test_email_b])
self.assertTrue(result)
result = self.jira.delete_user(self.test_email_a)
self.assertTrue(result)
result = self.jira.delete_user(self.test_email_b)
self.assertTrue(result)
result = self.desk.delete_organization(organization.id)
self.assertTrue(result)
def test_get_organizations(self):
organization_a = self.desk.create_organization(self.test_organization_name_a)
self.assertEqual(organization_a.name, self.test_organization_name_a)
organization_b = self.desk.create_organization(self.test_organization_name_b)
self.assertEqual(organization_b.name, self.test_organization_name_b)
organizations = self.desk.organizations(0, 1)
self.assertEqual(len(organizations), 1)
result = self.desk.delete_organization(organization_a.id)
self.assertTrue(result)
result = self.desk.delete_organization(organization_b.id)
self.assertTrue(result)
def test_get_users_in_organization(self):
organization = self.desk.create_organization(self.test_organization_name_a)
self.assertEqual(organization.name, self.test_organization_name_a)
try:
self.jira.delete_user(self.test_email_a)
except JIRAError:
pass
try:
self.jira.delete_user(self.test_email_b)
except JIRAError:
pass
customer_a = self.desk.create_customer(self.test_email_a, self.test_fullname_a)
self.assertEqual(customer_a.emailAddress, self.test_email_a)
self.assertEqual(customer_a.displayName, self.test_fullname_a)
customer_b = self.desk.create_customer(self.test_email_b, self.test_fullname_b)
self.assertEqual(customer_b.emailAddress, self.test_email_b)
self.assertEqual(customer_b.displayName, self.test_fullname_b)
result = self.desk.add_users_to_organization(organization.id, [self.test_email_a, self.test_email_b])
self.assertTrue(result)
result = self.desk.get_users_from_organization(organization.id)
self.assertEqual(len(result), 2)
result = self.jira.delete_user(self.test_email_a)
self.assertTrue(result)
result = self.jira.delete_user(self.test_email_b)
self.assertTrue(result)
result = self.desk.delete_organization(organization.id)
self.assertTrue(result)
def test_service_desks(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
def test_servicedesk(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
service_desk = self.desk.service_desk(service_desks[0].id)
self.assertEqual(service_desk.id, service_desks[0].id)
def test_request_types(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
def test_request_type(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
request_type = self.desk.request_type(service_desks[0].id, request_types[0].id)
self.assertEqual(request_type.id, request_types[0].id)
self.assertEqual(request_type.name, request_types[0].name)
def test_request_type_by_name(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
request_type_by_name = self.desk.request_type_by_name(service_desks[0].id, request_types[0].name)
self.assertEqual(request_types[0].id, request_type_by_name.id)
self.assertEqual(request_types[0].name, request_type_by_name.name)
def test_create_and_delete_customer_request_with_prefetch(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
fields = {
"serviceDeskId": int(service_desks[0].id),
"requestTypeId": int(request_types[0].id),
"raiseOnBehalfOf": self.test_manager.CI_JIRA_USER,
"requestFieldValues": {
"summary": "Request summary",
"description": "Request description"
}
}
request = self.desk.create_request(fields, prefetch=True)
self.jira.delete_issue(request.id)
self.assertIsNotNone(request.id)
self.assertIsNotNone(request.key)
self.assertEqual(request.fields.summary, "Request summary")
self.assertEqual(request.fields.description, "Request description")
def test_create_and_delete_customer_request_without_prefetch(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
fields = {
"serviceDeskId": int(service_desks[0].id),
"requestTypeId": int(request_types[0].id),
"raiseOnBehalfOf": self.test_manager.CI_JIRA_USER,
"requestFieldValues": {
"summary": "Request summary",
"description": "Request description"
}
}
request = self.desk.create_request(fields, prefetch=False)
self.jira.delete_issue(request.id)
self.assertIsNotNone(request.id)
self.assertIsNotNone(request.key)
self.assertEqual(request.fields.summary, "Request summary")
self.assertEqual(request.fields.description, "Request description")
def test_get_customer_request_by_key_or_id(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
fields = {
"serviceDeskId": int(service_desks[0].id),
"requestTypeId": int(request_types[0].id),
"raiseOnBehalfOf": self.test_manager.CI_JIRA_USER,
"requestFieldValues": {
"summary": "Request summary",
"description": "Request description"
}
}
request = self.desk.create_request(fields, prefetch=False)
expand = 'serviceDesk,requestType,participant,sla,status'
request_by_key = self.desk.request(request.key, expand=expand)
self.assertEqual(request.id, request_by_key.id)
self.assertEqual(request.key, request_by_key.key)
self.assertEqual(request_by_key.fields.summary, "Request summary")
self.assertEqual(request_by_key.fields.description, "Request description")
expand = 'serviceDesk,requestType,participant,sla,status'
request_by_id = self.desk.request(request.id, expand=expand)
self.jira.delete_issue(request.id)
self.assertEqual(request.id, request_by_id.id)
self.assertEqual(request.key, request_by_id.key)
self.assertEqual(request_by_id.fields.summary, "Request summary")
self.assertEqual(request_by_id.fields.description, "Request description")
def test_get_my_customer_requests(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
fields = {
"serviceDeskId": int(service_desks[0].id),
"requestTypeId": int(request_types[0].id),
"raiseOnBehalfOf": self.test_manager.CI_JIRA_USER,
"requestFieldValues": {
"summary": "Request summary",
"description": "Request description"
}
}
request1 = self.desk.create_request(fields, prefetch=False)
fields = {
"serviceDeskId": int(service_desks[0].id),
"requestTypeId": int(request_types[0].id),
"raiseOnBehalfOf": self.test_manager.CI_JIRA_ADMIN,
"requestFieldValues": {
"summary": "Request summary",
"description": "Request description"
}
}
request2 = self.desk.create_request(fields, prefetch=False)
result = self.desk.my_customer_requests(request_ownership='OWNED_REQUESTS',
servicedesk_id=int(service_desks[0].id),
request_type_id=int(request_types[0].id))
count = 0
requests = (request1.id, request2.id)
for i in result:
if i.id in requests:
count += 1
self.assertEqual(count, 1)
result = self.desk.my_customer_requests(request_ownership='PARTICIPATED_REQUESTS',
servicedesk_id=int(service_desks[0].id),
request_type_id=int(request_types[0].id))
count = 0
requests_list = (request1.id, request2.id)
for i in result:
if i.id in requests_list:
count += 1
self.jira.delete_issue(request1.id)
self.jira.delete_issue(request2.id)
self.assertEqual(count, 0)
def test_request_comments(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
fields = {
"serviceDeskId": int(service_desks[0].id),
"requestTypeId": int(request_types[0].id),
"raiseOnBehalfOf": self.test_manager.CI_JIRA_USER,
"requestFieldValues": {
"summary": "Request summary",
"description": "Request description"
}
}
request = self.desk.create_request(fields, prefetch=False)
self.jira.add_comment(request.id, "Public comment #1", is_internal=False)
self.jira.add_comment(request.id, "Internal comment #1", is_internal=True)
self.jira.add_comment(request.id, "Public comment #2", is_internal=False)
self.jira.add_comment(request.id, "Public comment #3", is_internal=False)
sleep(1)
public_comments = self.desk.request_comments(request.id, public=True, internal=False)
internal_comments = self.desk.request_comments(request.id, public=False, internal=True)
all_comments = self.desk.request_comments(request.id)
self.assertEqual(len(public_comments), 3)
self.assertEqual(len(internal_comments), 1)
self.assertEqual(len(all_comments), 4)
for comment in public_comments:
self.assertEqual(comment.public, True)
for comment in internal_comments:
self.assertEqual(comment.public, False)
self.jira.delete_issue(request.id)
def test_create_attachment(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
request_types = self.desk.request_types(service_desks[0].id)
self.assertGreater(len(request_types), 0)
fields = {
"serviceDeskId": int(service_desks[0].id),
"requestTypeId": int(request_types[0].id),
"raiseOnBehalfOf": self.test_manager.CI_JIRA_USER,
"requestFieldValues": {
"summary": "Request summary",
"description": "Request description"
}
}
request = self.desk.create_request(fields)
tmp_attachment = self.desk.attach_temporary_file(service_desks[0].id, open(TEST_ICON_PATH, 'rb'), "test.png")
self.assertEqual(len(tmp_attachment.temporaryAttachments), 1)
self.assertEqual(tmp_attachment.temporaryAttachments[0].fileName, 'test.png')
request_attachment = self.desk.servicedesk_attachment(request.id, tmp_attachment, is_public=False,
comment='Comment text')
self.jira.delete_issue(request.id)
self.assertEqual(request_attachment.comment.body, 'Comment text\n\n!test.png|thumbnail!')
if hasattr(request_attachment.attachments, 'values'):
# For Jira Servicedesk Cloud
self.assertGreater(len(request_attachment.attachments.values), 0)
self.assertEqual(request_attachment.attachments.values[0].filename, 'test.png')
self.assertGreater(request_attachment.attachments.values[0].size, 0)
else:
# For Jira Servicedesk Server
self.assertGreater(len(request_attachment.attachments), 0)
self.assertEqual(request_attachment.attachments[0].filename, 'test.png')
self.assertGreater(request_attachment.attachments[0].size, 0)
def test_attach_temporary_file(self):
service_desks = self.desk.service_desks()
self.assertGreater(len(service_desks), 0)
tmp_attachment = self.desk.attach_temporary_file(service_desks[0].id, open(TEST_ICON_PATH, 'rb'), "test.png")
self.assertEqual(len(tmp_attachment.temporaryAttachments), 1)
self.assertEqual(tmp_attachment.temporaryAttachments[0].fileName, 'test.png')
def test_create_customer_request(self):
try:
self.jira.create_project('TESTSD', template_name='IT Service Desk')
except JIRAError:
pass
service_desk = self.desk.service_desks()[0]
request_type = self.desk.request_types(service_desk.id)[0]
request = self.desk.create_customer_request(dict(
serviceDeskId=service_desk.id,
requestTypeId=int(request_type.id),
requestFieldValues=dict(
summary='Ticket title here',
description='Ticket body here'
)
))
self.assertEqual(request.fields.summary, 'Ticket title here')
self.assertEqual(request.fields.description, 'Ticket body here')
if __name__ == '__main__':
# when running tests we expect various errors and we don't want to display them by default
logging.getLogger("requests").setLevel(logging.FATAL)
logging.getLogger("urllib3").setLevel(logging.FATAL)
logging.getLogger("jira").setLevel(logging.FATAL)
# j = JIRA("https://issues.citrite.net")
# print(j.session())
dirname = "test-reports-%s%s" % (sys.version_info[0], sys.version_info[1])
unittest.main()
# pass
| 38.991055
| 117
| 0.677097
| 18,978
| 0.87071
| 0
| 0
| 19,003
| 0.871857
| 0
| 0
| 2,578
| 0.118279
|
fbd4f1c85388584979a3225e172df289b9b181ba
| 1,761
|
py
|
Python
|
mods/goofile.py
|
Natto97/discover
|
101d5457bad9345598720a49e4323b047030e496
|
[
"MIT"
] | 1
|
2018-08-11T10:28:00.000Z
|
2018-08-11T10:28:00.000Z
|
mods/goofile.py
|
Natto97/discover
|
101d5457bad9345598720a49e4323b047030e496
|
[
"MIT"
] | null | null | null |
mods/goofile.py
|
Natto97/discover
|
101d5457bad9345598720a49e4323b047030e496
|
[
"MIT"
] | 1
|
2018-11-02T18:33:00.000Z
|
2018-11-02T18:33:00.000Z
|
#!/usr/bin/env python
# Goofile v1.5a
# by Thomas (G13) Richards
# www.g13net.com
# Project Page: code.google.com/p/goofile
# TheHarvester used for inspiration
# Many thanks to the Edge-Security team!
# Modified by Lee Baird
import getopt
import httplib
import re
import string
import sys
global result
result = []
def usage():
print "\nusage: goofile <options>"
print " -d: domain"
print " -f: filetype\n"
print "example: goofile.py -d target.com -f txt\n\n"
sys.exit()
def run(domain,file):
h = httplib.HTTP('www.google.com')
h.putrequest('GET',"/search?num=500&q=site:"+domain+"+filetype:"+file)
h.putheader('Host', 'www.google.com')
h.putheader('User-agent', 'Internet Explorer 6.0 ')
h.putheader('Referrer', 'www.g13net.com')
h.endheaders()
returncode, returnmsg, headers = h.getreply()
data=h.getfile().read()
data=re.sub('<b>','',data)
for e in ('>','=','<','\\','(',')','"','http',':','//'):
data = string.replace(data,e,' ')
r1 = re.compile('[-_.a-zA-Z0-9.-_]*'+'\.'+file)
res = r1.findall(data)
return res
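# Illustrative example of the request path that run() builds (hypothetical values):
#   /search?num=500&q=site:target.com+filetype:txt
# i.e. a Google "dork" query restricting results to one domain and one file type.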
def search(argv):
global limit
limit = 100
if len(sys.argv) < 2:
usage()
try :
opts, args = getopt.getopt(argv,"d:f:")
except getopt.GetoptError:
usage()
sys.exit()
for opt,arg in opts :
if opt == '-f' :
file=arg
elif opt == '-d':
domain=arg
cant = 0
while cant < limit:
res = run(domain,file)
for x in res:
if result.count(x) == 0:
result.append(x)
cant+=100
if result==[]:
print "No results were found."
else:
for x in result:
print x
if __name__ == "__main__":
try: search(sys.argv[1:])
except KeyboardInterrupt:
print "Search interrupted by user."
except:
sys.exit()
| 20.717647
| 71
| 0.609313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 626
| 0.35548
|
fbd4fbdd0e8caf1bb4e01991f3ba92b60968ec1e
| 211
|
py
|
Python
|
Atividade do Livro-Nilo Ney(PYTHON)/Cap.05/exe 5.25.py
|
EduardoJonathan0/Python
|
0e4dff4703515a6454ba25c6f401960b6155f32f
|
[
"MIT"
] | null | null | null |
Atividade do Livro-Nilo Ney(PYTHON)/Cap.05/exe 5.25.py
|
EduardoJonathan0/Python
|
0e4dff4703515a6454ba25c6f401960b6155f32f
|
[
"MIT"
] | null | null | null |
Atividade do Livro-Nilo Ney(PYTHON)/Cap.05/exe 5.25.py
|
EduardoJonathan0/Python
|
0e4dff4703515a6454ba25c6f401960b6155f32f
|
[
"MIT"
] | null | null | null |
# Babylonian (Heron's) method: iterate p = (b + n/b) / 2 until p**2 is within 1e-4 of n.
n = int(input('Enter a number to compute its square root: '))
b = 2
while True:
p = (b + (n / b)) / 2
res = p ** 2
b = p
if abs(n - res) < 0.0001:
break
print(f'p = {p}')
print(f'p² = {res}')
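# Sanity-check sketch against the standard library (illustrative):
#   import math
#   assert abs(p - math.sqrt(n)) < 0.01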
| 17.583333
| 55
| 0.469194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.300469
|
fbd741a2b97e35d13af8722f7601c0365f0d7506
| 1,732
|
py
|
Python
|
wolframclient/utils/six.py
|
krbarker/WolframClientForPython
|
f2198b15cad0f406b78ad40a4d1e3ca76125b408
|
[
"MIT"
] | null | null | null |
wolframclient/utils/six.py
|
krbarker/WolframClientForPython
|
f2198b15cad0f406b78ad40a4d1e3ca76125b408
|
[
"MIT"
] | null | null | null |
wolframclient/utils/six.py
|
krbarker/WolframClientForPython
|
f2198b15cad0f406b78ad40a4d1e3ca76125b408
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import decimal
import platform
import sys
import types
from itertools import chain
# stripped-down version of six
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY_35 = sys.version_info >= (3, 5)
PY_36 = sys.version_info >= (3, 6)
PY_37 = sys.version_info >= (3, 7)
WINDOWS = platform.system() == 'Windows'
LINUX = platform.system() == 'Linux'
MACOS = platform.system() == 'Darwin'
JYTHON = sys.platform.startswith('java')
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
none_type = type(None)
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
memoryview = memoryview
buffer_types = (bytes, bytearray, memoryview)
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
none_type = types.NoneType
import StringIO
StringIO = BytesIO = StringIO.StringIO
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if JYTHON:
memoryview = memoryview
else:
memoryview = buffer
buffer_types = (bytearray, memoryview, buffer)
iterable_types = (list, tuple, set, frozenset, types.GeneratorType, chain)
protected_types = tuple(
chain(string_types, integer_types,
(float, decimal.Decimal, datetime.date, datetime.datetime,
datetime.time, bool, none_type)))
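# Typical usage sketch for these compatibility aliases (illustrative):
#   from wolframclient.utils.six import string_types, PY2
#   isinstance(u'abc', string_types)  # True on both Python 2 and 3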
| 25.850746
| 79
| 0.691686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 299
| 0.172633
|
fbd89ae0e3bc378776e8ecafb307ef98cc2d28f8
| 3,388
|
py
|
Python
|
Lib/site-packages/pynput/mouse/_xorg.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/pynput/mouse/_xorg.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/pynput/mouse/_xorg.py
|
djaldave/laevad-python-2.7.18
|
df9aac191d554295db45d638e528880a9ab9a3ec
|
[
"bzip2-1.0.6"
] | null | null | null |
# coding=utf-8
# pynput
# Copyright (C) 2015-2016 Moses Palmér
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import enum
import Xlib.display
import Xlib.ext
import Xlib.ext.xtest
import Xlib.X
import Xlib.protocol
from pynput._util.xorg import *
from . import _base
class Button(enum.Enum):
"""The various buttons.
"""
left = 1
middle = 2
right = 3
scroll_up = 4
scroll_down = 5
scroll_left = 6
scroll_right = 7
class Controller(_base.Controller):
def __init__(self):
self._display = Xlib.display.Display()
def __del__(self):
if hasattr(self, '_display'):
self._display.close()
def _position_get(self):
with display_manager(self._display) as d:
data = d.screen().root.query_pointer()._data
return (data["root_x"], data["root_y"])
def _position_set(self, pos):
x, y = pos
with display_manager(self._display) as d:
Xlib.ext.xtest.fake_input(d, Xlib.X.MotionNotify, x=x, y=y)
def _scroll(self, dx, dy):
if dy:
self.click(
button=Button.scroll_up if dy > 0 else Button.scroll_down,
count=abs(dy))
if dx:
self.click(
button=Button.scroll_right if dx > 0 else Button.scroll_left,
count=abs(dx))
def _press(self, button):
with display_manager(self._display) as d:
Xlib.ext.xtest.fake_input(d, Xlib.X.ButtonPress, button.value)
def _release(self, button):
with display_manager(self._display) as d:
Xlib.ext.xtest.fake_input(d, Xlib.X.ButtonRelease, button.value)
class Listener(ListenerMixin, _base.Listener):
#: A mapping from button values to scroll directions
_SCROLL_BUTTONS = {
Button.scroll_up.value: (0, 1),
Button.scroll_down.value: (0, -1),
Button.scroll_right.value: (1, 0),
Button.scroll_left.value: (-1, 0)}
_EVENTS = (
Xlib.X.ButtonPressMask,
Xlib.X.ButtonReleaseMask)
def _handle(self, display, event):
x = event.root_x
y = event.root_y
if event.type == Xlib.X.ButtonPress:
# Scroll events are sent as button presses with the scroll
# button codes
scroll = self._SCROLL_BUTTONS.get(event.detail, None)
if scroll:
self.on_scroll(x, y, *scroll)
else:
self.on_click(x, y, Button(event.detail), True)
elif event.type == Xlib.X.ButtonRelease:
# Send an event only if this was not a scroll event
if event.detail not in self._SCROLL_BUTTONS:
self.on_click(x, y, Button(event.detail), False)
else:
self.on_move(x, y)
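# Usage sketch via pynput's public API (requires an X11 display; illustrative):
#   from pynput.mouse import Button, Controller
#   mouse = Controller()
#   mouse.position = (100, 100)
#   mouse.click(Button.left, 1)  # press + release; scrolling maps to buttons 4-7 as above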
| 30.522523
| 79
| 0.631641
| 2,502
| 0.738271
| 0
| 0
| 0
| 0
| 0
| 0
| 935
| 0.275893
|
fbdc68b03f388458c541749b935a1b91cef73dc0
| 155,379
|
py
|
Python
|
automate_online-materials/census2010.py
|
kruschk/automate-the-boring-stuff
|
2172fa9d1846b2ba9ead4e86971d72edd54f97b3
|
[
"MIT"
] | 2
|
2020-01-18T16:01:24.000Z
|
2020-02-29T19:27:17.000Z
|
automate_online-materials/census2010.py
|
kruschk/automate-the-boring-stuff
|
2172fa9d1846b2ba9ead4e86971d72edd54f97b3
|
[
"MIT"
] | 88
|
2019-10-31T12:30:02.000Z
|
2020-08-14T12:17:12.000Z
|
automate_online-materials/census2010.py
|
kruschk/automate-the-boring-stuff
|
2172fa9d1846b2ba9ead4e86971d72edd54f97b3
|
[
"MIT"
] | 4
|
2020-08-17T16:49:06.000Z
|
2022-02-14T06:45:29.000Z
|
allData = {'AK': {'Aleutians East': {'pop': 3141, 'tracts': 1},
'Aleutians West': {'pop': 5561, 'tracts': 2},
'Anchorage': {'pop': 291826, 'tracts': 55},
'Bethel': {'pop': 17013, 'tracts': 3},
'Bristol Bay': {'pop': 997, 'tracts': 1},
'Denali': {'pop': 1826, 'tracts': 1},
'Dillingham': {'pop': 4847, 'tracts': 2},
'Fairbanks North Star': {'pop': 97581, 'tracts': 19},
'Haines': {'pop': 2508, 'tracts': 1},
'Hoonah-Angoon': {'pop': 2150, 'tracts': 2},
'Juneau': {'pop': 31275, 'tracts': 6},
'Kenai Peninsula': {'pop': 55400, 'tracts': 13},
'Ketchikan Gateway': {'pop': 13477, 'tracts': 4},
'Kodiak Island': {'pop': 13592, 'tracts': 5},
'Lake and Peninsula': {'pop': 1631, 'tracts': 1},
'Matanuska-Susitna': {'pop': 88995, 'tracts': 24},
'Nome': {'pop': 9492, 'tracts': 2},
'North Slope': {'pop': 9430, 'tracts': 3},
'Northwest Arctic': {'pop': 7523, 'tracts': 2},
'Petersburg': {'pop': 3815, 'tracts': 1},
'Prince of Wales-Hyder': {'pop': 5559, 'tracts': 4},
'Sitka': {'pop': 8881, 'tracts': 2},
'Skagway': {'pop': 968, 'tracts': 1},
'Southeast Fairbanks': {'pop': 7029, 'tracts': 2},
'Valdez-Cordova': {'pop': 9636, 'tracts': 3},
'Wade Hampton': {'pop': 7459, 'tracts': 1},
'Wrangell': {'pop': 2369, 'tracts': 1},
'Yakutat': {'pop': 662, 'tracts': 1},
'Yukon-Koyukuk': {'pop': 5588, 'tracts': 4}},
'AL': {'Autauga': {'pop': 54571, 'tracts': 12},
'Baldwin': {'pop': 182265, 'tracts': 31},
'Barbour': {'pop': 27457, 'tracts': 9},
'Bibb': {'pop': 22915, 'tracts': 4},
'Blount': {'pop': 57322, 'tracts': 9},
'Bullock': {'pop': 10914, 'tracts': 3},
'Butler': {'pop': 20947, 'tracts': 9},
'Calhoun': {'pop': 118572, 'tracts': 31},
'Chambers': {'pop': 34215, 'tracts': 9},
'Cherokee': {'pop': 25989, 'tracts': 6},
'Chilton': {'pop': 43643, 'tracts': 9},
'Choctaw': {'pop': 13859, 'tracts': 4},
'Clarke': {'pop': 25833, 'tracts': 9},
'Clay': {'pop': 13932, 'tracts': 4},
'Cleburne': {'pop': 14972, 'tracts': 4},
'Coffee': {'pop': 49948, 'tracts': 14},
'Colbert': {'pop': 54428, 'tracts': 14},
'Conecuh': {'pop': 13228, 'tracts': 5},
'Coosa': {'pop': 11539, 'tracts': 3},
'Covington': {'pop': 37765, 'tracts': 14},
'Crenshaw': {'pop': 13906, 'tracts': 6},
'Cullman': {'pop': 80406, 'tracts': 18},
'Dale': {'pop': 50251, 'tracts': 14},
'Dallas': {'pop': 43820, 'tracts': 15},
'DeKalb': {'pop': 71109, 'tracts': 14},
'Elmore': {'pop': 79303, 'tracts': 15},
'Escambia': {'pop': 38319, 'tracts': 9},
'Etowah': {'pop': 104430, 'tracts': 30},
'Fayette': {'pop': 17241, 'tracts': 5},
'Franklin': {'pop': 31704, 'tracts': 9},
'Geneva': {'pop': 26790, 'tracts': 6},
'Greene': {'pop': 9045, 'tracts': 3},
'Hale': {'pop': 15760, 'tracts': 6},
'Henry': {'pop': 17302, 'tracts': 6},
'Houston': {'pop': 101547, 'tracts': 22},
'Jackson': {'pop': 53227, 'tracts': 11},
'Jefferson': {'pop': 658466, 'tracts': 163},
'Lamar': {'pop': 14564, 'tracts': 3},
'Lauderdale': {'pop': 92709, 'tracts': 22},
'Lawrence': {'pop': 34339, 'tracts': 9},
'Lee': {'pop': 140247, 'tracts': 27},
'Limestone': {'pop': 82782, 'tracts': 16},
'Lowndes': {'pop': 11299, 'tracts': 4},
'Macon': {'pop': 21452, 'tracts': 12},
'Madison': {'pop': 334811, 'tracts': 73},
'Marengo': {'pop': 21027, 'tracts': 6},
'Marion': {'pop': 30776, 'tracts': 8},
'Marshall': {'pop': 93019, 'tracts': 18},
'Mobile': {'pop': 412992, 'tracts': 114},
'Monroe': {'pop': 23068, 'tracts': 7},
'Montgomery': {'pop': 229363, 'tracts': 65},
'Morgan': {'pop': 119490, 'tracts': 27},
'Perry': {'pop': 10591, 'tracts': 3},
'Pickens': {'pop': 19746, 'tracts': 5},
'Pike': {'pop': 32899, 'tracts': 8},
'Randolph': {'pop': 22913, 'tracts': 6},
'Russell': {'pop': 52947, 'tracts': 13},
'Shelby': {'pop': 195085, 'tracts': 48},
'St. Clair': {'pop': 83593, 'tracts': 13},
'Sumter': {'pop': 13763, 'tracts': 4},
'Talladega': {'pop': 82291, 'tracts': 22},
'Tallapoosa': {'pop': 41616, 'tracts': 10},
'Tuscaloosa': {'pop': 194656, 'tracts': 47},
'Walker': {'pop': 67023, 'tracts': 18},
'Washington': {'pop': 17581, 'tracts': 5},
'Wilcox': {'pop': 11670, 'tracts': 4},
'Winston': {'pop': 24484, 'tracts': 7}},
'AR': {'Arkansas': {'pop': 19019, 'tracts': 8},
'Ashley': {'pop': 21853, 'tracts': 7},
'Baxter': {'pop': 41513, 'tracts': 9},
'Benton': {'pop': 221339, 'tracts': 49},
'Boone': {'pop': 36903, 'tracts': 7},
'Bradley': {'pop': 11508, 'tracts': 5},
'Calhoun': {'pop': 5368, 'tracts': 2},
'Carroll': {'pop': 27446, 'tracts': 5},
'Chicot': {'pop': 11800, 'tracts': 4},
'Clark': {'pop': 22995, 'tracts': 5},
'Clay': {'pop': 16083, 'tracts': 6},
'Cleburne': {'pop': 25970, 'tracts': 7},
'Cleveland': {'pop': 8689, 'tracts': 2},
'Columbia': {'pop': 24552, 'tracts': 5},
'Conway': {'pop': 21273, 'tracts': 6},
'Craighead': {'pop': 96443, 'tracts': 17},
'Crawford': {'pop': 61948, 'tracts': 11},
'Crittenden': {'pop': 50902, 'tracts': 20},
'Cross': {'pop': 17870, 'tracts': 6},
'Dallas': {'pop': 8116, 'tracts': 3},
'Desha': {'pop': 13008, 'tracts': 5},
'Drew': {'pop': 18509, 'tracts': 5},
'Faulkner': {'pop': 113237, 'tracts': 25},
'Franklin': {'pop': 18125, 'tracts': 3},
'Fulton': {'pop': 12245, 'tracts': 2},
'Garland': {'pop': 96024, 'tracts': 20},
'Grant': {'pop': 17853, 'tracts': 4},
'Greene': {'pop': 42090, 'tracts': 9},
'Hempstead': {'pop': 22609, 'tracts': 5},
'Hot Spring': {'pop': 32923, 'tracts': 7},
'Howard': {'pop': 13789, 'tracts': 3},
'Independence': {'pop': 36647, 'tracts': 8},
'Izard': {'pop': 13696, 'tracts': 4},
'Jackson': {'pop': 17997, 'tracts': 5},
'Jefferson': {'pop': 77435, 'tracts': 24},
'Johnson': {'pop': 25540, 'tracts': 6},
'Lafayette': {'pop': 7645, 'tracts': 2},
'Lawrence': {'pop': 17415, 'tracts': 6},
'Lee': {'pop': 10424, 'tracts': 4},
'Lincoln': {'pop': 14134, 'tracts': 4},
'Little River': {'pop': 13171, 'tracts': 4},
'Logan': {'pop': 22353, 'tracts': 6},
'Lonoke': {'pop': 68356, 'tracts': 16},
'Madison': {'pop': 15717, 'tracts': 4},
'Marion': {'pop': 16653, 'tracts': 4},
'Miller': {'pop': 43462, 'tracts': 12},
'Mississippi': {'pop': 46480, 'tracts': 12},
'Monroe': {'pop': 8149, 'tracts': 3},
'Montgomery': {'pop': 9487, 'tracts': 3},
'Nevada': {'pop': 8997, 'tracts': 3},
'Newton': {'pop': 8330, 'tracts': 2},
'Ouachita': {'pop': 26120, 'tracts': 6},
'Perry': {'pop': 10445, 'tracts': 3},
'Phillips': {'pop': 21757, 'tracts': 6},
'Pike': {'pop': 11291, 'tracts': 3},
'Poinsett': {'pop': 24583, 'tracts': 7},
'Polk': {'pop': 20662, 'tracts': 6},
'Pope': {'pop': 61754, 'tracts': 11},
'Prairie': {'pop': 8715, 'tracts': 3},
'Pulaski': {'pop': 382748, 'tracts': 95},
'Randolph': {'pop': 17969, 'tracts': 4},
'Saline': {'pop': 107118, 'tracts': 21},
'Scott': {'pop': 11233, 'tracts': 3},
'Searcy': {'pop': 8195, 'tracts': 3},
'Sebastian': {'pop': 125744, 'tracts': 26},
'Sevier': {'pop': 17058, 'tracts': 4},
'Sharp': {'pop': 17264, 'tracts': 4},
'St. Francis': {'pop': 28258, 'tracts': 6},
'Stone': {'pop': 12394, 'tracts': 3},
'Union': {'pop': 41639, 'tracts': 10},
'Van Buren': {'pop': 17295, 'tracts': 5},
'Washington': {'pop': 203065, 'tracts': 32},
'White': {'pop': 77076, 'tracts': 13},
'Woodruff': {'pop': 7260, 'tracts': 2},
'Yell': {'pop': 22185, 'tracts': 6}},
'AZ': {'Apache': {'pop': 71518, 'tracts': 16},
'Cochise': {'pop': 131346, 'tracts': 32},
'Coconino': {'pop': 134421, 'tracts': 28},
'Gila': {'pop': 53597, 'tracts': 16},
'Graham': {'pop': 37220, 'tracts': 9},
'Greenlee': {'pop': 8437, 'tracts': 3},
'La Paz': {'pop': 20489, 'tracts': 9},
'Maricopa': {'pop': 3817117, 'tracts': 916},
'Mohave': {'pop': 200186, 'tracts': 43},
'Navajo': {'pop': 107449, 'tracts': 31},
'Pima': {'pop': 980263, 'tracts': 241},
'Pinal': {'pop': 375770, 'tracts': 75},
'Santa Cruz': {'pop': 47420, 'tracts': 10},
'Yavapai': {'pop': 211033, 'tracts': 42},
'Yuma': {'pop': 195751, 'tracts': 55}},
'CA': {'Alameda': {'pop': 1510271, 'tracts': 360},
'Alpine': {'pop': 1175, 'tracts': 1},
'Amador': {'pop': 38091, 'tracts': 9},
'Butte': {'pop': 220000, 'tracts': 51},
'Calaveras': {'pop': 45578, 'tracts': 10},
'Colusa': {'pop': 21419, 'tracts': 5},
'Contra Costa': {'pop': 1049025, 'tracts': 208},
'Del Norte': {'pop': 28610, 'tracts': 7},
'El Dorado': {'pop': 181058, 'tracts': 43},
'Fresno': {'pop': 930450, 'tracts': 199},
'Glenn': {'pop': 28122, 'tracts': 6},
'Humboldt': {'pop': 134623, 'tracts': 30},
'Imperial': {'pop': 174528, 'tracts': 31},
'Inyo': {'pop': 18546, 'tracts': 6},
'Kern': {'pop': 839631, 'tracts': 151},
'Kings': {'pop': 152982, 'tracts': 27},
'Lake': {'pop': 64665, 'tracts': 15},
'Lassen': {'pop': 34895, 'tracts': 9},
'Los Angeles': {'pop': 9818605, 'tracts': 2343},
'Madera': {'pop': 150865, 'tracts': 23},
'Marin': {'pop': 252409, 'tracts': 55},
'Mariposa': {'pop': 18251, 'tracts': 6},
'Mendocino': {'pop': 87841, 'tracts': 20},
'Merced': {'pop': 255793, 'tracts': 49},
'Modoc': {'pop': 9686, 'tracts': 4},
'Mono': {'pop': 14202, 'tracts': 3},
'Monterey': {'pop': 415057, 'tracts': 93},
'Napa': {'pop': 136484, 'tracts': 40},
'Nevada': {'pop': 98764, 'tracts': 20},
'Orange': {'pop': 3010232, 'tracts': 583},
'Placer': {'pop': 348432, 'tracts': 85},
'Plumas': {'pop': 20007, 'tracts': 7},
'Riverside': {'pop': 2189641, 'tracts': 453},
'Sacramento': {'pop': 1418788, 'tracts': 317},
'San Benito': {'pop': 55269, 'tracts': 11},
'San Bernardino': {'pop': 2035210, 'tracts': 369},
'San Diego': {'pop': 3095313, 'tracts': 628},
'San Francisco': {'pop': 805235, 'tracts': 196},
'San Joaquin': {'pop': 685306, 'tracts': 139},
'San Luis Obispo': {'pop': 269637, 'tracts': 53},
'San Mateo': {'pop': 718451, 'tracts': 158},
'Santa Barbara': {'pop': 423895, 'tracts': 90},
'Santa Clara': {'pop': 1781642, 'tracts': 372},
'Santa Cruz': {'pop': 262382, 'tracts': 52},
'Shasta': {'pop': 177223, 'tracts': 48},
'Sierra': {'pop': 3240, 'tracts': 1},
'Siskiyou': {'pop': 44900, 'tracts': 14},
'Solano': {'pop': 413344, 'tracts': 96},
'Sonoma': {'pop': 483878, 'tracts': 99},
'Stanislaus': {'pop': 514453, 'tracts': 94},
'Sutter': {'pop': 94737, 'tracts': 21},
'Tehama': {'pop': 63463, 'tracts': 11},
'Trinity': {'pop': 13786, 'tracts': 5},
'Tulare': {'pop': 442179, 'tracts': 78},
'Tuolumne': {'pop': 55365, 'tracts': 11},
'Ventura': {'pop': 823318, 'tracts': 174},
'Yolo': {'pop': 200849, 'tracts': 41},
'Yuba': {'pop': 72155, 'tracts': 14}},
'CO': {'Adams': {'pop': 441603, 'tracts': 97},
'Alamosa': {'pop': 15445, 'tracts': 4},
'Arapahoe': {'pop': 572003, 'tracts': 147},
'Archuleta': {'pop': 12084, 'tracts': 4},
'Baca': {'pop': 3788, 'tracts': 2},
'Bent': {'pop': 6499, 'tracts': 1},
'Boulder': {'pop': 294567, 'tracts': 68},
'Broomfield': {'pop': 55889, 'tracts': 18},
'Chaffee': {'pop': 17809, 'tracts': 5},
'Cheyenne': {'pop': 1836, 'tracts': 1},
'Clear Creek': {'pop': 9088, 'tracts': 3},
'Conejos': {'pop': 8256, 'tracts': 2},
'Costilla': {'pop': 3524, 'tracts': 2},
'Crowley': {'pop': 5823, 'tracts': 1},
'Custer': {'pop': 4255, 'tracts': 1},
'Delta': {'pop': 30952, 'tracts': 7},
'Denver': {'pop': 600158, 'tracts': 144},
'Dolores': {'pop': 2064, 'tracts': 1},
'Douglas': {'pop': 285465, 'tracts': 61},
'Eagle': {'pop': 52197, 'tracts': 14},
'El Paso': {'pop': 622263, 'tracts': 130},
'Elbert': {'pop': 23086, 'tracts': 7},
'Fremont': {'pop': 46824, 'tracts': 14},
'Garfield': {'pop': 56389, 'tracts': 11},
'Gilpin': {'pop': 5441, 'tracts': 1},
'Grand': {'pop': 14843, 'tracts': 3},
'Gunnison': {'pop': 15324, 'tracts': 4},
'Hinsdale': {'pop': 843, 'tracts': 1},
'Huerfano': {'pop': 6711, 'tracts': 2},
'Jackson': {'pop': 1394, 'tracts': 1},
'Jefferson': {'pop': 534543, 'tracts': 138},
'Kiowa': {'pop': 1398, 'tracts': 1},
'Kit Carson': {'pop': 8270, 'tracts': 3},
'La Plata': {'pop': 51334, 'tracts': 10},
'Lake': {'pop': 7310, 'tracts': 2},
'Larimer': {'pop': 299630, 'tracts': 73},
'Las Animas': {'pop': 15507, 'tracts': 6},
'Lincoln': {'pop': 5467, 'tracts': 2},
'Logan': {'pop': 22709, 'tracts': 6},
'Mesa': {'pop': 146723, 'tracts': 29},
'Mineral': {'pop': 712, 'tracts': 1},
'Moffat': {'pop': 13795, 'tracts': 4},
'Montezuma': {'pop': 25535, 'tracts': 7},
'Montrose': {'pop': 41276, 'tracts': 10},
'Morgan': {'pop': 28159, 'tracts': 8},
'Otero': {'pop': 18831, 'tracts': 7},
'Ouray': {'pop': 4436, 'tracts': 1},
'Park': {'pop': 16206, 'tracts': 5},
'Phillips': {'pop': 4442, 'tracts': 2},
'Pitkin': {'pop': 17148, 'tracts': 4},
'Prowers': {'pop': 12551, 'tracts': 5},
'Pueblo': {'pop': 159063, 'tracts': 55},
'Rio Blanco': {'pop': 6666, 'tracts': 2},
'Rio Grande': {'pop': 11982, 'tracts': 3},
'Routt': {'pop': 23509, 'tracts': 8},
'Saguache': {'pop': 6108, 'tracts': 2},
'San Juan': {'pop': 699, 'tracts': 1},
'San Miguel': {'pop': 7359, 'tracts': 4},
'Sedgwick': {'pop': 2379, 'tracts': 1},
'Summit': {'pop': 27994, 'tracts': 5},
'Teller': {'pop': 23350, 'tracts': 6},
'Washington': {'pop': 4814, 'tracts': 2},
'Weld': {'pop': 252825, 'tracts': 77},
'Yuma': {'pop': 10043, 'tracts': 2}},
'CT': {'Fairfield': {'pop': 916829, 'tracts': 211},
'Hartford': {'pop': 894014, 'tracts': 224},
'Litchfield': {'pop': 189927, 'tracts': 51},
'Middlesex': {'pop': 165676, 'tracts': 36},
'New Haven': {'pop': 862477, 'tracts': 190},
'New London': {'pop': 274055, 'tracts': 66},
'Tolland': {'pop': 152691, 'tracts': 29},
'Windham': {'pop': 118428, 'tracts': 25}},
'DC': {'District of Columbia': {'pop': 601723, 'tracts': 179}},
'DE': {'Kent': {'pop': 162310, 'tracts': 33},
'New Castle': {'pop': 538479, 'tracts': 131},
'Sussex': {'pop': 197145, 'tracts': 54}},
'FL': {'Alachua': {'pop': 247336, 'tracts': 56},
'Baker': {'pop': 27115, 'tracts': 4},
'Bay': {'pop': 168852, 'tracts': 44},
'Bradford': {'pop': 28520, 'tracts': 4},
'Brevard': {'pop': 543376, 'tracts': 113},
'Broward': {'pop': 1748066, 'tracts': 361},
'Calhoun': {'pop': 14625, 'tracts': 3},
'Charlotte': {'pop': 159978, 'tracts': 39},
'Citrus': {'pop': 141236, 'tracts': 27},
'Clay': {'pop': 190865, 'tracts': 30},
'Collier': {'pop': 321520, 'tracts': 73},
'Columbia': {'pop': 67531, 'tracts': 12},
'DeSoto': {'pop': 34862, 'tracts': 9},
'Dixie': {'pop': 16422, 'tracts': 3},
'Duval': {'pop': 864263, 'tracts': 173},
'Escambia': {'pop': 297619, 'tracts': 71},
'Flagler': {'pop': 95696, 'tracts': 20},
'Franklin': {'pop': 11549, 'tracts': 4},
'Gadsden': {'pop': 46389, 'tracts': 9},
'Gilchrist': {'pop': 16939, 'tracts': 5},
'Glades': {'pop': 12884, 'tracts': 4},
'Gulf': {'pop': 15863, 'tracts': 3},
'Hamilton': {'pop': 14799, 'tracts': 3},
'Hardee': {'pop': 27731, 'tracts': 6},
'Hendry': {'pop': 39140, 'tracts': 7},
'Hernando': {'pop': 172778, 'tracts': 45},
'Highlands': {'pop': 98786, 'tracts': 27},
'Hillsborough': {'pop': 1229226, 'tracts': 321},
'Holmes': {'pop': 19927, 'tracts': 4},
'Indian River': {'pop': 138028, 'tracts': 30},
'Jackson': {'pop': 49746, 'tracts': 11},
'Jefferson': {'pop': 14761, 'tracts': 3},
'Lafayette': {'pop': 8870, 'tracts': 2},
'Lake': {'pop': 297052, 'tracts': 56},
'Lee': {'pop': 618754, 'tracts': 166},
'Leon': {'pop': 275487, 'tracts': 68},
'Levy': {'pop': 40801, 'tracts': 9},
'Liberty': {'pop': 8365, 'tracts': 2},
'Madison': {'pop': 19224, 'tracts': 5},
'Manatee': {'pop': 322833, 'tracts': 78},
'Marion': {'pop': 331298, 'tracts': 63},
'Martin': {'pop': 146318, 'tracts': 35},
'Miami-Dade': {'pop': 2496435, 'tracts': 519},
'Monroe': {'pop': 73090, 'tracts': 30},
'Nassau': {'pop': 73314, 'tracts': 12},
'Okaloosa': {'pop': 180822, 'tracts': 41},
'Okeechobee': {'pop': 39996, 'tracts': 12},
'Orange': {'pop': 1145956, 'tracts': 207},
'Osceola': {'pop': 268685, 'tracts': 41},
'Palm Beach': {'pop': 1320134, 'tracts': 337},
'Pasco': {'pop': 464697, 'tracts': 134},
'Pinellas': {'pop': 916542, 'tracts': 245},
'Polk': {'pop': 602095, 'tracts': 154},
'Putnam': {'pop': 74364, 'tracts': 17},
'Santa Rosa': {'pop': 151372, 'tracts': 25},
'Sarasota': {'pop': 379448, 'tracts': 94},
'Seminole': {'pop': 422718, 'tracts': 86},
'St. Johns': {'pop': 190039, 'tracts': 40},
'St. Lucie': {'pop': 277789, 'tracts': 44},
'Sumter': {'pop': 93420, 'tracts': 19},
'Suwannee': {'pop': 41551, 'tracts': 7},
'Taylor': {'pop': 22570, 'tracts': 4},
'Union': {'pop': 15535, 'tracts': 3},
'Volusia': {'pop': 494593, 'tracts': 113},
'Wakulla': {'pop': 30776, 'tracts': 4},
'Walton': {'pop': 55043, 'tracts': 11},
'Washington': {'pop': 24896, 'tracts': 7}},
'GA': {'Appling': {'pop': 18236, 'tracts': 5},
'Atkinson': {'pop': 8375, 'tracts': 3},
'Bacon': {'pop': 11096, 'tracts': 3},
'Baker': {'pop': 3451, 'tracts': 2},
'Baldwin': {'pop': 45720, 'tracts': 9},
'Banks': {'pop': 18395, 'tracts': 4},
'Barrow': {'pop': 69367, 'tracts': 18},
'Bartow': {'pop': 100157, 'tracts': 15},
'Ben Hill': {'pop': 17634, 'tracts': 5},
'Berrien': {'pop': 19286, 'tracts': 6},
'Bibb': {'pop': 155547, 'tracts': 44},
'Bleckley': {'pop': 13063, 'tracts': 3},
'Brantley': {'pop': 18411, 'tracts': 3},
'Brooks': {'pop': 16243, 'tracts': 5},
'Bryan': {'pop': 30233, 'tracts': 7},
'Bulloch': {'pop': 70217, 'tracts': 12},
'Burke': {'pop': 23316, 'tracts': 6},
'Butts': {'pop': 23655, 'tracts': 3},
'Calhoun': {'pop': 6694, 'tracts': 2},
'Camden': {'pop': 50513, 'tracts': 10},
'Candler': {'pop': 10998, 'tracts': 3},
'Carroll': {'pop': 110527, 'tracts': 17},
'Catoosa': {'pop': 63942, 'tracts': 11},
'Charlton': {'pop': 12171, 'tracts': 2},
'Chatham': {'pop': 265128, 'tracts': 72},
'Chattahoochee': {'pop': 11267, 'tracts': 5},
'Chattooga': {'pop': 26015, 'tracts': 6},
'Cherokee': {'pop': 214346, 'tracts': 26},
'Clarke': {'pop': 116714, 'tracts': 30},
'Clay': {'pop': 3183, 'tracts': 1},
'Clayton': {'pop': 259424, 'tracts': 50},
'Clinch': {'pop': 6798, 'tracts': 2},
'Cobb': {'pop': 688078, 'tracts': 120},
'Coffee': {'pop': 42356, 'tracts': 9},
'Colquitt': {'pop': 45498, 'tracts': 10},
'Columbia': {'pop': 124053, 'tracts': 20},
'Cook': {'pop': 17212, 'tracts': 4},
'Coweta': {'pop': 127317, 'tracts': 20},
'Crawford': {'pop': 12630, 'tracts': 3},
'Crisp': {'pop': 23439, 'tracts': 6},
'Dade': {'pop': 16633, 'tracts': 4},
'Dawson': {'pop': 22330, 'tracts': 3},
'DeKalb': {'pop': 691893, 'tracts': 145},
'Decatur': {'pop': 27842, 'tracts': 7},
'Dodge': {'pop': 21796, 'tracts': 6},
'Dooly': {'pop': 14918, 'tracts': 3},
'Dougherty': {'pop': 94565, 'tracts': 27},
'Douglas': {'pop': 132403, 'tracts': 20},
'Early': {'pop': 11008, 'tracts': 5},
'Echols': {'pop': 4034, 'tracts': 2},
'Effingham': {'pop': 52250, 'tracts': 10},
'Elbert': {'pop': 20166, 'tracts': 5},
'Emanuel': {'pop': 22598, 'tracts': 6},
'Evans': {'pop': 11000, 'tracts': 3},
'Fannin': {'pop': 23682, 'tracts': 5},
'Fayette': {'pop': 106567, 'tracts': 20},
'Floyd': {'pop': 96317, 'tracts': 20},
'Forsyth': {'pop': 175511, 'tracts': 45},
'Franklin': {'pop': 22084, 'tracts': 5},
'Fulton': {'pop': 920581, 'tracts': 204},
'Gilmer': {'pop': 28292, 'tracts': 5},
'Glascock': {'pop': 3082, 'tracts': 1},
'Glynn': {'pop': 79626, 'tracts': 15},
'Gordon': {'pop': 55186, 'tracts': 9},
'Grady': {'pop': 25011, 'tracts': 6},
'Greene': {'pop': 15994, 'tracts': 7},
'Gwinnett': {'pop': 805321, 'tracts': 113},
'Habersham': {'pop': 43041, 'tracts': 8},
'Hall': {'pop': 179684, 'tracts': 36},
'Hancock': {'pop': 9429, 'tracts': 2},
'Haralson': {'pop': 28780, 'tracts': 5},
'Harris': {'pop': 32024, 'tracts': 5},
'Hart': {'pop': 25213, 'tracts': 5},
'Heard': {'pop': 11834, 'tracts': 3},
'Henry': {'pop': 203922, 'tracts': 25},
'Houston': {'pop': 139900, 'tracts': 23},
'Irwin': {'pop': 9538, 'tracts': 2},
'Jackson': {'pop': 60485, 'tracts': 11},
'Jasper': {'pop': 13900, 'tracts': 3},
'Jeff Davis': {'pop': 15068, 'tracts': 3},
'Jefferson': {'pop': 16930, 'tracts': 4},
'Jenkins': {'pop': 8340, 'tracts': 2},
'Johnson': {'pop': 9980, 'tracts': 3},
'Jones': {'pop': 28669, 'tracts': 6},
'Lamar': {'pop': 18317, 'tracts': 3},
'Lanier': {'pop': 10078, 'tracts': 2},
'Laurens': {'pop': 48434, 'tracts': 13},
'Lee': {'pop': 28298, 'tracts': 5},
'Liberty': {'pop': 63453, 'tracts': 14},
'Lincoln': {'pop': 7996, 'tracts': 2},
'Long': {'pop': 14464, 'tracts': 3},
'Lowndes': {'pop': 109233, 'tracts': 25},
'Lumpkin': {'pop': 29966, 'tracts': 4},
'Macon': {'pop': 14740, 'tracts': 4},
'Madison': {'pop': 28120, 'tracts': 6},
'Marion': {'pop': 8742, 'tracts': 2},
'McDuffie': {'pop': 21875, 'tracts': 5},
'McIntosh': {'pop': 14333, 'tracts': 4},
'Meriwether': {'pop': 21992, 'tracts': 4},
'Miller': {'pop': 6125, 'tracts': 3},
'Mitchell': {'pop': 23498, 'tracts': 5},
'Monroe': {'pop': 26424, 'tracts': 5},
'Montgomery': {'pop': 9123, 'tracts': 3},
'Morgan': {'pop': 17868, 'tracts': 5},
'Murray': {'pop': 39628, 'tracts': 8},
'Muscogee': {'pop': 189885, 'tracts': 53},
'Newton': {'pop': 99958, 'tracts': 13},
'Oconee': {'pop': 32808, 'tracts': 6},
'Oglethorpe': {'pop': 14899, 'tracts': 4},
'Paulding': {'pop': 142324, 'tracts': 19},
'Peach': {'pop': 27695, 'tracts': 6},
'Pickens': {'pop': 29431, 'tracts': 6},
'Pierce': {'pop': 18758, 'tracts': 4},
'Pike': {'pop': 17869, 'tracts': 4},
'Polk': {'pop': 41475, 'tracts': 7},
'Pulaski': {'pop': 12010, 'tracts': 3},
'Putnam': {'pop': 21218, 'tracts': 5},
'Quitman': {'pop': 2513, 'tracts': 1},
'Rabun': {'pop': 16276, 'tracts': 5},
'Randolph': {'pop': 7719, 'tracts': 2},
'Richmond': {'pop': 200549, 'tracts': 47},
'Rockdale': {'pop': 85215, 'tracts': 15},
'Schley': {'pop': 5010, 'tracts': 2},
'Screven': {'pop': 14593, 'tracts': 5},
'Seminole': {'pop': 8729, 'tracts': 3},
'Spalding': {'pop': 64073, 'tracts': 12},
'Stephens': {'pop': 26175, 'tracts': 5},
'Stewart': {'pop': 6058, 'tracts': 2},
'Sumter': {'pop': 32819, 'tracts': 8},
'Talbot': {'pop': 6865, 'tracts': 3},
'Taliaferro': {'pop': 1717, 'tracts': 1},
'Tattnall': {'pop': 25520, 'tracts': 5},
'Taylor': {'pop': 8906, 'tracts': 3},
'Telfair': {'pop': 16500, 'tracts': 3},
'Terrell': {'pop': 9315, 'tracts': 4},
'Thomas': {'pop': 44720, 'tracts': 11},
'Tift': {'pop': 40118, 'tracts': 9},
'Toombs': {'pop': 27223, 'tracts': 6},
'Towns': {'pop': 10471, 'tracts': 3},
'Treutlen': {'pop': 6885, 'tracts': 2},
'Troup': {'pop': 67044, 'tracts': 14},
'Turner': {'pop': 8930, 'tracts': 2},
'Twiggs': {'pop': 9023, 'tracts': 2},
'Union': {'pop': 21356, 'tracts': 6},
'Upson': {'pop': 27153, 'tracts': 7},
'Walker': {'pop': 68756, 'tracts': 13},
'Walton': {'pop': 83768, 'tracts': 15},
'Ware': {'pop': 36312, 'tracts': 9},
'Warren': {'pop': 5834, 'tracts': 2},
'Washington': {'pop': 21187, 'tracts': 5},
'Wayne': {'pop': 30099, 'tracts': 6},
'Webster': {'pop': 2799, 'tracts': 2},
'Wheeler': {'pop': 7421, 'tracts': 2},
'White': {'pop': 27144, 'tracts': 5},
'Whitfield': {'pop': 102599, 'tracts': 18},
'Wilcox': {'pop': 9255, 'tracts': 4},
'Wilkes': {'pop': 10593, 'tracts': 4},
'Wilkinson': {'pop': 9563, 'tracts': 3},
'Worth': {'pop': 21679, 'tracts': 5}},
'HI': {'Hawaii': {'pop': 185079, 'tracts': 34},
'Honolulu': {'pop': 953207, 'tracts': 244},
'Kalawao': {'pop': 90, 'tracts': 1},
'Kauai': {'pop': 67091, 'tracts': 16},
'Maui': {'pop': 154834, 'tracts': 37}},
'IA': {'Adair': {'pop': 7682, 'tracts': 3},
'Adams': {'pop': 4029, 'tracts': 2},
'Allamakee': {'pop': 14330, 'tracts': 5},
'Appanoose': {'pop': 12887, 'tracts': 5},
'Audubon': {'pop': 6119, 'tracts': 3},
'Benton': {'pop': 26076, 'tracts': 7},
'Black Hawk': {'pop': 131090, 'tracts': 38},
'Boone': {'pop': 26306, 'tracts': 7},
'Bremer': {'pop': 24276, 'tracts': 8},
'Buchanan': {'pop': 20958, 'tracts': 6},
'Buena Vista': {'pop': 20260, 'tracts': 6},
'Butler': {'pop': 14867, 'tracts': 5},
'Calhoun': {'pop': 9670, 'tracts': 4},
'Carroll': {'pop': 20816, 'tracts': 6},
'Cass': {'pop': 13956, 'tracts': 5},
'Cedar': {'pop': 18499, 'tracts': 5},
'Cerro Gordo': {'pop': 44151, 'tracts': 11},
'Cherokee': {'pop': 12072, 'tracts': 4},
'Chickasaw': {'pop': 12439, 'tracts': 4},
'Clarke': {'pop': 9286, 'tracts': 3},
'Clay': {'pop': 16667, 'tracts': 4},
'Clayton': {'pop': 18129, 'tracts': 6},
'Clinton': {'pop': 49116, 'tracts': 12},
'Crawford': {'pop': 17096, 'tracts': 5},
'Dallas': {'pop': 66135, 'tracts': 15},
'Davis': {'pop': 8753, 'tracts': 2},
'Decatur': {'pop': 8457, 'tracts': 3},
'Delaware': {'pop': 17764, 'tracts': 4},
'Des Moines': {'pop': 40325, 'tracts': 11},
'Dickinson': {'pop': 16667, 'tracts': 5},
'Dubuque': {'pop': 93653, 'tracts': 26},
'Emmet': {'pop': 10302, 'tracts': 4},
'Fayette': {'pop': 20880, 'tracts': 7},
'Floyd': {'pop': 16303, 'tracts': 5},
'Franklin': {'pop': 10680, 'tracts': 3},
'Fremont': {'pop': 7441, 'tracts': 3},
'Greene': {'pop': 9336, 'tracts': 4},
'Grundy': {'pop': 12453, 'tracts': 4},
'Guthrie': {'pop': 10954, 'tracts': 3},
'Hamilton': {'pop': 15673, 'tracts': 5},
'Hancock': {'pop': 11341, 'tracts': 4},
'Hardin': {'pop': 17534, 'tracts': 6},
'Harrison': {'pop': 14928, 'tracts': 5},
'Henry': {'pop': 20145, 'tracts': 5},
'Howard': {'pop': 9566, 'tracts': 3},
'Humboldt': {'pop': 9815, 'tracts': 4},
'Ida': {'pop': 7089, 'tracts': 3},
'Iowa': {'pop': 16355, 'tracts': 4},
'Jackson': {'pop': 19848, 'tracts': 6},
'Jasper': {'pop': 36842, 'tracts': 9},
'Jefferson': {'pop': 16843, 'tracts': 4},
'Johnson': {'pop': 130882, 'tracts': 24},
'Jones': {'pop': 20638, 'tracts': 5},
'Keokuk': {'pop': 10511, 'tracts': 4},
'Kossuth': {'pop': 15543, 'tracts': 6},
'Lee': {'pop': 35862, 'tracts': 11},
'Linn': {'pop': 211226, 'tracts': 45},
'Louisa': {'pop': 11387, 'tracts': 3},
'Lucas': {'pop': 8898, 'tracts': 4},
'Lyon': {'pop': 11581, 'tracts': 3},
'Madison': {'pop': 15679, 'tracts': 3},
'Mahaska': {'pop': 22381, 'tracts': 7},
'Marion': {'pop': 33309, 'tracts': 8},
'Marshall': {'pop': 40648, 'tracts': 10},
'Mills': {'pop': 15059, 'tracts': 5},
'Mitchell': {'pop': 10776, 'tracts': 3},
'Monona': {'pop': 9243, 'tracts': 4},
'Monroe': {'pop': 7970, 'tracts': 3},
'Montgomery': {'pop': 10740, 'tracts': 4},
'Muscatine': {'pop': 42745, 'tracts': 10},
"O'Brien": {'pop': 14398, 'tracts': 4},
'Osceola': {'pop': 6462, 'tracts': 2},
'Page': {'pop': 15932, 'tracts': 6},
'Palo Alto': {'pop': 9421, 'tracts': 4},
'Plymouth': {'pop': 24986, 'tracts': 6},
'Pocahontas': {'pop': 7310, 'tracts': 3},
'Polk': {'pop': 430640, 'tracts': 98},
'Pottawattamie': {'pop': 93158, 'tracts': 30},
'Poweshiek': {'pop': 18914, 'tracts': 5},
'Ringgold': {'pop': 5131, 'tracts': 2},
'Sac': {'pop': 10350, 'tracts': 4},
'Scott': {'pop': 165224, 'tracts': 47},
'Shelby': {'pop': 12167, 'tracts': 4},
'Sioux': {'pop': 33704, 'tracts': 7},
'Story': {'pop': 89542, 'tracts': 20},
'Tama': {'pop': 17767, 'tracts': 6},
'Taylor': {'pop': 6317, 'tracts': 3},
'Union': {'pop': 12534, 'tracts': 4},
'Van Buren': {'pop': 7570, 'tracts': 2},
'Wapello': {'pop': 35625, 'tracts': 11},
'Warren': {'pop': 46225, 'tracts': 12},
'Washington': {'pop': 21704, 'tracts': 5},
'Wayne': {'pop': 6403, 'tracts': 3},
'Webster': {'pop': 38013, 'tracts': 12},
'Winnebago': {'pop': 10866, 'tracts': 3},
'Winneshiek': {'pop': 21056, 'tracts': 5},
'Woodbury': {'pop': 102172, 'tracts': 26},
'Worth': {'pop': 7598, 'tracts': 3},
'Wright': {'pop': 13229, 'tracts': 5}},
'ID': {'Ada': {'pop': 392365, 'tracts': 59},
'Adams': {'pop': 3976, 'tracts': 2},
'Bannock': {'pop': 82839, 'tracts': 22},
'Bear Lake': {'pop': 5986, 'tracts': 2},
'Benewah': {'pop': 9285, 'tracts': 2},
'Bingham': {'pop': 45607, 'tracts': 8},
'Blaine': {'pop': 21376, 'tracts': 4},
'Boise': {'pop': 7028, 'tracts': 1},
'Bonner': {'pop': 40877, 'tracts': 9},
'Bonneville': {'pop': 104234, 'tracts': 21},
'Boundary': {'pop': 10972, 'tracts': 2},
'Butte': {'pop': 2891, 'tracts': 1},
'Camas': {'pop': 1117, 'tracts': 1},
'Canyon': {'pop': 188923, 'tracts': 29},
'Caribou': {'pop': 6963, 'tracts': 2},
'Cassia': {'pop': 22952, 'tracts': 6},
'Clark': {'pop': 982, 'tracts': 1},
'Clearwater': {'pop': 8761, 'tracts': 2},
'Custer': {'pop': 4368, 'tracts': 1},
'Elmore': {'pop': 27038, 'tracts': 5},
'Franklin': {'pop': 12786, 'tracts': 2},
'Fremont': {'pop': 13242, 'tracts': 3},
'Gem': {'pop': 16719, 'tracts': 3},
'Gooding': {'pop': 15464, 'tracts': 2},
'Idaho': {'pop': 16267, 'tracts': 5},
'Jefferson': {'pop': 26140, 'tracts': 4},
'Jerome': {'pop': 22374, 'tracts': 5},
'Kootenai': {'pop': 138494, 'tracts': 25},
'Latah': {'pop': 37244, 'tracts': 7},
'Lemhi': {'pop': 7936, 'tracts': 3},
'Lewis': {'pop': 3821, 'tracts': 3},
'Lincoln': {'pop': 5208, 'tracts': 1},
'Madison': {'pop': 37536, 'tracts': 6},
'Minidoka': {'pop': 20069, 'tracts': 5},
'Nez Perce': {'pop': 39265, 'tracts': 10},
'Oneida': {'pop': 4286, 'tracts': 1},
'Owyhee': {'pop': 11526, 'tracts': 3},
'Payette': {'pop': 22623, 'tracts': 4},
'Power': {'pop': 7817, 'tracts': 2},
'Shoshone': {'pop': 12765, 'tracts': 3},
'Teton': {'pop': 10170, 'tracts': 1},
'Twin Falls': {'pop': 77230, 'tracts': 14},
'Valley': {'pop': 9862, 'tracts': 3},
'Washington': {'pop': 10198, 'tracts': 3}},
'IL': {'Adams': {'pop': 67103, 'tracts': 18},
'Alexander': {'pop': 8238, 'tracts': 4},
'Bond': {'pop': 17768, 'tracts': 4},
'Boone': {'pop': 54165, 'tracts': 7},
'Brown': {'pop': 6937, 'tracts': 2},
'Bureau': {'pop': 34978, 'tracts': 10},
'Calhoun': {'pop': 5089, 'tracts': 2},
'Carroll': {'pop': 15387, 'tracts': 6},
'Cass': {'pop': 13642, 'tracts': 5},
'Champaign': {'pop': 201081, 'tracts': 43},
'Christian': {'pop': 34800, 'tracts': 10},
'Clark': {'pop': 16335, 'tracts': 4},
'Clay': {'pop': 13815, 'tracts': 4},
'Clinton': {'pop': 37762, 'tracts': 8},
'Coles': {'pop': 53873, 'tracts': 12},
'Cook': {'pop': 5194675, 'tracts': 1318},
'Crawford': {'pop': 19817, 'tracts': 6},
'Cumberland': {'pop': 11048, 'tracts': 3},
'De Witt': {'pop': 16561, 'tracts': 5},
'DeKalb': {'pop': 105160, 'tracts': 21},
'Douglas': {'pop': 19980, 'tracts': 5},
'DuPage': {'pop': 916924, 'tracts': 216},
'Edgar': {'pop': 18576, 'tracts': 5},
'Edwards': {'pop': 6721, 'tracts': 3},
'Effingham': {'pop': 34242, 'tracts': 8},
'Fayette': {'pop': 22140, 'tracts': 7},
'Ford': {'pop': 14081, 'tracts': 5},
'Franklin': {'pop': 39561, 'tracts': 12},
'Fulton': {'pop': 37069, 'tracts': 12},
'Gallatin': {'pop': 5589, 'tracts': 2},
'Greene': {'pop': 13886, 'tracts': 5},
'Grundy': {'pop': 50063, 'tracts': 10},
'Hamilton': {'pop': 8457, 'tracts': 3},
'Hancock': {'pop': 19104, 'tracts': 7},
'Hardin': {'pop': 4320, 'tracts': 2},
'Henderson': {'pop': 7331, 'tracts': 3},
'Henry': {'pop': 50486, 'tracts': 13},
'Iroquois': {'pop': 29718, 'tracts': 9},
'Jackson': {'pop': 60218, 'tracts': 14},
'Jasper': {'pop': 9698, 'tracts': 3},
'Jefferson': {'pop': 38827, 'tracts': 11},
'Jersey': {'pop': 22985, 'tracts': 6},
'Jo Daviess': {'pop': 22678, 'tracts': 6},
'Johnson': {'pop': 12582, 'tracts': 4},
'Kane': {'pop': 515269, 'tracts': 82},
'Kankakee': {'pop': 113449, 'tracts': 29},
'Kendall': {'pop': 114736, 'tracts': 10},
'Knox': {'pop': 52919, 'tracts': 16},
'La Salle': {'pop': 113924, 'tracts': 28},
'Lake': {'pop': 703462, 'tracts': 153},
'Lawrence': {'pop': 16833, 'tracts': 5},
'Lee': {'pop': 36031, 'tracts': 9},
'Livingston': {'pop': 38950, 'tracts': 10},
'Logan': {'pop': 30305, 'tracts': 8},
'Macon': {'pop': 110768, 'tracts': 34},
'Macoupin': {'pop': 47765, 'tracts': 13},
'Madison': {'pop': 269282, 'tracts': 61},
'Marion': {'pop': 39437, 'tracts': 12},
'Marshall': {'pop': 12640, 'tracts': 5},
'Mason': {'pop': 14666, 'tracts': 6},
'Massac': {'pop': 15429, 'tracts': 4},
'McDonough': {'pop': 32612, 'tracts': 10},
'McHenry': {'pop': 308760, 'tracts': 52},
'McLean': {'pop': 169572, 'tracts': 41},
'Menard': {'pop': 12705, 'tracts': 3},
'Mercer': {'pop': 16434, 'tracts': 4},
'Monroe': {'pop': 32957, 'tracts': 6},
'Montgomery': {'pop': 30104, 'tracts': 8},
'Morgan': {'pop': 35547, 'tracts': 10},
'Moultrie': {'pop': 14846, 'tracts': 4},
'Ogle': {'pop': 53497, 'tracts': 11},
'Peoria': {'pop': 186494, 'tracts': 48},
'Perry': {'pop': 22350, 'tracts': 6},
'Piatt': {'pop': 16729, 'tracts': 4},
'Pike': {'pop': 16430, 'tracts': 5},
'Pope': {'pop': 4470, 'tracts': 2},
'Pulaski': {'pop': 6161, 'tracts': 2},
'Putnam': {'pop': 6006, 'tracts': 2},
'Randolph': {'pop': 33476, 'tracts': 9},
'Richland': {'pop': 16233, 'tracts': 5},
'Rock Island': {'pop': 147546, 'tracts': 40},
'Saline': {'pop': 24913, 'tracts': 9},
'Sangamon': {'pop': 197465, 'tracts': 53},
'Schuyler': {'pop': 7544, 'tracts': 3},
'Scott': {'pop': 5355, 'tracts': 2},
'Shelby': {'pop': 22363, 'tracts': 6},
'St. Clair': {'pop': 270056, 'tracts': 60},
'Stark': {'pop': 5994, 'tracts': 2},
'Stephenson': {'pop': 47711, 'tracts': 13},
'Tazewell': {'pop': 135394, 'tracts': 30},
'Union': {'pop': 17808, 'tracts': 5},
'Vermilion': {'pop': 81625, 'tracts': 24},
'Wabash': {'pop': 11947, 'tracts': 4},
'Warren': {'pop': 17707, 'tracts': 5},
'Washington': {'pop': 14716, 'tracts': 4},
'Wayne': {'pop': 16760, 'tracts': 5},
'White': {'pop': 14665, 'tracts': 5},
'Whiteside': {'pop': 58498, 'tracts': 18},
'Will': {'pop': 677560, 'tracts': 152},
'Williamson': {'pop': 66357, 'tracts': 15},
'Winnebago': {'pop': 295266, 'tracts': 77},
'Woodford': {'pop': 38664, 'tracts': 9}},
'IN': {'Adams': {'pop': 34387, 'tracts': 7},
'Allen': {'pop': 355329, 'tracts': 96},
'Bartholomew': {'pop': 76794, 'tracts': 15},
'Benton': {'pop': 8854, 'tracts': 3},
'Blackford': {'pop': 12766, 'tracts': 4},
'Boone': {'pop': 56640, 'tracts': 10},
'Brown': {'pop': 15242, 'tracts': 4},
'Carroll': {'pop': 20155, 'tracts': 7},
'Cass': {'pop': 38966, 'tracts': 11},
'Clark': {'pop': 110232, 'tracts': 26},
'Clay': {'pop': 26890, 'tracts': 6},
'Clinton': {'pop': 33224, 'tracts': 8},
'Crawford': {'pop': 10713, 'tracts': 3},
'Daviess': {'pop': 31648, 'tracts': 7},
'DeKalb': {'pop': 42223, 'tracts': 9},
'Dearborn': {'pop': 50047, 'tracts': 10},
'Decatur': {'pop': 25740, 'tracts': 6},
'Delaware': {'pop': 117671, 'tracts': 30},
'Dubois': {'pop': 41889, 'tracts': 7},
'Elkhart': {'pop': 197559, 'tracts': 36},
'Fayette': {'pop': 24277, 'tracts': 7},
'Floyd': {'pop': 74578, 'tracts': 20},
'Fountain': {'pop': 17240, 'tracts': 5},
'Franklin': {'pop': 23087, 'tracts': 5},
'Fulton': {'pop': 20836, 'tracts': 6},
'Gibson': {'pop': 33503, 'tracts': 7},
'Grant': {'pop': 70061, 'tracts': 16},
'Greene': {'pop': 33165, 'tracts': 9},
'Hamilton': {'pop': 274569, 'tracts': 39},
'Hancock': {'pop': 70002, 'tracts': 10},
'Harrison': {'pop': 39364, 'tracts': 6},
'Hendricks': {'pop': 145448, 'tracts': 21},
'Henry': {'pop': 49462, 'tracts': 13},
'Howard': {'pop': 82752, 'tracts': 20},
'Huntington': {'pop': 37124, 'tracts': 9},
'Jackson': {'pop': 42376, 'tracts': 10},
'Jasper': {'pop': 33478, 'tracts': 8},
'Jay': {'pop': 21253, 'tracts': 7},
'Jefferson': {'pop': 32428, 'tracts': 7},
'Jennings': {'pop': 28525, 'tracts': 6},
'Johnson': {'pop': 139654, 'tracts': 22},
'Knox': {'pop': 38440, 'tracts': 10},
'Kosciusko': {'pop': 77358, 'tracts': 19},
'LaGrange': {'pop': 37128, 'tracts': 8},
'LaPorte': {'pop': 111467, 'tracts': 28},
'Lake': {'pop': 496005, 'tracts': 117},
'Lawrence': {'pop': 46134, 'tracts': 10},
'Madison': {'pop': 131636, 'tracts': 37},
'Marion': {'pop': 903393, 'tracts': 224},
'Marshall': {'pop': 47051, 'tracts': 12},
'Martin': {'pop': 10334, 'tracts': 3},
'Miami': {'pop': 36903, 'tracts': 10},
'Monroe': {'pop': 137974, 'tracts': 31},
'Montgomery': {'pop': 38124, 'tracts': 9},
'Morgan': {'pop': 68894, 'tracts': 13},
'Newton': {'pop': 14244, 'tracts': 4},
'Noble': {'pop': 47536, 'tracts': 10},
'Ohio': {'pop': 6128, 'tracts': 2},
'Orange': {'pop': 19840, 'tracts': 6},
'Owen': {'pop': 21575, 'tracts': 5},
'Parke': {'pop': 17339, 'tracts': 4},
'Perry': {'pop': 19338, 'tracts': 5},
'Pike': {'pop': 12845, 'tracts': 4},
'Porter': {'pop': 164343, 'tracts': 32},
'Posey': {'pop': 25910, 'tracts': 7},
'Pulaski': {'pop': 13402, 'tracts': 4},
'Putnam': {'pop': 37963, 'tracts': 7},
'Randolph': {'pop': 26171, 'tracts': 8},
'Ripley': {'pop': 28818, 'tracts': 6},
'Rush': {'pop': 17392, 'tracts': 5},
'Scott': {'pop': 24181, 'tracts': 5},
'Shelby': {'pop': 44436, 'tracts': 10},
'Spencer': {'pop': 20952, 'tracts': 5},
'St. Joseph': {'pop': 266931, 'tracts': 75},
'Starke': {'pop': 23363, 'tracts': 7},
'Steuben': {'pop': 34185, 'tracts': 9},
'Sullivan': {'pop': 21475, 'tracts': 5},
'Switzerland': {'pop': 10613, 'tracts': 3},
'Tippecanoe': {'pop': 172780, 'tracts': 37},
'Tipton': {'pop': 15936, 'tracts': 4},
'Union': {'pop': 7516, 'tracts': 2},
'Vanderburgh': {'pop': 179703, 'tracts': 49},
'Vermillion': {'pop': 16212, 'tracts': 5},
'Vigo': {'pop': 107848, 'tracts': 28},
'Wabash': {'pop': 32888, 'tracts': 8},
'Warren': {'pop': 8508, 'tracts': 2},
'Warrick': {'pop': 59689, 'tracts': 11},
'Washington': {'pop': 28262, 'tracts': 6},
'Wayne': {'pop': 68917, 'tracts': 17},
'Wells': {'pop': 27636, 'tracts': 7},
'White': {'pop': 24643, 'tracts': 8},
'Whitley': {'pop': 33292, 'tracts': 7}},
'KS': {'Allen': {'pop': 13371, 'tracts': 5},
'Anderson': {'pop': 8102, 'tracts': 2},
'Atchison': {'pop': 16924, 'tracts': 4},
'Barber': {'pop': 4861, 'tracts': 2},
'Barton': {'pop': 27674, 'tracts': 8},
'Bourbon': {'pop': 15173, 'tracts': 5},
'Brown': {'pop': 9984, 'tracts': 3},
'Butler': {'pop': 65880, 'tracts': 13},
'Chase': {'pop': 2790, 'tracts': 1},
'Chautauqua': {'pop': 3669, 'tracts': 1},
'Cherokee': {'pop': 21603, 'tracts': 6},
'Cheyenne': {'pop': 2726, 'tracts': 1},
'Clark': {'pop': 2215, 'tracts': 1},
'Clay': {'pop': 8535, 'tracts': 2},
'Cloud': {'pop': 9533, 'tracts': 4},
'Coffey': {'pop': 8601, 'tracts': 3},
'Comanche': {'pop': 1891, 'tracts': 1},
'Cowley': {'pop': 36311, 'tracts': 11},
'Crawford': {'pop': 39134, 'tracts': 11},
'Decatur': {'pop': 2961, 'tracts': 2},
'Dickinson': {'pop': 19754, 'tracts': 6},
'Doniphan': {'pop': 7945, 'tracts': 3},
'Douglas': {'pop': 110826, 'tracts': 22},
'Edwards': {'pop': 3037, 'tracts': 2},
'Elk': {'pop': 2882, 'tracts': 1},
'Ellis': {'pop': 28452, 'tracts': 6},
'Ellsworth': {'pop': 6497, 'tracts': 2},
'Finney': {'pop': 36776, 'tracts': 12},
'Ford': {'pop': 33848, 'tracts': 7},
'Franklin': {'pop': 25992, 'tracts': 5},
'Geary': {'pop': 34362, 'tracts': 8},
'Gove': {'pop': 2695, 'tracts': 2},
'Graham': {'pop': 2597, 'tracts': 2},
'Grant': {'pop': 7829, 'tracts': 2},
'Gray': {'pop': 6006, 'tracts': 2},
'Greeley': {'pop': 1247, 'tracts': 1},
'Greenwood': {'pop': 6689, 'tracts': 3},
'Hamilton': {'pop': 2690, 'tracts': 1},
'Harper': {'pop': 6034, 'tracts': 3},
'Harvey': {'pop': 34684, 'tracts': 6},
'Haskell': {'pop': 4256, 'tracts': 1},
'Hodgeman': {'pop': 1916, 'tracts': 1},
'Jackson': {'pop': 13462, 'tracts': 3},
'Jefferson': {'pop': 19126, 'tracts': 4},
'Jewell': {'pop': 3077, 'tracts': 2},
'Johnson': {'pop': 544179, 'tracts': 130},
'Kearny': {'pop': 3977, 'tracts': 1},
'Kingman': {'pop': 7858, 'tracts': 3},
'Kiowa': {'pop': 2553, 'tracts': 1},
'Labette': {'pop': 21607, 'tracts': 8},
'Lane': {'pop': 1750, 'tracts': 1},
'Leavenworth': {'pop': 76227, 'tracts': 16},
'Lincoln': {'pop': 3241, 'tracts': 1},
'Linn': {'pop': 9656, 'tracts': 2},
'Logan': {'pop': 2756, 'tracts': 1},
'Lyon': {'pop': 33690, 'tracts': 8},
'Marion': {'pop': 12660, 'tracts': 4},
'Marshall': {'pop': 10117, 'tracts': 4},
'McPherson': {'pop': 29180, 'tracts': 7},
'Meade': {'pop': 4575, 'tracts': 2},
'Miami': {'pop': 32787, 'tracts': 8},
'Mitchell': {'pop': 6373, 'tracts': 2},
'Montgomery': {'pop': 35471, 'tracts': 13},
'Morris': {'pop': 5923, 'tracts': 2},
'Morton': {'pop': 3233, 'tracts': 1},
'Nemaha': {'pop': 10178, 'tracts': 3},
'Neosho': {'pop': 16512, 'tracts': 5},
'Ness': {'pop': 3107, 'tracts': 2},
'Norton': {'pop': 5671, 'tracts': 1},
'Osage': {'pop': 16295, 'tracts': 5},
'Osborne': {'pop': 3858, 'tracts': 1},
'Ottawa': {'pop': 6091, 'tracts': 2},
'Pawnee': {'pop': 6973, 'tracts': 2},
'Phillips': {'pop': 5642, 'tracts': 3},
'Pottawatomie': {'pop': 21604, 'tracts': 4},
'Pratt': {'pop': 9656, 'tracts': 3},
'Rawlins': {'pop': 2519, 'tracts': 1},
'Reno': {'pop': 64511, 'tracts': 17},
'Republic': {'pop': 4980, 'tracts': 3},
'Rice': {'pop': 10083, 'tracts': 3},
'Riley': {'pop': 71115, 'tracts': 14},
'Rooks': {'pop': 5181, 'tracts': 2},
'Rush': {'pop': 3307, 'tracts': 2},
'Russell': {'pop': 6970, 'tracts': 2},
'Saline': {'pop': 55606, 'tracts': 12},
'Scott': {'pop': 4936, 'tracts': 1},
'Sedgwick': {'pop': 498365, 'tracts': 124},
'Seward': {'pop': 22952, 'tracts': 5},
'Shawnee': {'pop': 177934, 'tracts': 43},
'Sheridan': {'pop': 2556, 'tracts': 2},
'Sherman': {'pop': 6010, 'tracts': 2},
'Smith': {'pop': 3853, 'tracts': 2},
'Stafford': {'pop': 4437, 'tracts': 2},
'Stanton': {'pop': 2235, 'tracts': 1},
'Stevens': {'pop': 5724, 'tracts': 2},
'Sumner': {'pop': 24132, 'tracts': 6},
'Thomas': {'pop': 7900, 'tracts': 2},
'Trego': {'pop': 3001, 'tracts': 1},
'Wabaunsee': {'pop': 7053, 'tracts': 2},
'Wallace': {'pop': 1485, 'tracts': 1},
'Washington': {'pop': 5799, 'tracts': 2},
'Wichita': {'pop': 2234, 'tracts': 1},
'Wilson': {'pop': 9409, 'tracts': 4},
'Woodson': {'pop': 3309, 'tracts': 2},
'Wyandotte': {'pop': 157505, 'tracts': 70}},
'KY': {'Adair': {'pop': 18656, 'tracts': 7},
'Allen': {'pop': 19956, 'tracts': 6},
'Anderson': {'pop': 21421, 'tracts': 5},
'Ballard': {'pop': 8249, 'tracts': 3},
'Barren': {'pop': 42173, 'tracts': 10},
'Bath': {'pop': 11591, 'tracts': 3},
'Bell': {'pop': 28691, 'tracts': 9},
'Boone': {'pop': 118811, 'tracts': 22},
'Bourbon': {'pop': 19985, 'tracts': 6},
'Boyd': {'pop': 49542, 'tracts': 13},
'Boyle': {'pop': 28432, 'tracts': 7},
'Bracken': {'pop': 8488, 'tracts': 3},
'Breathitt': {'pop': 13878, 'tracts': 7},
'Breckinridge': {'pop': 20059, 'tracts': 6},
'Bullitt': {'pop': 74319, 'tracts': 18},
'Butler': {'pop': 12690, 'tracts': 5},
'Caldwell': {'pop': 12984, 'tracts': 3},
'Calloway': {'pop': 37191, 'tracts': 9},
'Campbell': {'pop': 90336, 'tracts': 25},
'Carlisle': {'pop': 5104, 'tracts': 3},
'Carroll': {'pop': 10811, 'tracts': 3},
'Carter': {'pop': 27720, 'tracts': 7},
'Casey': {'pop': 15955, 'tracts': 5},
'Christian': {'pop': 73955, 'tracts': 19},
'Clark': {'pop': 35613, 'tracts': 10},
'Clay': {'pop': 21730, 'tracts': 6},
'Clinton': {'pop': 10272, 'tracts': 3},
'Crittenden': {'pop': 9315, 'tracts': 4},
'Cumberland': {'pop': 6856, 'tracts': 2},
'Daviess': {'pop': 96656, 'tracts': 23},
'Edmonson': {'pop': 12161, 'tracts': 4},
'Elliott': {'pop': 7852, 'tracts': 2},
'Estill': {'pop': 14672, 'tracts': 4},
'Fayette': {'pop': 295803, 'tracts': 82},
'Fleming': {'pop': 14348, 'tracts': 4},
'Floyd': {'pop': 39451, 'tracts': 10},
'Franklin': {'pop': 49285, 'tracts': 11},
'Fulton': {'pop': 6813, 'tracts': 2},
'Gallatin': {'pop': 8589, 'tracts': 2},
'Garrard': {'pop': 16912, 'tracts': 4},
'Grant': {'pop': 24662, 'tracts': 4},
'Graves': {'pop': 37121, 'tracts': 9},
'Grayson': {'pop': 25746, 'tracts': 7},
'Green': {'pop': 11258, 'tracts': 4},
'Greenup': {'pop': 36910, 'tracts': 9},
'Hancock': {'pop': 8565, 'tracts': 3},
'Hardin': {'pop': 105543, 'tracts': 22},
'Harlan': {'pop': 29278, 'tracts': 11},
'Harrison': {'pop': 18846, 'tracts': 5},
'Hart': {'pop': 18199, 'tracts': 5},
'Henderson': {'pop': 46250, 'tracts': 11},
'Henry': {'pop': 15416, 'tracts': 5},
'Hickman': {'pop': 4902, 'tracts': 1},
'Hopkins': {'pop': 46920, 'tracts': 12},
'Jackson': {'pop': 13494, 'tracts': 3},
'Jefferson': {'pop': 741096, 'tracts': 191},
'Jessamine': {'pop': 48586, 'tracts': 9},
'Johnson': {'pop': 23356, 'tracts': 6},
'Kenton': {'pop': 159720, 'tracts': 41},
'Knott': {'pop': 16346, 'tracts': 5},
'Knox': {'pop': 31883, 'tracts': 8},
'Larue': {'pop': 14193, 'tracts': 4},
'Laurel': {'pop': 58849, 'tracts': 13},
'Lawrence': {'pop': 15860, 'tracts': 5},
'Lee': {'pop': 7887, 'tracts': 3},
'Leslie': {'pop': 11310, 'tracts': 3},
'Letcher': {'pop': 24519, 'tracts': 7},
'Lewis': {'pop': 13870, 'tracts': 4},
'Lincoln': {'pop': 24742, 'tracts': 6},
'Livingston': {'pop': 9519, 'tracts': 2},
'Logan': {'pop': 26835, 'tracts': 6},
'Lyon': {'pop': 8314, 'tracts': 3},
'Madison': {'pop': 82916, 'tracts': 19},
'Magoffin': {'pop': 13333, 'tracts': 4},
'Marion': {'pop': 19820, 'tracts': 6},
'Marshall': {'pop': 31448, 'tracts': 6},
'Martin': {'pop': 12929, 'tracts': 3},
'Mason': {'pop': 17490, 'tracts': 5},
'McCracken': {'pop': 65565, 'tracts': 17},
'McCreary': {'pop': 18306, 'tracts': 4},
'McLean': {'pop': 9531, 'tracts': 3},
'Meade': {'pop': 28602, 'tracts': 8},
'Menifee': {'pop': 6306, 'tracts': 2},
'Mercer': {'pop': 21331, 'tracts': 5},
'Metcalfe': {'pop': 10099, 'tracts': 3},
'Monroe': {'pop': 10963, 'tracts': 4},
'Montgomery': {'pop': 26499, 'tracts': 6},
'Morgan': {'pop': 13923, 'tracts': 5},
'Muhlenberg': {'pop': 31499, 'tracts': 9},
'Nelson': {'pop': 43437, 'tracts': 9},
'Nicholas': {'pop': 7135, 'tracts': 2},
'Ohio': {'pop': 23842, 'tracts': 7},
'Oldham': {'pop': 60316, 'tracts': 14},
'Owen': {'pop': 10841, 'tracts': 3},
'Owsley': {'pop': 4755, 'tracts': 2},
'Pendleton': {'pop': 14877, 'tracts': 3},
'Perry': {'pop': 28712, 'tracts': 8},
'Pike': {'pop': 65024, 'tracts': 19},
'Powell': {'pop': 12613, 'tracts': 2},
'Pulaski': {'pop': 63063, 'tracts': 14},
'Robertson': {'pop': 2282, 'tracts': 1},
'Rockcastle': {'pop': 17056, 'tracts': 4},
'Rowan': {'pop': 23333, 'tracts': 4},
'Russell': {'pop': 17565, 'tracts': 5},
'Scott': {'pop': 47173, 'tracts': 14},
'Shelby': {'pop': 42074, 'tracts': 9},
'Simpson': {'pop': 17327, 'tracts': 4},
'Spencer': {'pop': 17061, 'tracts': 4},
'Taylor': {'pop': 24512, 'tracts': 5},
'Todd': {'pop': 12460, 'tracts': 4},
'Trigg': {'pop': 14339, 'tracts': 5},
'Trimble': {'pop': 8809, 'tracts': 2},
'Union': {'pop': 15007, 'tracts': 4},
'Warren': {'pop': 113792, 'tracts': 24},
'Washington': {'pop': 11717, 'tracts': 3},
'Wayne': {'pop': 20813, 'tracts': 5},
'Webster': {'pop': 13621, 'tracts': 4},
'Whitley': {'pop': 35637, 'tracts': 8},
'Wolfe': {'pop': 7355, 'tracts': 2},
'Woodford': {'pop': 24939, 'tracts': 8}},
'LA': {'Acadia': {'pop': 61773, 'tracts': 12},
'Allen': {'pop': 25764, 'tracts': 5},
'Ascension': {'pop': 107215, 'tracts': 14},
'Assumption': {'pop': 23421, 'tracts': 6},
'Avoyelles': {'pop': 42073, 'tracts': 9},
'Beauregard': {'pop': 35654, 'tracts': 7},
'Bienville': {'pop': 14353, 'tracts': 5},
'Bossier': {'pop': 116979, 'tracts': 22},
'Caddo': {'pop': 254969, 'tracts': 64},
'Calcasieu': {'pop': 192768, 'tracts': 44},
'Caldwell': {'pop': 10132, 'tracts': 3},
'Cameron': {'pop': 6839, 'tracts': 3},
'Catahoula': {'pop': 10407, 'tracts': 3},
'Claiborne': {'pop': 17195, 'tracts': 5},
'Concordia': {'pop': 20822, 'tracts': 5},
'De Soto': {'pop': 26656, 'tracts': 7},
'East Baton Rouge': {'pop': 440171, 'tracts': 92},
'East Carroll': {'pop': 7759, 'tracts': 3},
'East Feliciana': {'pop': 20267, 'tracts': 5},
'Evangeline': {'pop': 33984, 'tracts': 8},
'Franklin': {'pop': 20767, 'tracts': 6},
'Grant': {'pop': 22309, 'tracts': 5},
'Iberia': {'pop': 73240, 'tracts': 15},
'Iberville': {'pop': 33387, 'tracts': 7},
'Jackson': {'pop': 16274, 'tracts': 5},
'Jefferson': {'pop': 432552, 'tracts': 127},
'Jefferson Davis': {'pop': 31594, 'tracts': 7},
'La Salle': {'pop': 14890, 'tracts': 3},
'Lafayette': {'pop': 221578, 'tracts': 43},
'Lafourche': {'pop': 96318, 'tracts': 23},
'Lincoln': {'pop': 46735, 'tracts': 10},
'Livingston': {'pop': 128026, 'tracts': 17},
'Madison': {'pop': 12093, 'tracts': 5},
'Morehouse': {'pop': 27979, 'tracts': 8},
'Natchitoches': {'pop': 39566, 'tracts': 9},
'Orleans': {'pop': 343829, 'tracts': 177},
'Ouachita': {'pop': 153720, 'tracts': 40},
'Plaquemines': {'pop': 23042, 'tracts': 9},
'Pointe Coupee': {'pop': 22802, 'tracts': 6},
'Rapides': {'pop': 131613, 'tracts': 33},
'Red River': {'pop': 9091, 'tracts': 2},
'Richland': {'pop': 20725, 'tracts': 6},
'Sabine': {'pop': 24233, 'tracts': 7},
'St. Bernard': {'pop': 35897, 'tracts': 18},
'St. Charles': {'pop': 52780, 'tracts': 13},
'St. Helena': {'pop': 11203, 'tracts': 2},
'St. James': {'pop': 22102, 'tracts': 7},
'St. John the Baptist': {'pop': 45924, 'tracts': 11},
'St. Landry': {'pop': 83384, 'tracts': 19},
'St. Martin': {'pop': 52160, 'tracts': 11},
'St. Mary': {'pop': 54650, 'tracts': 16},
'St. Tammany': {'pop': 233740, 'tracts': 43},
'Tangipahoa': {'pop': 121097, 'tracts': 20},
'Tensas': {'pop': 5252, 'tracts': 3},
'Terrebonne': {'pop': 111860, 'tracts': 21},
'Union': {'pop': 22721, 'tracts': 6},
'Vermilion': {'pop': 57999, 'tracts': 12},
'Vernon': {'pop': 52334, 'tracts': 12},
'Washington': {'pop': 47168, 'tracts': 11},
'Webster': {'pop': 41207, 'tracts': 11},
'West Baton Rouge': {'pop': 23788, 'tracts': 5},
'West Carroll': {'pop': 11604, 'tracts': 3},
'West Feliciana': {'pop': 15625, 'tracts': 3},
'Winn': {'pop': 15313, 'tracts': 4}},
'MA': {'Barnstable': {'pop': 215888, 'tracts': 57},
'Berkshire': {'pop': 131219, 'tracts': 39},
'Bristol': {'pop': 548285, 'tracts': 126},
'Dukes': {'pop': 16535, 'tracts': 4},
'Essex': {'pop': 743159, 'tracts': 163},
'Franklin': {'pop': 71372, 'tracts': 18},
'Hampden': {'pop': 463490, 'tracts': 103},
'Hampshire': {'pop': 158080, 'tracts': 36},
'Middlesex': {'pop': 1503085, 'tracts': 318},
'Nantucket': {'pop': 10172, 'tracts': 6},
'Norfolk': {'pop': 670850, 'tracts': 130},
'Plymouth': {'pop': 494919, 'tracts': 100},
'Suffolk': {'pop': 722023, 'tracts': 204},
'Worcester': {'pop': 798552, 'tracts': 172}},
'MD': {'Allegany': {'pop': 75087, 'tracts': 23},
'Anne Arundel': {'pop': 537656, 'tracts': 104},
'Baltimore': {'pop': 805029, 'tracts': 214},
'Baltimore City': {'pop': 620961, 'tracts': 200},
'Calvert': {'pop': 88737, 'tracts': 18},
'Caroline': {'pop': 33066, 'tracts': 9},
'Carroll': {'pop': 167134, 'tracts': 38},
'Cecil': {'pop': 101108, 'tracts': 19},
'Charles': {'pop': 146551, 'tracts': 30},
'Dorchester': {'pop': 32618, 'tracts': 10},
'Frederick': {'pop': 233385, 'tracts': 61},
'Garrett': {'pop': 30097, 'tracts': 7},
'Harford': {'pop': 244826, 'tracts': 57},
'Howard': {'pop': 287085, 'tracts': 55},
'Kent': {'pop': 20197, 'tracts': 5},
'Montgomery': {'pop': 971777, 'tracts': 215},
"Prince George's": {'pop': 863420, 'tracts': 218},
"Queen Anne's": {'pop': 47798, 'tracts': 12},
'Somerset': {'pop': 26470, 'tracts': 8},
"St. Mary's": {'pop': 105151, 'tracts': 18},
'Talbot': {'pop': 37782, 'tracts': 10},
'Washington': {'pop': 147430, 'tracts': 32},
'Wicomico': {'pop': 98733, 'tracts': 19},
'Worcester': {'pop': 51454, 'tracts': 17}},
'ME': {'Androscoggin': {'pop': 107702, 'tracts': 28},
'Aroostook': {'pop': 71870, 'tracts': 24},
'Cumberland': {'pop': 281674, 'tracts': 67},
'Franklin': {'pop': 30768, 'tracts': 9},
'Hancock': {'pop': 54418, 'tracts': 17},
'Kennebec': {'pop': 122151, 'tracts': 31},
'Knox': {'pop': 39736, 'tracts': 11},
'Lincoln': {'pop': 34457, 'tracts': 9},
'Oxford': {'pop': 57833, 'tracts': 17},
'Penobscot': {'pop': 153923, 'tracts': 46},
'Piscataquis': {'pop': 17535, 'tracts': 8},
'Sagadahoc': {'pop': 35293, 'tracts': 8},
'Somerset': {'pop': 52228, 'tracts': 17},
'Waldo': {'pop': 38786, 'tracts': 8},
'Washington': {'pop': 32856, 'tracts': 14},
'York': {'pop': 197131, 'tracts': 41}},
'MI': {'Alcona': {'pop': 10942, 'tracts': 5},
'Alger': {'pop': 9601, 'tracts': 3},
'Allegan': {'pop': 111408, 'tracts': 25},
'Alpena': {'pop': 29598, 'tracts': 10},
'Antrim': {'pop': 23580, 'tracts': 7},
'Arenac': {'pop': 15899, 'tracts': 5},
'Baraga': {'pop': 8860, 'tracts': 2},
'Barry': {'pop': 59173, 'tracts': 11},
'Bay': {'pop': 107771, 'tracts': 26},
'Benzie': {'pop': 17525, 'tracts': 5},
'Berrien': {'pop': 156813, 'tracts': 48},
'Branch': {'pop': 45248, 'tracts': 12},
'Calhoun': {'pop': 136146, 'tracts': 39},
'Cass': {'pop': 52293, 'tracts': 11},
'Charlevoix': {'pop': 25949, 'tracts': 13},
'Cheboygan': {'pop': 26152, 'tracts': 8},
'Chippewa': {'pop': 38520, 'tracts': 14},
'Clare': {'pop': 30926, 'tracts': 11},
'Clinton': {'pop': 75382, 'tracts': 22},
'Crawford': {'pop': 14074, 'tracts': 5},
'Delta': {'pop': 37069, 'tracts': 11},
'Dickinson': {'pop': 26168, 'tracts': 7},
'Eaton': {'pop': 107759, 'tracts': 28},
'Emmet': {'pop': 32694, 'tracts': 8},
'Genesee': {'pop': 425790, 'tracts': 131},
'Gladwin': {'pop': 25692, 'tracts': 9},
'Gogebic': {'pop': 16427, 'tracts': 7},
'Grand Traverse': {'pop': 86986, 'tracts': 16},
'Gratiot': {'pop': 42476, 'tracts': 10},
'Hillsdale': {'pop': 46688, 'tracts': 12},
'Houghton': {'pop': 36628, 'tracts': 11},
'Huron': {'pop': 33118, 'tracts': 12},
'Ingham': {'pop': 280895, 'tracts': 81},
'Ionia': {'pop': 63905, 'tracts': 13},
'Iosco': {'pop': 25887, 'tracts': 9},
'Iron': {'pop': 11817, 'tracts': 5},
'Isabella': {'pop': 70311, 'tracts': 15},
'Jackson': {'pop': 160248, 'tracts': 38},
'Kalamazoo': {'pop': 250331, 'tracts': 57},
'Kalkaska': {'pop': 17153, 'tracts': 5},
'Kent': {'pop': 602622, 'tracts': 128},
'Keweenaw': {'pop': 2156, 'tracts': 2},
'Lake': {'pop': 11539, 'tracts': 4},
'Lapeer': {'pop': 88319, 'tracts': 24},
'Leelanau': {'pop': 21708, 'tracts': 6},
'Lenawee': {'pop': 99892, 'tracts': 23},
'Livingston': {'pop': 180967, 'tracts': 61},
'Luce': {'pop': 6631, 'tracts': 3},
'Mackinac': {'pop': 11113, 'tracts': 6},
'Macomb': {'pop': 840978, 'tracts': 216},
'Manistee': {'pop': 24733, 'tracts': 9},
'Marquette': {'pop': 67077, 'tracts': 24},
'Mason': {'pop': 28705, 'tracts': 8},
'Mecosta': {'pop': 42798, 'tracts': 11},
'Menominee': {'pop': 24029, 'tracts': 7},
'Midland': {'pop': 83629, 'tracts': 19},
'Missaukee': {'pop': 14849, 'tracts': 4},
'Monroe': {'pop': 152021, 'tracts': 39},
'Montcalm': {'pop': 63342, 'tracts': 13},
'Montmorency': {'pop': 9765, 'tracts': 5},
'Muskegon': {'pop': 172188, 'tracts': 42},
'Newaygo': {'pop': 48460, 'tracts': 11},
'Oakland': {'pop': 1202362, 'tracts': 338},
'Oceana': {'pop': 26570, 'tracts': 7},
'Ogemaw': {'pop': 21699, 'tracts': 7},
'Ontonagon': {'pop': 6780, 'tracts': 4},
'Osceola': {'pop': 23528, 'tracts': 6},
'Oscoda': {'pop': 8640, 'tracts': 5},
'Otsego': {'pop': 24164, 'tracts': 6},
'Ottawa': {'pop': 263801, 'tracts': 53},
'Presque Isle': {'pop': 13376, 'tracts': 6},
'Roscommon': {'pop': 24449, 'tracts': 10},
'Saginaw': {'pop': 200169, 'tracts': 56},
'Sanilac': {'pop': 43114, 'tracts': 12},
'Schoolcraft': {'pop': 8485, 'tracts': 3},
'Shiawassee': {'pop': 70648, 'tracts': 17},
'St. Clair': {'pop': 163040, 'tracts': 49},
'St. Joseph': {'pop': 61295, 'tracts': 17},
'Tuscola': {'pop': 55729, 'tracts': 13},
'Van Buren': {'pop': 76258, 'tracts': 15},
'Washtenaw': {'pop': 344791, 'tracts': 100},
'Wayne': {'pop': 1820584, 'tracts': 610},
'Wexford': {'pop': 32735, 'tracts': 8}},
'MN': {'Aitkin': {'pop': 16202, 'tracts': 6},
'Anoka': {'pop': 330844, 'tracts': 83},
'Becker': {'pop': 32504, 'tracts': 10},
'Beltrami': {'pop': 44442, 'tracts': 10},
'Benton': {'pop': 38451, 'tracts': 9},
'Big Stone': {'pop': 5269, 'tracts': 3},
'Blue Earth': {'pop': 64013, 'tracts': 16},
'Brown': {'pop': 25893, 'tracts': 8},
'Carlton': {'pop': 35386, 'tracts': 7},
'Carver': {'pop': 91042, 'tracts': 19},
'Cass': {'pop': 28567, 'tracts': 10},
'Chippewa': {'pop': 12441, 'tracts': 4},
'Chisago': {'pop': 53887, 'tracts': 10},
'Clay': {'pop': 58999, 'tracts': 13},
'Clearwater': {'pop': 8695, 'tracts': 3},
'Cook': {'pop': 5176, 'tracts': 3},
'Cottonwood': {'pop': 11687, 'tracts': 4},
'Crow Wing': {'pop': 62500, 'tracts': 16},
'Dakota': {'pop': 398552, 'tracts': 95},
'Dodge': {'pop': 20087, 'tracts': 5},
'Douglas': {'pop': 36009, 'tracts': 9},
'Faribault': {'pop': 14553, 'tracts': 6},
'Fillmore': {'pop': 20866, 'tracts': 6},
'Freeborn': {'pop': 31255, 'tracts': 10},
'Goodhue': {'pop': 46183, 'tracts': 10},
'Grant': {'pop': 6018, 'tracts': 2},
'Hennepin': {'pop': 1152425, 'tracts': 299},
'Houston': {'pop': 19027, 'tracts': 5},
'Hubbard': {'pop': 20428, 'tracts': 7},
'Isanti': {'pop': 37816, 'tracts': 8},
'Itasca': {'pop': 45058, 'tracts': 11},
'Jackson': {'pop': 10266, 'tracts': 4},
'Kanabec': {'pop': 16239, 'tracts': 4},
'Kandiyohi': {'pop': 42239, 'tracts': 12},
'Kittson': {'pop': 4552, 'tracts': 2},
'Koochiching': {'pop': 13311, 'tracts': 4},
'Lac qui Parle': {'pop': 7259, 'tracts': 3},
'Lake': {'pop': 10866, 'tracts': 3},
'Lake of the Woods': {'pop': 4045, 'tracts': 2},
'Le Sueur': {'pop': 27703, 'tracts': 6},
'Lincoln': {'pop': 5896, 'tracts': 2},
'Lyon': {'pop': 25857, 'tracts': 7},
'Mahnomen': {'pop': 5413, 'tracts': 2},
'Marshall': {'pop': 9439, 'tracts': 4},
'Martin': {'pop': 20840, 'tracts': 6},
'McLeod': {'pop': 36651, 'tracts': 7},
'Meeker': {'pop': 23300, 'tracts': 6},
'Mille Lacs': {'pop': 26097, 'tracts': 7},
'Morrison': {'pop': 33198, 'tracts': 8},
'Mower': {'pop': 39163, 'tracts': 11},
'Murray': {'pop': 8725, 'tracts': 3},
'Nicollet': {'pop': 32727, 'tracts': 7},
'Nobles': {'pop': 21378, 'tracts': 6},
'Norman': {'pop': 6852, 'tracts': 3},
'Olmsted': {'pop': 144248, 'tracts': 33},
'Otter Tail': {'pop': 57303, 'tracts': 17},
'Pennington': {'pop': 13930, 'tracts': 5},
'Pine': {'pop': 29750, 'tracts': 8},
'Pipestone': {'pop': 9596, 'tracts': 5},
'Polk': {'pop': 31600, 'tracts': 10},
'Pope': {'pop': 10995, 'tracts': 4},
'Ramsey': {'pop': 508640, 'tracts': 137},
'Red Lake': {'pop': 4089, 'tracts': 2},
'Redwood': {'pop': 16059, 'tracts': 6},
'Renville': {'pop': 15730, 'tracts': 6},
'Rice': {'pop': 64142, 'tracts': 13},
'Rock': {'pop': 9687, 'tracts': 3},
'Roseau': {'pop': 15629, 'tracts': 5},
'Scott': {'pop': 129928, 'tracts': 21},
'Sherburne': {'pop': 88499, 'tracts': 11},
'Sibley': {'pop': 15226, 'tracts': 4},
'St. Louis': {'pop': 200226, 'tracts': 66},
'Stearns': {'pop': 150642, 'tracts': 29},
'Steele': {'pop': 36576, 'tracts': 8},
'Stevens': {'pop': 9726, 'tracts': 3},
'Swift': {'pop': 9783, 'tracts': 4},
'Todd': {'pop': 24895, 'tracts': 8},
'Traverse': {'pop': 3558, 'tracts': 2},
'Wabasha': {'pop': 21676, 'tracts': 6},
'Wadena': {'pop': 13843, 'tracts': 3},
'Waseca': {'pop': 19136, 'tracts': 5},
'Washington': {'pop': 238136, 'tracts': 50},
'Watonwan': {'pop': 11211, 'tracts': 3},
'Wilkin': {'pop': 6576, 'tracts': 2},
'Winona': {'pop': 51461, 'tracts': 10},
'Wright': {'pop': 124700, 'tracts': 17},
'Yellow Medicine': {'pop': 10438, 'tracts': 4}},
'MO': {'Adair': {'pop': 25607, 'tracts': 7},
'Andrew': {'pop': 17291, 'tracts': 4},
'Atchison': {'pop': 5685, 'tracts': 2},
'Audrain': {'pop': 25529, 'tracts': 7},
'Barry': {'pop': 35597, 'tracts': 7},
'Barton': {'pop': 12402, 'tracts': 3},
'Bates': {'pop': 17049, 'tracts': 4},
'Benton': {'pop': 19056, 'tracts': 6},
'Bollinger': {'pop': 12363, 'tracts': 3},
'Boone': {'pop': 162642, 'tracts': 29},
'Buchanan': {'pop': 89201, 'tracts': 25},
'Butler': {'pop': 42794, 'tracts': 10},
'Caldwell': {'pop': 9424, 'tracts': 2},
'Callaway': {'pop': 44332, 'tracts': 8},
'Camden': {'pop': 44002, 'tracts': 11},
'Cape Girardeau': {'pop': 75674, 'tracts': 16},
'Carroll': {'pop': 9295, 'tracts': 3},
'Carter': {'pop': 6265, 'tracts': 2},
'Cass': {'pop': 99478, 'tracts': 20},
'Cedar': {'pop': 13982, 'tracts': 3},
'Chariton': {'pop': 7831, 'tracts': 3},
'Christian': {'pop': 77422, 'tracts': 14},
'Clark': {'pop': 7139, 'tracts': 3},
'Clay': {'pop': 221939, 'tracts': 44},
'Clinton': {'pop': 20743, 'tracts': 4},
'Cole': {'pop': 75990, 'tracts': 15},
'Cooper': {'pop': 17601, 'tracts': 5},
'Crawford': {'pop': 24696, 'tracts': 6},
'Dade': {'pop': 7883, 'tracts': 2},
'Dallas': {'pop': 16777, 'tracts': 3},
'Daviess': {'pop': 8433, 'tracts': 2},
'DeKalb': {'pop': 12892, 'tracts': 2},
'Dent': {'pop': 15657, 'tracts': 4},
'Douglas': {'pop': 13684, 'tracts': 3},
'Dunklin': {'pop': 31953, 'tracts': 10},
'Franklin': {'pop': 101492, 'tracts': 17},
'Gasconade': {'pop': 15222, 'tracts': 5},
'Gentry': {'pop': 6738, 'tracts': 2},
'Greene': {'pop': 275174, 'tracts': 62},
'Grundy': {'pop': 10261, 'tracts': 4},
'Harrison': {'pop': 8957, 'tracts': 3},
'Henry': {'pop': 22272, 'tracts': 6},
'Hickory': {'pop': 9627, 'tracts': 3},
'Holt': {'pop': 4912, 'tracts': 3},
'Howard': {'pop': 10144, 'tracts': 3},
'Howell': {'pop': 40400, 'tracts': 8},
'Iron': {'pop': 10630, 'tracts': 4},
'Jackson': {'pop': 674158, 'tracts': 199},
'Jasper': {'pop': 117404, 'tracts': 22},
'Jefferson': {'pop': 218733, 'tracts': 42},
'Johnson': {'pop': 52595, 'tracts': 9},
'Knox': {'pop': 4131, 'tracts': 2},
'Laclede': {'pop': 35571, 'tracts': 6},
'Lafayette': {'pop': 33381, 'tracts': 7},
'Lawrence': {'pop': 38634, 'tracts': 7},
'Lewis': {'pop': 10211, 'tracts': 4},
'Lincoln': {'pop': 52566, 'tracts': 7},
'Linn': {'pop': 12761, 'tracts': 5},
'Livingston': {'pop': 15195, 'tracts': 5},
'Macon': {'pop': 15566, 'tracts': 5},
'Madison': {'pop': 12226, 'tracts': 3},
'Maries': {'pop': 9176, 'tracts': 3},
'Marion': {'pop': 28781, 'tracts': 8},
'McDonald': {'pop': 23083, 'tracts': 4},
'Mercer': {'pop': 3785, 'tracts': 2},
'Miller': {'pop': 24748, 'tracts': 5},
'Mississippi': {'pop': 14358, 'tracts': 4},
'Moniteau': {'pop': 15607, 'tracts': 4},
'Monroe': {'pop': 8840, 'tracts': 3},
'Montgomery': {'pop': 12236, 'tracts': 4},
'Morgan': {'pop': 20565, 'tracts': 5},
'New Madrid': {'pop': 18956, 'tracts': 6},
'Newton': {'pop': 58114, 'tracts': 12},
'Nodaway': {'pop': 23370, 'tracts': 5},
'Oregon': {'pop': 10881, 'tracts': 3},
'Osage': {'pop': 13878, 'tracts': 4},
'Ozark': {'pop': 9723, 'tracts': 2},
'Pemiscot': {'pop': 18296, 'tracts': 6},
'Perry': {'pop': 18971, 'tracts': 5},
'Pettis': {'pop': 42201, 'tracts': 11},
'Phelps': {'pop': 45156, 'tracts': 10},
'Pike': {'pop': 18516, 'tracts': 5},
'Platte': {'pop': 89322, 'tracts': 20},
'Polk': {'pop': 31137, 'tracts': 4},
'Pulaski': {'pop': 52274, 'tracts': 9},
'Putnam': {'pop': 4979, 'tracts': 2},
'Ralls': {'pop': 10167, 'tracts': 3},
'Randolph': {'pop': 25414, 'tracts': 6},
'Ray': {'pop': 23494, 'tracts': 4},
'Reynolds': {'pop': 6696, 'tracts': 2},
'Ripley': {'pop': 14100, 'tracts': 4},
'Saline': {'pop': 23370, 'tracts': 8},
'Schuyler': {'pop': 4431, 'tracts': 2},
'Scotland': {'pop': 4843, 'tracts': 2},
'Scott': {'pop': 39191, 'tracts': 10},
'Shannon': {'pop': 8441, 'tracts': 2},
'Shelby': {'pop': 6373, 'tracts': 3},
'St. Charles': {'pop': 360485, 'tracts': 79},
'St. Clair': {'pop': 9805, 'tracts': 3},
'St. Francois': {'pop': 65359, 'tracts': 11},
'St. Louis': {'pop': 998954, 'tracts': 199},
'St. Louis City': {'pop': 319294, 'tracts': 106},
'Ste. Genevieve': {'pop': 18145, 'tracts': 4},
'Stoddard': {'pop': 29968, 'tracts': 8},
'Stone': {'pop': 32202, 'tracts': 6},
'Sullivan': {'pop': 6714, 'tracts': 3},
'Taney': {'pop': 51675, 'tracts': 10},
'Texas': {'pop': 26008, 'tracts': 4},
'Vernon': {'pop': 21159, 'tracts': 6},
'Warren': {'pop': 32513, 'tracts': 5},
'Washington': {'pop': 25195, 'tracts': 5},
'Wayne': {'pop': 13521, 'tracts': 4},
'Webster': {'pop': 36202, 'tracts': 8},
'Worth': {'pop': 2171, 'tracts': 1},
'Wright': {'pop': 18815, 'tracts': 4}},
'MS': {'Adams': {'pop': 32297, 'tracts': 9},
'Alcorn': {'pop': 37057, 'tracts': 7},
'Amite': {'pop': 13131, 'tracts': 3},
'Attala': {'pop': 19564, 'tracts': 6},
'Benton': {'pop': 8729, 'tracts': 2},
'Bolivar': {'pop': 34145, 'tracts': 8},
'Calhoun': {'pop': 14962, 'tracts': 5},
'Carroll': {'pop': 10597, 'tracts': 2},
'Chickasaw': {'pop': 17392, 'tracts': 4},
'Choctaw': {'pop': 8547, 'tracts': 3},
'Claiborne': {'pop': 9604, 'tracts': 3},
'Clarke': {'pop': 16732, 'tracts': 4},
'Clay': {'pop': 20634, 'tracts': 5},
'Coahoma': {'pop': 26151, 'tracts': 7},
'Copiah': {'pop': 29449, 'tracts': 6},
'Covington': {'pop': 19568, 'tracts': 4},
'DeSoto': {'pop': 161252, 'tracts': 33},
'Forrest': {'pop': 74934, 'tracts': 17},
'Franklin': {'pop': 8118, 'tracts': 2},
'George': {'pop': 22578, 'tracts': 5},
'Greene': {'pop': 14400, 'tracts': 2},
'Grenada': {'pop': 21906, 'tracts': 5},
'Hancock': {'pop': 43929, 'tracts': 7},
'Harrison': {'pop': 187105, 'tracts': 46},
'Hinds': {'pop': 245285, 'tracts': 64},
'Holmes': {'pop': 19198, 'tracts': 5},
'Humphreys': {'pop': 9375, 'tracts': 3},
'Issaquena': {'pop': 1406, 'tracts': 1},
'Itawamba': {'pop': 23401, 'tracts': 5},
'Jackson': {'pop': 139668, 'tracts': 28},
'Jasper': {'pop': 17062, 'tracts': 4},
'Jefferson': {'pop': 7726, 'tracts': 2},
'Jefferson Davis': {'pop': 12487, 'tracts': 3},
'Jones': {'pop': 67761, 'tracts': 14},
'Kemper': {'pop': 10456, 'tracts': 2},
'Lafayette': {'pop': 47351, 'tracts': 10},
'Lamar': {'pop': 55658, 'tracts': 8},
'Lauderdale': {'pop': 80261, 'tracts': 19},
'Lawrence': {'pop': 12929, 'tracts': 3},
'Leake': {'pop': 23805, 'tracts': 5},
'Lee': {'pop': 82910, 'tracts': 19},
'Leflore': {'pop': 32317, 'tracts': 8},
'Lincoln': {'pop': 34869, 'tracts': 6},
'Lowndes': {'pop': 59779, 'tracts': 14},
'Madison': {'pop': 95203, 'tracts': 21},
'Marion': {'pop': 27088, 'tracts': 6},
'Marshall': {'pop': 37144, 'tracts': 6},
'Monroe': {'pop': 36989, 'tracts': 9},
'Montgomery': {'pop': 10925, 'tracts': 3},
'Neshoba': {'pop': 29676, 'tracts': 7},
'Newton': {'pop': 21720, 'tracts': 5},
'Noxubee': {'pop': 11545, 'tracts': 3},
'Oktibbeha': {'pop': 47671, 'tracts': 8},
'Panola': {'pop': 34707, 'tracts': 6},
'Pearl River': {'pop': 55834, 'tracts': 9},
'Perry': {'pop': 12250, 'tracts': 3},
'Pike': {'pop': 40404, 'tracts': 8},
'Pontotoc': {'pop': 29957, 'tracts': 6},
'Prentiss': {'pop': 25276, 'tracts': 5},
'Quitman': {'pop': 8223, 'tracts': 3},
'Rankin': {'pop': 141617, 'tracts': 27},
'Scott': {'pop': 28264, 'tracts': 6},
'Sharkey': {'pop': 4916, 'tracts': 2},
'Simpson': {'pop': 27503, 'tracts': 5},
'Smith': {'pop': 16491, 'tracts': 3},
'Stone': {'pop': 17786, 'tracts': 3},
'Sunflower': {'pop': 29450, 'tracts': 7},
'Tallahatchie': {'pop': 15378, 'tracts': 4},
'Tate': {'pop': 28886, 'tracts': 5},
'Tippah': {'pop': 22232, 'tracts': 4},
'Tishomingo': {'pop': 19593, 'tracts': 4},
'Tunica': {'pop': 10778, 'tracts': 3},
'Union': {'pop': 27134, 'tracts': 6},
'Walthall': {'pop': 15443, 'tracts': 3},
'Warren': {'pop': 48773, 'tracts': 12},
'Washington': {'pop': 51137, 'tracts': 19},
'Wayne': {'pop': 20747, 'tracts': 4},
'Webster': {'pop': 10253, 'tracts': 3},
'Wilkinson': {'pop': 9878, 'tracts': 2},
'Winston': {'pop': 19198, 'tracts': 5},
'Yalobusha': {'pop': 12678, 'tracts': 3},
'Yazoo': {'pop': 28065, 'tracts': 6}},
'MT': {'Beaverhead': {'pop': 9246, 'tracts': 3},
'Big Horn': {'pop': 12865, 'tracts': 5},
'Blaine': {'pop': 6491, 'tracts': 4},
'Broadwater': {'pop': 5612, 'tracts': 2},
'Carbon': {'pop': 10078, 'tracts': 5},
'Carter': {'pop': 1160, 'tracts': 1},
'Cascade': {'pop': 81327, 'tracts': 22},
'Chouteau': {'pop': 5813, 'tracts': 2},
'Custer': {'pop': 11699, 'tracts': 6},
'Daniels': {'pop': 1751, 'tracts': 1},
'Dawson': {'pop': 8966, 'tracts': 3},
'Deer Lodge': {'pop': 9298, 'tracts': 3},
'Fallon': {'pop': 2890, 'tracts': 1},
'Fergus': {'pop': 11586, 'tracts': 2},
'Flathead': {'pop': 90928, 'tracts': 19},
'Gallatin': {'pop': 89513, 'tracts': 22},
'Garfield': {'pop': 1206, 'tracts': 1},
'Glacier': {'pop': 13399, 'tracts': 4},
'Golden Valley': {'pop': 884, 'tracts': 1},
'Granite': {'pop': 3079, 'tracts': 1},
'Hill': {'pop': 16096, 'tracts': 6},
'Jefferson': {'pop': 11406, 'tracts': 3},
'Judith Basin': {'pop': 2072, 'tracts': 1},
'Lake': {'pop': 28746, 'tracts': 8},
'Lewis and Clark': {'pop': 63395, 'tracts': 14},
'Liberty': {'pop': 2339, 'tracts': 1},
'Lincoln': {'pop': 19687, 'tracts': 5},
'Madison': {'pop': 7691, 'tracts': 3},
'McCone': {'pop': 1734, 'tracts': 1},
'Meagher': {'pop': 1891, 'tracts': 1},
'Mineral': {'pop': 4223, 'tracts': 2},
'Missoula': {'pop': 109299, 'tracts': 20},
'Musselshell': {'pop': 4538, 'tracts': 2},
'Park': {'pop': 15636, 'tracts': 6},
'Petroleum': {'pop': 494, 'tracts': 1},
'Phillips': {'pop': 4253, 'tracts': 1},
'Pondera': {'pop': 6153, 'tracts': 2},
'Powder River': {'pop': 1743, 'tracts': 1},
'Powell': {'pop': 7027, 'tracts': 2},
'Prairie': {'pop': 1179, 'tracts': 1},
'Ravalli': {'pop': 40212, 'tracts': 10},
'Richland': {'pop': 9746, 'tracts': 4},
'Roosevelt': {'pop': 10425, 'tracts': 3},
'Rosebud': {'pop': 9233, 'tracts': 4},
'Sanders': {'pop': 11413, 'tracts': 3},
'Sheridan': {'pop': 3384, 'tracts': 2},
'Silver Bow': {'pop': 34200, 'tracts': 8},
'Stillwater': {'pop': 9117, 'tracts': 3},
'Sweet Grass': {'pop': 3651, 'tracts': 1},
'Teton': {'pop': 6073, 'tracts': 3},
'Toole': {'pop': 5324, 'tracts': 3},
'Treasure': {'pop': 718, 'tracts': 1},
'Valley': {'pop': 7369, 'tracts': 3},
'Wheatland': {'pop': 2168, 'tracts': 1},
'Wibaux': {'pop': 1017, 'tracts': 1},
'Yellowstone': {'pop': 147972, 'tracts': 32}},
'NC': {'Alamance': {'pop': 151131, 'tracts': 36},
'Alexander': {'pop': 37198, 'tracts': 7},
'Alleghany': {'pop': 11155, 'tracts': 3},
'Anson': {'pop': 26948, 'tracts': 6},
'Ashe': {'pop': 27281, 'tracts': 6},
'Avery': {'pop': 17797, 'tracts': 5},
'Beaufort': {'pop': 47759, 'tracts': 11},
'Bertie': {'pop': 21282, 'tracts': 4},
'Bladen': {'pop': 35190, 'tracts': 6},
'Brunswick': {'pop': 107431, 'tracts': 33},
'Buncombe': {'pop': 238318, 'tracts': 56},
'Burke': {'pop': 90912, 'tracts': 18},
'Cabarrus': {'pop': 178011, 'tracts': 37},
'Caldwell': {'pop': 83029, 'tracts': 17},
'Camden': {'pop': 9980, 'tracts': 2},
'Carteret': {'pop': 66469, 'tracts': 38},
'Caswell': {'pop': 23719, 'tracts': 6},
'Catawba': {'pop': 154358, 'tracts': 31},
'Chatham': {'pop': 63505, 'tracts': 13},
'Cherokee': {'pop': 27444, 'tracts': 7},
'Chowan': {'pop': 14793, 'tracts': 3},
'Clay': {'pop': 10587, 'tracts': 2},
'Cleveland': {'pop': 98078, 'tracts': 22},
'Columbus': {'pop': 58098, 'tracts': 13},
'Craven': {'pop': 103505, 'tracts': 21},
'Cumberland': {'pop': 319431, 'tracts': 68},
'Currituck': {'pop': 23547, 'tracts': 8},
'Dare': {'pop': 33920, 'tracts': 11},
'Davidson': {'pop': 162878, 'tracts': 34},
'Davie': {'pop': 41240, 'tracts': 7},
'Duplin': {'pop': 58505, 'tracts': 11},
'Durham': {'pop': 267587, 'tracts': 60},
'Edgecombe': {'pop': 56552, 'tracts': 14},
'Forsyth': {'pop': 350670, 'tracts': 93},
'Franklin': {'pop': 60619, 'tracts': 12},
'Gaston': {'pop': 206086, 'tracts': 65},
'Gates': {'pop': 12197, 'tracts': 3},
'Graham': {'pop': 8861, 'tracts': 3},
'Granville': {'pop': 59916, 'tracts': 13},
'Greene': {'pop': 21362, 'tracts': 4},
'Guilford': {'pop': 488406, 'tracts': 119},
'Halifax': {'pop': 54691, 'tracts': 12},
'Harnett': {'pop': 114678, 'tracts': 27},
'Haywood': {'pop': 59036, 'tracts': 16},
'Henderson': {'pop': 106740, 'tracts': 27},
'Hertford': {'pop': 24669, 'tracts': 5},
'Hoke': {'pop': 46952, 'tracts': 9},
'Hyde': {'pop': 5810, 'tracts': 2},
'Iredell': {'pop': 159437, 'tracts': 44},
'Jackson': {'pop': 40271, 'tracts': 9},
'Johnston': {'pop': 168878, 'tracts': 25},
'Jones': {'pop': 10153, 'tracts': 3},
'Lee': {'pop': 57866, 'tracts': 13},
'Lenoir': {'pop': 59495, 'tracts': 15},
'Lincoln': {'pop': 78265, 'tracts': 18},
'Macon': {'pop': 33922, 'tracts': 9},
'Madison': {'pop': 20764, 'tracts': 6},
'Martin': {'pop': 24505, 'tracts': 6},
'McDowell': {'pop': 44996, 'tracts': 10},
'Mecklenburg': {'pop': 919628, 'tracts': 233},
'Mitchell': {'pop': 15579, 'tracts': 4},
'Montgomery': {'pop': 27798, 'tracts': 6},
'Moore': {'pop': 88247, 'tracts': 18},
'Nash': {'pop': 95840, 'tracts': 18},
'New Hanover': {'pop': 202667, 'tracts': 45},
'Northampton': {'pop': 22099, 'tracts': 5},
'Onslow': {'pop': 177772, 'tracts': 32},
'Orange': {'pop': 133801, 'tracts': 28},
'Pamlico': {'pop': 13144, 'tracts': 4},
'Pasquotank': {'pop': 40661, 'tracts': 10},
'Pender': {'pop': 52217, 'tracts': 16},
'Perquimans': {'pop': 13453, 'tracts': 3},
'Person': {'pop': 39464, 'tracts': 7},
'Pitt': {'pop': 168148, 'tracts': 32},
'Polk': {'pop': 20510, 'tracts': 7},
'Randolph': {'pop': 141752, 'tracts': 28},
'Richmond': {'pop': 46639, 'tracts': 11},
'Robeson': {'pop': 134168, 'tracts': 31},
'Rockingham': {'pop': 93643, 'tracts': 21},
'Rowan': {'pop': 138428, 'tracts': 30},
'Rutherford': {'pop': 67810, 'tracts': 13},
'Sampson': {'pop': 63431, 'tracts': 11},
'Scotland': {'pop': 36157, 'tracts': 7},
'Stanly': {'pop': 60585, 'tracts': 13},
'Stokes': {'pop': 47401, 'tracts': 9},
'Surry': {'pop': 73673, 'tracts': 22},
'Swain': {'pop': 13981, 'tracts': 5},
'Transylvania': {'pop': 33090, 'tracts': 7},
'Tyrrell': {'pop': 4407, 'tracts': 1},
'Union': {'pop': 201292, 'tracts': 41},
'Vance': {'pop': 45422, 'tracts': 10},
'Wake': {'pop': 900993, 'tracts': 187},
'Warren': {'pop': 20972, 'tracts': 6},
'Washington': {'pop': 13228, 'tracts': 3},
'Watauga': {'pop': 51079, 'tracts': 13},
'Wayne': {'pop': 122623, 'tracts': 26},
'Wilkes': {'pop': 69340, 'tracts': 14},
'Wilson': {'pop': 81234, 'tracts': 19},
'Yadkin': {'pop': 38406, 'tracts': 7},
'Yancey': {'pop': 17818, 'tracts': 5}},
'ND': {'Adams': {'pop': 2343, 'tracts': 1},
'Barnes': {'pop': 11066, 'tracts': 4},
'Benson': {'pop': 6660, 'tracts': 4},
'Billings': {'pop': 783, 'tracts': 1},
'Bottineau': {'pop': 6429, 'tracts': 3},
'Bowman': {'pop': 3151, 'tracts': 2},
'Burke': {'pop': 1968, 'tracts': 1},
'Burleigh': {'pop': 81308, 'tracts': 19},
'Cass': {'pop': 149778, 'tracts': 33},
'Cavalier': {'pop': 3993, 'tracts': 2},
'Dickey': {'pop': 5289, 'tracts': 3},
'Divide': {'pop': 2071, 'tracts': 1},
'Dunn': {'pop': 3536, 'tracts': 1},
'Eddy': {'pop': 2385, 'tracts': 1},
'Emmons': {'pop': 3550, 'tracts': 1},
'Foster': {'pop': 3343, 'tracts': 1},
'Golden Valley': {'pop': 1680, 'tracts': 1},
'Grand Forks': {'pop': 66861, 'tracts': 18},
'Grant': {'pop': 2394, 'tracts': 1},
'Griggs': {'pop': 2420, 'tracts': 1},
'Hettinger': {'pop': 2477, 'tracts': 2},
'Kidder': {'pop': 2435, 'tracts': 1},
'LaMoure': {'pop': 4139, 'tracts': 2},
'Logan': {'pop': 1990, 'tracts': 1},
'McHenry': {'pop': 5395, 'tracts': 2},
'McIntosh': {'pop': 2809, 'tracts': 1},
'McKenzie': {'pop': 6360, 'tracts': 4},
'McLean': {'pop': 8962, 'tracts': 2},
'Mercer': {'pop': 8424, 'tracts': 3},
'Morton': {'pop': 27471, 'tracts': 5},
'Mountrail': {'pop': 7673, 'tracts': 3},
'Nelson': {'pop': 3126, 'tracts': 1},
'Oliver': {'pop': 1846, 'tracts': 1},
'Pembina': {'pop': 7413, 'tracts': 5},
'Pierce': {'pop': 4357, 'tracts': 2},
'Ramsey': {'pop': 11451, 'tracts': 3},
'Ransom': {'pop': 5457, 'tracts': 3},
'Renville': {'pop': 2470, 'tracts': 1},
'Richland': {'pop': 16321, 'tracts': 6},
'Rolette': {'pop': 13937, 'tracts': 4},
'Sargent': {'pop': 3829, 'tracts': 2},
'Sheridan': {'pop': 1321, 'tracts': 1},
'Sioux': {'pop': 4153, 'tracts': 2},
'Slope': {'pop': 727, 'tracts': 1},
'Stark': {'pop': 24199, 'tracts': 8},
'Steele': {'pop': 1975, 'tracts': 1},
'Stutsman': {'pop': 21100, 'tracts': 6},
'Towner': {'pop': 2246, 'tracts': 1},
'Traill': {'pop': 8121, 'tracts': 4},
'Walsh': {'pop': 11119, 'tracts': 6},
'Ward': {'pop': 61675, 'tracts': 13},
'Wells': {'pop': 4207, 'tracts': 2},
'Williams': {'pop': 22398, 'tracts': 7}},
'NE': {'Adams': {'pop': 31364, 'tracts': 9},
'Antelope': {'pop': 6685, 'tracts': 3},
'Arthur': {'pop': 460, 'tracts': 1},
'Banner': {'pop': 690, 'tracts': 1},
'Blaine': {'pop': 478, 'tracts': 1},
'Boone': {'pop': 5505, 'tracts': 2},
'Box Butte': {'pop': 11308, 'tracts': 3},
'Boyd': {'pop': 2099, 'tracts': 1},
'Brown': {'pop': 3145, 'tracts': 1},
'Buffalo': {'pop': 46102, 'tracts': 11},
'Burt': {'pop': 6858, 'tracts': 3},
'Butler': {'pop': 8395, 'tracts': 3},
'Cass': {'pop': 25241, 'tracts': 6},
'Cedar': {'pop': 8852, 'tracts': 2},
'Chase': {'pop': 3966, 'tracts': 1},
'Cherry': {'pop': 5713, 'tracts': 2},
'Cheyenne': {'pop': 9998, 'tracts': 3},
'Clay': {'pop': 6542, 'tracts': 2},
'Colfax': {'pop': 10515, 'tracts': 3},
'Cuming': {'pop': 9139, 'tracts': 3},
'Custer': {'pop': 10939, 'tracts': 4},
'Dakota': {'pop': 21006, 'tracts': 4},
'Dawes': {'pop': 9182, 'tracts': 2},
'Dawson': {'pop': 24326, 'tracts': 7},
'Deuel': {'pop': 1941, 'tracts': 1},
'Dixon': {'pop': 6000, 'tracts': 2},
'Dodge': {'pop': 36691, 'tracts': 9},
'Douglas': {'pop': 517110, 'tracts': 156},
'Dundy': {'pop': 2008, 'tracts': 1},
'Fillmore': {'pop': 5890, 'tracts': 2},
'Franklin': {'pop': 3225, 'tracts': 2},
'Frontier': {'pop': 2756, 'tracts': 1},
'Furnas': {'pop': 4959, 'tracts': 1},
'Gage': {'pop': 22311, 'tracts': 7},
'Garden': {'pop': 2057, 'tracts': 1},
'Garfield': {'pop': 2049, 'tracts': 1},
'Gosper': {'pop': 2044, 'tracts': 1},
'Grant': {'pop': 614, 'tracts': 1},
'Greeley': {'pop': 2538, 'tracts': 1},
'Hall': {'pop': 58607, 'tracts': 14},
'Hamilton': {'pop': 9124, 'tracts': 3},
'Harlan': {'pop': 3423, 'tracts': 1},
'Hayes': {'pop': 967, 'tracts': 1},
'Hitchcock': {'pop': 2908, 'tracts': 1},
'Holt': {'pop': 10435, 'tracts': 4},
'Hooker': {'pop': 736, 'tracts': 1},
'Howard': {'pop': 6274, 'tracts': 2},
'Jefferson': {'pop': 7547, 'tracts': 3},
'Johnson': {'pop': 5217, 'tracts': 2},
'Kearney': {'pop': 6489, 'tracts': 2},
'Keith': {'pop': 8368, 'tracts': 3},
'Keya Paha': {'pop': 824, 'tracts': 1},
'Kimball': {'pop': 3821, 'tracts': 1},
'Knox': {'pop': 8701, 'tracts': 3},
'Lancaster': {'pop': 285407, 'tracts': 74},
'Lincoln': {'pop': 36288, 'tracts': 8},
'Logan': {'pop': 763, 'tracts': 1},
'Loup': {'pop': 632, 'tracts': 1},
'Madison': {'pop': 34876, 'tracts': 9},
'McPherson': {'pop': 539, 'tracts': 1},
'Merrick': {'pop': 7845, 'tracts': 3},
'Morrill': {'pop': 5042, 'tracts': 1},
'Nance': {'pop': 3735, 'tracts': 1},
'Nemaha': {'pop': 7248, 'tracts': 2},
'Nuckolls': {'pop': 4500, 'tracts': 2},
'Otoe': {'pop': 15740, 'tracts': 5},
'Pawnee': {'pop': 2773, 'tracts': 1},
'Perkins': {'pop': 2970, 'tracts': 1},
'Phelps': {'pop': 9188, 'tracts': 3},
'Pierce': {'pop': 7266, 'tracts': 2},
'Platte': {'pop': 32237, 'tracts': 7},
'Polk': {'pop': 5406, 'tracts': 2},
'Red Willow': {'pop': 11055, 'tracts': 3},
'Richardson': {'pop': 8363, 'tracts': 3},
'Rock': {'pop': 1526, 'tracts': 1},
'Saline': {'pop': 14200, 'tracts': 4},
'Sarpy': {'pop': 158840, 'tracts': 43},
'Saunders': {'pop': 20780, 'tracts': 5},
'Scotts Bluff': {'pop': 36970, 'tracts': 11},
'Seward': {'pop': 16750, 'tracts': 4},
'Sheridan': {'pop': 5469, 'tracts': 2},
'Sherman': {'pop': 3152, 'tracts': 1},
'Sioux': {'pop': 1311, 'tracts': 1},
'Stanton': {'pop': 6129, 'tracts': 2},
'Thayer': {'pop': 5228, 'tracts': 2},
'Thomas': {'pop': 647, 'tracts': 1},
'Thurston': {'pop': 6940, 'tracts': 2},
'Valley': {'pop': 4260, 'tracts': 2},
'Washington': {'pop': 20234, 'tracts': 5},
'Wayne': {'pop': 9595, 'tracts': 2},
'Webster': {'pop': 3812, 'tracts': 2},
'Wheeler': {'pop': 818, 'tracts': 1},
'York': {'pop': 13665, 'tracts': 4}},
'NH': {'Belknap': {'pop': 60088, 'tracts': 15},
'Carroll': {'pop': 47818, 'tracts': 11},
'Cheshire': {'pop': 77117, 'tracts': 16},
'Coos': {'pop': 33055, 'tracts': 11},
'Grafton': {'pop': 89118, 'tracts': 19},
'Hillsborough': {'pop': 400721, 'tracts': 86},
'Merrimack': {'pop': 146445, 'tracts': 36},
'Rockingham': {'pop': 295223, 'tracts': 66},
'Strafford': {'pop': 123143, 'tracts': 25},
'Sullivan': {'pop': 43742, 'tracts': 10}},
'NJ': {'Atlantic': {'pop': 274549, 'tracts': 69},
'Bergen': {'pop': 905116, 'tracts': 179},
'Burlington': {'pop': 448734, 'tracts': 114},
'Camden': {'pop': 513657, 'tracts': 127},
'Cape May': {'pop': 97265, 'tracts': 32},
'Cumberland': {'pop': 156898, 'tracts': 35},
'Essex': {'pop': 783969, 'tracts': 210},
'Gloucester': {'pop': 288288, 'tracts': 63},
'Hudson': {'pop': 634266, 'tracts': 166},
'Hunterdon': {'pop': 128349, 'tracts': 26},
'Mercer': {'pop': 366513, 'tracts': 77},
'Middlesex': {'pop': 809858, 'tracts': 175},
'Monmouth': {'pop': 630380, 'tracts': 144},
'Morris': {'pop': 492276, 'tracts': 100},
'Ocean': {'pop': 576567, 'tracts': 126},
'Passaic': {'pop': 501226, 'tracts': 100},
'Salem': {'pop': 66083, 'tracts': 24},
'Somerset': {'pop': 323444, 'tracts': 68},
'Sussex': {'pop': 149265, 'tracts': 41},
'Union': {'pop': 536499, 'tracts': 108},
'Warren': {'pop': 108692, 'tracts': 23}},
'NM': {'Bernalillo': {'pop': 662564, 'tracts': 153},
'Catron': {'pop': 3725, 'tracts': 1},
'Chaves': {'pop': 65645, 'tracts': 16},
'Cibola': {'pop': 27213, 'tracts': 7},
'Colfax': {'pop': 13750, 'tracts': 3},
'Curry': {'pop': 48376, 'tracts': 12},
'De Baca': {'pop': 2022, 'tracts': 1},
'Dona Ana': {'pop': 209233, 'tracts': 41},
'Eddy': {'pop': 53829, 'tracts': 12},
'Grant': {'pop': 29514, 'tracts': 8},
'Guadalupe': {'pop': 4687, 'tracts': 1},
'Harding': {'pop': 695, 'tracts': 1},
'Hidalgo': {'pop': 4894, 'tracts': 2},
'Lea': {'pop': 64727, 'tracts': 18},
'Lincoln': {'pop': 20497, 'tracts': 5},
'Los Alamos': {'pop': 17950, 'tracts': 4},
'Luna': {'pop': 25095, 'tracts': 6},
'McKinley': {'pop': 71492, 'tracts': 17},
'Mora': {'pop': 4881, 'tracts': 1},
'Otero': {'pop': 63797, 'tracts': 16},
'Quay': {'pop': 9041, 'tracts': 3},
'Rio Arriba': {'pop': 40246, 'tracts': 9},
'Roosevelt': {'pop': 19846, 'tracts': 5},
'San Juan': {'pop': 130044, 'tracts': 33},
'San Miguel': {'pop': 29393, 'tracts': 7},
'Sandoval': {'pop': 131561, 'tracts': 28},
'Santa Fe': {'pop': 144170, 'tracts': 50},
'Sierra': {'pop': 11988, 'tracts': 4},
'Socorro': {'pop': 17866, 'tracts': 6},
'Taos': {'pop': 32937, 'tracts': 6},
'Torrance': {'pop': 16383, 'tracts': 4},
'Union': {'pop': 4549, 'tracts': 1},
'Valencia': {'pop': 76569, 'tracts': 18}},
'NV': {'Carson City': {'pop': 55274, 'tracts': 14},
'Churchill': {'pop': 24877, 'tracts': 7},
'Clark': {'pop': 1951269, 'tracts': 487},
'Douglas': {'pop': 46997, 'tracts': 17},
'Elko': {'pop': 48818, 'tracts': 14},
'Esmeralda': {'pop': 783, 'tracts': 1},
'Eureka': {'pop': 1987, 'tracts': 1},
'Humboldt': {'pop': 16528, 'tracts': 4},
'Lander': {'pop': 5775, 'tracts': 1},
'Lincoln': {'pop': 5345, 'tracts': 2},
'Lyon': {'pop': 51980, 'tracts': 10},
'Mineral': {'pop': 4772, 'tracts': 2},
'Nye': {'pop': 43946, 'tracts': 10},
'Pershing': {'pop': 6753, 'tracts': 1},
'Storey': {'pop': 4010, 'tracts': 1},
'Washoe': {'pop': 421407, 'tracts': 112},
'White Pine': {'pop': 10030, 'tracts': 3}},
'NY': {'Albany': {'pop': 304204, 'tracts': 75},
'Allegany': {'pop': 48946, 'tracts': 13},
'Bronx': {'pop': 1385108, 'tracts': 339},
'Broome': {'pop': 200600, 'tracts': 55},
'Cattaraugus': {'pop': 80317, 'tracts': 21},
'Cayuga': {'pop': 80026, 'tracts': 20},
'Chautauqua': {'pop': 134905, 'tracts': 35},
'Chemung': {'pop': 88830, 'tracts': 22},
'Chenango': {'pop': 50477, 'tracts': 12},
'Clinton': {'pop': 82128, 'tracts': 19},
'Columbia': {'pop': 63096, 'tracts': 21},
'Cortland': {'pop': 49336, 'tracts': 12},
'Delaware': {'pop': 47980, 'tracts': 14},
'Dutchess': {'pop': 297488, 'tracts': 79},
'Erie': {'pop': 919040, 'tracts': 237},
'Essex': {'pop': 39370, 'tracts': 13},
'Franklin': {'pop': 51599, 'tracts': 14},
'Fulton': {'pop': 55531, 'tracts': 15},
'Genesee': {'pop': 60079, 'tracts': 15},
'Greene': {'pop': 49221, 'tracts': 15},
'Hamilton': {'pop': 4836, 'tracts': 4},
'Herkimer': {'pop': 64519, 'tracts': 19},
'Jefferson': {'pop': 116229, 'tracts': 26},
'Kings': {'pop': 2504700, 'tracts': 760},
'Lewis': {'pop': 27087, 'tracts': 7},
'Livingston': {'pop': 65393, 'tracts': 15},
'Madison': {'pop': 73442, 'tracts': 16},
'Monroe': {'pop': 744344, 'tracts': 192},
'Montgomery': {'pop': 50219, 'tracts': 16},
'Nassau': {'pop': 1339532, 'tracts': 280},
'New York': {'pop': 1585873, 'tracts': 288},
'Niagara': {'pop': 216469, 'tracts': 61},
'Oneida': {'pop': 234878, 'tracts': 74},
'Onondaga': {'pop': 467026, 'tracts': 140},
'Ontario': {'pop': 107931, 'tracts': 25},
'Orange': {'pop': 372813, 'tracts': 79},
'Orleans': {'pop': 42883, 'tracts': 11},
'Oswego': {'pop': 122109, 'tracts': 29},
'Otsego': {'pop': 62259, 'tracts': 17},
'Putnam': {'pop': 99710, 'tracts': 19},
'Queens': {'pop': 2230722, 'tracts': 669},
'Rensselaer': {'pop': 159429, 'tracts': 42},
'Richmond': {'pop': 468730, 'tracts': 109},
'Rockland': {'pop': 311687, 'tracts': 65},
'Saratoga': {'pop': 219607, 'tracts': 50},
'Schenectady': {'pop': 154727, 'tracts': 43},
'Schoharie': {'pop': 32749, 'tracts': 8},
'Schuyler': {'pop': 18343, 'tracts': 5},
'Seneca': {'pop': 35251, 'tracts': 10},
'St. Lawrence': {'pop': 111944, 'tracts': 28},
'Steuben': {'pop': 98990, 'tracts': 30},
'Suffolk': {'pop': 1493350, 'tracts': 322},
'Sullivan': {'pop': 77547, 'tracts': 24},
'Tioga': {'pop': 51125, 'tracts': 10},
'Tompkins': {'pop': 101564, 'tracts': 23},
'Ulster': {'pop': 182493, 'tracts': 47},
'Warren': {'pop': 65707, 'tracts': 19},
'Washington': {'pop': 63216, 'tracts': 17},
'Wayne': {'pop': 93772, 'tracts': 23},
'Westchester': {'pop': 949113, 'tracts': 223},
'Wyoming': {'pop': 42155, 'tracts': 11},
'Yates': {'pop': 25348, 'tracts': 5}},
'OH': {'Adams': {'pop': 28550, 'tracts': 6},
'Allen': {'pop': 106331, 'tracts': 33},
'Ashland': {'pop': 53139, 'tracts': 11},
'Ashtabula': {'pop': 101497, 'tracts': 25},
'Athens': {'pop': 64757, 'tracts': 15},
'Auglaize': {'pop': 45949, 'tracts': 11},
'Belmont': {'pop': 70400, 'tracts': 20},
'Brown': {'pop': 44846, 'tracts': 9},
'Butler': {'pop': 368130, 'tracts': 80},
'Carroll': {'pop': 28836, 'tracts': 7},
'Champaign': {'pop': 40097, 'tracts': 10},
'Clark': {'pop': 138333, 'tracts': 44},
'Clermont': {'pop': 197363, 'tracts': 40},
'Clinton': {'pop': 42040, 'tracts': 9},
'Columbiana': {'pop': 107841, 'tracts': 24},
'Coshocton': {'pop': 36901, 'tracts': 10},
'Crawford': {'pop': 43784, 'tracts': 13},
'Cuyahoga': {'pop': 1280122, 'tracts': 447},
'Darke': {'pop': 52959, 'tracts': 12},
'Defiance': {'pop': 39037, 'tracts': 9},
'Delaware': {'pop': 174214, 'tracts': 35},
'Erie': {'pop': 77079, 'tracts': 19},
'Fairfield': {'pop': 146156, 'tracts': 28},
'Fayette': {'pop': 29030, 'tracts': 7},
'Franklin': {'pop': 1163414, 'tracts': 284},
'Fulton': {'pop': 42698, 'tracts': 9},
'Gallia': {'pop': 30934, 'tracts': 7},
'Geauga': {'pop': 93389, 'tracts': 21},
'Greene': {'pop': 161573, 'tracts': 35},
'Guernsey': {'pop': 40087, 'tracts': 10},
'Hamilton': {'pop': 802374, 'tracts': 222},
'Hancock': {'pop': 74782, 'tracts': 13},
'Hardin': {'pop': 32058, 'tracts': 7},
'Harrison': {'pop': 15864, 'tracts': 5},
'Henry': {'pop': 28215, 'tracts': 7},
'Highland': {'pop': 43589, 'tracts': 9},
'Hocking': {'pop': 29380, 'tracts': 7},
'Holmes': {'pop': 42366, 'tracts': 8},
'Huron': {'pop': 59626, 'tracts': 13},
'Jackson': {'pop': 33225, 'tracts': 7},
'Jefferson': {'pop': 69709, 'tracts': 23},
'Knox': {'pop': 60921, 'tracts': 12},
'Lake': {'pop': 230041, 'tracts': 59},
'Lawrence': {'pop': 62450, 'tracts': 16},
'Licking': {'pop': 166492, 'tracts': 32},
'Logan': {'pop': 45858, 'tracts': 11},
'Lorain': {'pop': 301356, 'tracts': 73},
'Lucas': {'pop': 441815, 'tracts': 127},
'Madison': {'pop': 43435, 'tracts': 12},
'Mahoning': {'pop': 238823, 'tracts': 70},
'Marion': {'pop': 66501, 'tracts': 18},
'Medina': {'pop': 172332, 'tracts': 37},
'Meigs': {'pop': 23770, 'tracts': 6},
'Mercer': {'pop': 40814, 'tracts': 9},
'Miami': {'pop': 102506, 'tracts': 21},
'Monroe': {'pop': 14642, 'tracts': 4},
'Montgomery': {'pop': 535153, 'tracts': 153},
'Morgan': {'pop': 15054, 'tracts': 4},
'Morrow': {'pop': 34827, 'tracts': 6},
'Muskingum': {'pop': 86074, 'tracts': 19},
'Noble': {'pop': 14645, 'tracts': 3},
'Ottawa': {'pop': 41428, 'tracts': 13},
'Paulding': {'pop': 19614, 'tracts': 5},
'Perry': {'pop': 36058, 'tracts': 6},
'Pickaway': {'pop': 55698, 'tracts': 13},
'Pike': {'pop': 28709, 'tracts': 6},
'Portage': {'pop': 161419, 'tracts': 35},
'Preble': {'pop': 42270, 'tracts': 12},
'Putnam': {'pop': 34499, 'tracts': 7},
'Richland': {'pop': 124475, 'tracts': 30},
'Ross': {'pop': 78064, 'tracts': 17},
'Sandusky': {'pop': 60944, 'tracts': 15},
'Scioto': {'pop': 79499, 'tracts': 20},
'Seneca': {'pop': 56745, 'tracts': 14},
'Shelby': {'pop': 49423, 'tracts': 10},
'Stark': {'pop': 375586, 'tracts': 86},
'Summit': {'pop': 541781, 'tracts': 135},
'Trumbull': {'pop': 210312, 'tracts': 55},
'Tuscarawas': {'pop': 92582, 'tracts': 21},
'Union': {'pop': 52300, 'tracts': 10},
'Van Wert': {'pop': 28744, 'tracts': 9},
'Vinton': {'pop': 13435, 'tracts': 3},
'Warren': {'pop': 212693, 'tracts': 33},
'Washington': {'pop': 61778, 'tracts': 16},
'Wayne': {'pop': 114520, 'tracts': 32},
'Williams': {'pop': 37642, 'tracts': 9},
'Wood': {'pop': 125488, 'tracts': 28},
'Wyandot': {'pop': 22615, 'tracts': 6}},
'OK': {'Adair': {'pop': 22683, 'tracts': 5},
'Alfalfa': {'pop': 5642, 'tracts': 3},
'Atoka': {'pop': 14182, 'tracts': 4},
'Beaver': {'pop': 5636, 'tracts': 3},
'Beckham': {'pop': 22119, 'tracts': 4},
'Blaine': {'pop': 11943, 'tracts': 5},
'Bryan': {'pop': 42416, 'tracts': 11},
'Caddo': {'pop': 29600, 'tracts': 8},
'Canadian': {'pop': 115541, 'tracts': 29},
'Carter': {'pop': 47557, 'tracts': 11},
'Cherokee': {'pop': 46987, 'tracts': 9},
'Choctaw': {'pop': 15205, 'tracts': 5},
'Cimarron': {'pop': 2475, 'tracts': 2},
'Cleveland': {'pop': 255755, 'tracts': 62},
'Coal': {'pop': 5925, 'tracts': 2},
'Comanche': {'pop': 124098, 'tracts': 32},
'Cotton': {'pop': 6193, 'tracts': 2},
'Craig': {'pop': 15029, 'tracts': 5},
'Creek': {'pop': 69967, 'tracts': 21},
'Custer': {'pop': 27469, 'tracts': 5},
'Delaware': {'pop': 41487, 'tracts': 9},
'Dewey': {'pop': 4810, 'tracts': 3},
'Ellis': {'pop': 4151, 'tracts': 2},
'Garfield': {'pop': 60580, 'tracts': 12},
'Garvin': {'pop': 27576, 'tracts': 9},
'Grady': {'pop': 52431, 'tracts': 10},
'Grant': {'pop': 4527, 'tracts': 2},
'Greer': {'pop': 6239, 'tracts': 2},
'Harmon': {'pop': 2922, 'tracts': 1},
'Harper': {'pop': 3685, 'tracts': 2},
'Haskell': {'pop': 12769, 'tracts': 4},
'Hughes': {'pop': 14003, 'tracts': 5},
'Jackson': {'pop': 26446, 'tracts': 8},
'Jefferson': {'pop': 6472, 'tracts': 3},
'Johnston': {'pop': 10957, 'tracts': 3},
'Kay': {'pop': 46562, 'tracts': 11},
'Kingfisher': {'pop': 15034, 'tracts': 4},
'Kiowa': {'pop': 9446, 'tracts': 3},
'Latimer': {'pop': 11154, 'tracts': 3},
'Le Flore': {'pop': 50384, 'tracts': 12},
'Lincoln': {'pop': 34273, 'tracts': 7},
'Logan': {'pop': 41848, 'tracts': 8},
'Love': {'pop': 9423, 'tracts': 3},
'Major': {'pop': 7527, 'tracts': 3},
'Marshall': {'pop': 15840, 'tracts': 4},
'Mayes': {'pop': 41259, 'tracts': 9},
'McClain': {'pop': 34506, 'tracts': 6},
'McCurtain': {'pop': 33151, 'tracts': 8},
'McIntosh': {'pop': 20252, 'tracts': 6},
'Murray': {'pop': 13488, 'tracts': 3},
'Muskogee': {'pop': 70990, 'tracts': 16},
'Noble': {'pop': 11561, 'tracts': 4},
'Nowata': {'pop': 10536, 'tracts': 4},
'Okfuskee': {'pop': 12191, 'tracts': 4},
'Oklahoma': {'pop': 718633, 'tracts': 241},
'Okmulgee': {'pop': 40069, 'tracts': 10},
'Osage': {'pop': 47472, 'tracts': 11},
'Ottawa': {'pop': 31848, 'tracts': 9},
'Pawnee': {'pop': 16577, 'tracts': 5},
'Payne': {'pop': 77350, 'tracts': 17},
'Pittsburg': {'pop': 45837, 'tracts': 13},
'Pontotoc': {'pop': 37492, 'tracts': 10},
'Pottawatomie': {'pop': 69442, 'tracts': 16},
'Pushmataha': {'pop': 11572, 'tracts': 3},
'Roger Mills': {'pop': 3647, 'tracts': 1},
'Rogers': {'pop': 86905, 'tracts': 28},
'Seminole': {'pop': 25482, 'tracts': 9},
'Sequoyah': {'pop': 42391, 'tracts': 9},
'Stephens': {'pop': 45048, 'tracts': 11},
'Texas': {'pop': 20640, 'tracts': 5},
'Tillman': {'pop': 7992, 'tracts': 5},
'Tulsa': {'pop': 603403, 'tracts': 175},
'Wagoner': {'pop': 73085, 'tracts': 22},
'Washington': {'pop': 50976, 'tracts': 13},
'Washita': {'pop': 11629, 'tracts': 4},
'Woods': {'pop': 8878, 'tracts': 3},
'Woodward': {'pop': 20081, 'tracts': 5}},
'OR': {'Baker': {'pop': 16134, 'tracts': 6},
'Benton': {'pop': 85579, 'tracts': 18},
'Clackamas': {'pop': 375992, 'tracts': 80},
'Clatsop': {'pop': 37039, 'tracts': 12},
'Columbia': {'pop': 49351, 'tracts': 10},
'Coos': {'pop': 63043, 'tracts': 13},
'Crook': {'pop': 20978, 'tracts': 4},
'Curry': {'pop': 22364, 'tracts': 6},
'Deschutes': {'pop': 157733, 'tracts': 24},
'Douglas': {'pop': 107667, 'tracts': 22},
'Gilliam': {'pop': 1871, 'tracts': 1},
'Grant': {'pop': 7445, 'tracts': 2},
'Harney': {'pop': 7422, 'tracts': 2},
'Hood River': {'pop': 22346, 'tracts': 4},
'Jackson': {'pop': 203206, 'tracts': 41},
'Jefferson': {'pop': 21720, 'tracts': 6},
'Josephine': {'pop': 82713, 'tracts': 16},
'Klamath': {'pop': 66380, 'tracts': 20},
'Lake': {'pop': 7895, 'tracts': 2},
'Lane': {'pop': 351715, 'tracts': 86},
'Lincoln': {'pop': 46034, 'tracts': 18},
'Linn': {'pop': 116672, 'tracts': 21},
'Malheur': {'pop': 31313, 'tracts': 8},
'Marion': {'pop': 315335, 'tracts': 58},
'Morrow': {'pop': 11173, 'tracts': 2},
'Multnomah': {'pop': 735334, 'tracts': 171},
'Polk': {'pop': 75403, 'tracts': 12},
'Sherman': {'pop': 1765, 'tracts': 1},
'Tillamook': {'pop': 25250, 'tracts': 8},
'Umatilla': {'pop': 75889, 'tracts': 15},
'Union': {'pop': 25748, 'tracts': 8},
'Wallowa': {'pop': 7008, 'tracts': 3},
'Wasco': {'pop': 25213, 'tracts': 8},
'Washington': {'pop': 529710, 'tracts': 104},
'Wheeler': {'pop': 1441, 'tracts': 1},
'Yamhill': {'pop': 99193, 'tracts': 17}},
'PA': {'Adams': {'pop': 101407, 'tracts': 23},
'Allegheny': {'pop': 1223348, 'tracts': 402},
'Armstrong': {'pop': 68941, 'tracts': 19},
'Beaver': {'pop': 170539, 'tracts': 51},
'Bedford': {'pop': 49762, 'tracts': 11},
'Berks': {'pop': 411442, 'tracts': 90},
'Blair': {'pop': 127089, 'tracts': 34},
'Bradford': {'pop': 62622, 'tracts': 14},
'Bucks': {'pop': 625249, 'tracts': 143},
'Butler': {'pop': 183862, 'tracts': 44},
'Cambria': {'pop': 143679, 'tracts': 42},
'Cameron': {'pop': 5085, 'tracts': 2},
'Carbon': {'pop': 65249, 'tracts': 12},
'Centre': {'pop': 153990, 'tracts': 31},
'Chester': {'pop': 498886, 'tracts': 116},
'Clarion': {'pop': 39988, 'tracts': 10},
'Clearfield': {'pop': 81642, 'tracts': 20},
'Clinton': {'pop': 39238, 'tracts': 9},
'Columbia': {'pop': 67295, 'tracts': 15},
'Crawford': {'pop': 88765, 'tracts': 23},
'Cumberland': {'pop': 235406, 'tracts': 49},
'Dauphin': {'pop': 268100, 'tracts': 65},
'Delaware': {'pop': 558979, 'tracts': 144},
'Elk': {'pop': 31946, 'tracts': 9},
'Erie': {'pop': 280566, 'tracts': 72},
'Fayette': {'pop': 136606, 'tracts': 36},
'Forest': {'pop': 7716, 'tracts': 3},
'Franklin': {'pop': 149618, 'tracts': 27},
'Fulton': {'pop': 14845, 'tracts': 3},
'Greene': {'pop': 38686, 'tracts': 9},
'Huntingdon': {'pop': 45913, 'tracts': 12},
'Indiana': {'pop': 88880, 'tracts': 23},
'Jefferson': {'pop': 45200, 'tracts': 13},
'Juniata': {'pop': 24636, 'tracts': 5},
'Lackawanna': {'pop': 214437, 'tracts': 59},
'Lancaster': {'pop': 519445, 'tracts': 98},
'Lawrence': {'pop': 91108, 'tracts': 28},
'Lebanon': {'pop': 133568, 'tracts': 31},
'Lehigh': {'pop': 349497, 'tracts': 76},
'Luzerne': {'pop': 320918, 'tracts': 104},
'Lycoming': {'pop': 116111, 'tracts': 29},
'McKean': {'pop': 43450, 'tracts': 12},
'Mercer': {'pop': 116638, 'tracts': 30},
'Mifflin': {'pop': 46682, 'tracts': 12},
'Monroe': {'pop': 169842, 'tracts': 33},
'Montgomery': {'pop': 799874, 'tracts': 211},
'Montour': {'pop': 18267, 'tracts': 4},
'Northampton': {'pop': 297735, 'tracts': 68},
'Northumberland': {'pop': 94528, 'tracts': 24},
'Perry': {'pop': 45969, 'tracts': 10},
'Philadelphia': {'pop': 1526006, 'tracts': 384},
'Pike': {'pop': 57369, 'tracts': 18},
'Potter': {'pop': 17457, 'tracts': 5},
'Schuylkill': {'pop': 148289, 'tracts': 40},
'Snyder': {'pop': 39702, 'tracts': 8},
'Somerset': {'pop': 77742, 'tracts': 21},
'Sullivan': {'pop': 6428, 'tracts': 2},
'Susquehanna': {'pop': 43356, 'tracts': 11},
'Tioga': {'pop': 41981, 'tracts': 10},
'Union': {'pop': 44947, 'tracts': 10},
'Venango': {'pop': 54984, 'tracts': 16},
'Warren': {'pop': 41815, 'tracts': 13},
'Washington': {'pop': 207820, 'tracts': 59},
'Wayne': {'pop': 52822, 'tracts': 14},
'Westmoreland': {'pop': 365169, 'tracts': 100},
'Wyoming': {'pop': 28276, 'tracts': 7},
'York': {'pop': 434972, 'tracts': 90}},
'RI': {'Bristol': {'pop': 49875, 'tracts': 11},
'Kent': {'pop': 166158, 'tracts': 39},
'Newport': {'pop': 82888, 'tracts': 22},
'Providence': {'pop': 626667, 'tracts': 141},
'Washington': {'pop': 126979, 'tracts': 29}},
'SC': {'Abbeville': {'pop': 25417, 'tracts': 6},
'Aiken': {'pop': 160099, 'tracts': 33},
'Allendale': {'pop': 10419, 'tracts': 3},
'Anderson': {'pop': 187126, 'tracts': 39},
'Bamberg': {'pop': 15987, 'tracts': 4},
'Barnwell': {'pop': 22621, 'tracts': 6},
'Beaufort': {'pop': 162233, 'tracts': 41},
'Berkeley': {'pop': 177843, 'tracts': 45},
'Calhoun': {'pop': 15175, 'tracts': 3},
'Charleston': {'pop': 350209, 'tracts': 86},
'Cherokee': {'pop': 55342, 'tracts': 13},
'Chester': {'pop': 33140, 'tracts': 11},
'Chesterfield': {'pop': 46734, 'tracts': 10},
'Clarendon': {'pop': 34971, 'tracts': 12},
'Colleton': {'pop': 38892, 'tracts': 10},
'Darlington': {'pop': 68681, 'tracts': 16},
'Dillon': {'pop': 32062, 'tracts': 6},
'Dorchester': {'pop': 136555, 'tracts': 25},
'Edgefield': {'pop': 26985, 'tracts': 6},
'Fairfield': {'pop': 23956, 'tracts': 5},
'Florence': {'pop': 136885, 'tracts': 33},
'Georgetown': {'pop': 60158, 'tracts': 15},
'Greenville': {'pop': 451225, 'tracts': 111},
'Greenwood': {'pop': 69661, 'tracts': 14},
'Hampton': {'pop': 21090, 'tracts': 5},
'Horry': {'pop': 269291, 'tracts': 72},
'Jasper': {'pop': 24777, 'tracts': 5},
'Kershaw': {'pop': 61697, 'tracts': 15},
'Lancaster': {'pop': 76652, 'tracts': 14},
'Laurens': {'pop': 66537, 'tracts': 17},
'Lee': {'pop': 19220, 'tracts': 7},
'Lexington': {'pop': 262391, 'tracts': 74},
'Marion': {'pop': 33062, 'tracts': 8},
'Marlboro': {'pop': 28933, 'tracts': 7},
'McCormick': {'pop': 10233, 'tracts': 3},
'Newberry': {'pop': 37508, 'tracts': 8},
'Oconee': {'pop': 74273, 'tracts': 15},
'Orangeburg': {'pop': 92501, 'tracts': 20},
'Pickens': {'pop': 119224, 'tracts': 28},
'Richland': {'pop': 384504, 'tracts': 89},
'Saluda': {'pop': 19875, 'tracts': 5},
'Spartanburg': {'pop': 284307, 'tracts': 69},
'Sumter': {'pop': 107456, 'tracts': 23},
'Union': {'pop': 28961, 'tracts': 9},
'Williamsburg': {'pop': 34423, 'tracts': 11},
'York': {'pop': 226073, 'tracts': 46}},
'SD': {'Aurora': {'pop': 2710, 'tracts': 1},
'Beadle': {'pop': 17398, 'tracts': 6},
'Bennett': {'pop': 3431, 'tracts': 2},
'Bon Homme': {'pop': 7070, 'tracts': 2},
'Brookings': {'pop': 31965, 'tracts': 6},
'Brown': {'pop': 36531, 'tracts': 8},
'Brule': {'pop': 5255, 'tracts': 2},
'Buffalo': {'pop': 1912, 'tracts': 1},
'Butte': {'pop': 10110, 'tracts': 2},
'Campbell': {'pop': 1466, 'tracts': 1},
'Charles Mix': {'pop': 9129, 'tracts': 3},
'Clark': {'pop': 3691, 'tracts': 1},
'Clay': {'pop': 13864, 'tracts': 3},
'Codington': {'pop': 27227, 'tracts': 7},
'Corson': {'pop': 4050, 'tracts': 2},
'Custer': {'pop': 8216, 'tracts': 2},
'Davison': {'pop': 19504, 'tracts': 4},
'Day': {'pop': 5710, 'tracts': 3},
'Deuel': {'pop': 4364, 'tracts': 2},
'Dewey': {'pop': 5301, 'tracts': 2},
'Douglas': {'pop': 3002, 'tracts': 1},
'Edmunds': {'pop': 4071, 'tracts': 2},
'Fall River': {'pop': 7094, 'tracts': 2},
'Faulk': {'pop': 2364, 'tracts': 1},
'Grant': {'pop': 7356, 'tracts': 2},
'Gregory': {'pop': 4271, 'tracts': 2},
'Haakon': {'pop': 1937, 'tracts': 1},
'Hamlin': {'pop': 5903, 'tracts': 2},
'Hand': {'pop': 3431, 'tracts': 2},
'Hanson': {'pop': 3331, 'tracts': 1},
'Harding': {'pop': 1255, 'tracts': 1},
'Hughes': {'pop': 17022, 'tracts': 4},
'Hutchinson': {'pop': 7343, 'tracts': 3},
'Hyde': {'pop': 1420, 'tracts': 1},
'Jackson': {'pop': 3031, 'tracts': 2},
'Jerauld': {'pop': 2071, 'tracts': 1},
'Jones': {'pop': 1006, 'tracts': 1},
'Kingsbury': {'pop': 5148, 'tracts': 2},
'Lake': {'pop': 11200, 'tracts': 3},
'Lawrence': {'pop': 24097, 'tracts': 5},
'Lincoln': {'pop': 44828, 'tracts': 11},
'Lyman': {'pop': 3755, 'tracts': 2},
'Marshall': {'pop': 4656, 'tracts': 1},
'McCook': {'pop': 5618, 'tracts': 2},
'McPherson': {'pop': 2459, 'tracts': 1},
'Meade': {'pop': 25434, 'tracts': 5},
'Mellette': {'pop': 2048, 'tracts': 1},
'Miner': {'pop': 2389, 'tracts': 1},
'Minnehaha': {'pop': 169468, 'tracts': 42},
'Moody': {'pop': 6486, 'tracts': 2},
'Pennington': {'pop': 100948, 'tracts': 23},
'Perkins': {'pop': 2982, 'tracts': 1},
'Potter': {'pop': 2329, 'tracts': 1},
'Roberts': {'pop': 10149, 'tracts': 4},
'Sanborn': {'pop': 2355, 'tracts': 1},
'Shannon': {'pop': 13586, 'tracts': 3},
'Spink': {'pop': 6415, 'tracts': 3},
'Stanley': {'pop': 2966, 'tracts': 1},
'Sully': {'pop': 1373, 'tracts': 1},
'Todd': {'pop': 9612, 'tracts': 2},
'Tripp': {'pop': 5644, 'tracts': 2},
'Turner': {'pop': 8347, 'tracts': 2},
'Union': {'pop': 14399, 'tracts': 3},
'Walworth': {'pop': 5438, 'tracts': 2},
'Yankton': {'pop': 22438, 'tracts': 5},
'Ziebach': {'pop': 2801, 'tracts': 1}},
'TN': {'Anderson': {'pop': 75129, 'tracts': 18},
'Bedford': {'pop': 45058, 'tracts': 9},
'Benton': {'pop': 16489, 'tracts': 5},
'Bledsoe': {'pop': 12876, 'tracts': 3},
'Blount': {'pop': 123010, 'tracts': 28},
'Bradley': {'pop': 98963, 'tracts': 19},
'Campbell': {'pop': 40716, 'tracts': 11},
'Cannon': {'pop': 13801, 'tracts': 3},
'Carroll': {'pop': 28522, 'tracts': 8},
'Carter': {'pop': 57424, 'tracts': 17},
'Cheatham': {'pop': 39105, 'tracts': 9},
'Chester': {'pop': 17131, 'tracts': 3},
'Claiborne': {'pop': 32213, 'tracts': 9},
'Clay': {'pop': 7861, 'tracts': 2},
'Cocke': {'pop': 35662, 'tracts': 9},
'Coffee': {'pop': 52796, 'tracts': 12},
'Crockett': {'pop': 14586, 'tracts': 5},
'Cumberland': {'pop': 56053, 'tracts': 14},
'Davidson': {'pop': 626681, 'tracts': 161},
'DeKalb': {'pop': 18723, 'tracts': 4},
'Decatur': {'pop': 11757, 'tracts': 4},
'Dickson': {'pop': 49666, 'tracts': 10},
'Dyer': {'pop': 38335, 'tracts': 8},
'Fayette': {'pop': 38413, 'tracts': 11},
'Fentress': {'pop': 17959, 'tracts': 4},
'Franklin': {'pop': 41052, 'tracts': 9},
'Gibson': {'pop': 49683, 'tracts': 14},
'Giles': {'pop': 29485, 'tracts': 8},
'Grainger': {'pop': 22657, 'tracts': 5},
'Greene': {'pop': 68831, 'tracts': 15},
'Grundy': {'pop': 13703, 'tracts': 4},
'Hamblen': {'pop': 62544, 'tracts': 12},
'Hamilton': {'pop': 336463, 'tracts': 82},
'Hancock': {'pop': 6819, 'tracts': 2},
'Hardeman': {'pop': 27253, 'tracts': 6},
'Hardin': {'pop': 26026, 'tracts': 6},
'Hawkins': {'pop': 56833, 'tracts': 13},
'Haywood': {'pop': 18787, 'tracts': 6},
'Henderson': {'pop': 27769, 'tracts': 6},
'Henry': {'pop': 32330, 'tracts': 9},
'Hickman': {'pop': 24690, 'tracts': 6},
'Houston': {'pop': 8426, 'tracts': 3},
'Humphreys': {'pop': 18538, 'tracts': 5},
'Jackson': {'pop': 11638, 'tracts': 4},
'Jefferson': {'pop': 51407, 'tracts': 9},
'Johnson': {'pop': 18244, 'tracts': 5},
'Knox': {'pop': 432226, 'tracts': 112},
'Lake': {'pop': 7832, 'tracts': 2},
'Lauderdale': {'pop': 27815, 'tracts': 9},
'Lawrence': {'pop': 41869, 'tracts': 11},
'Lewis': {'pop': 12161, 'tracts': 2},
'Lincoln': {'pop': 33361, 'tracts': 9},
'Loudon': {'pop': 48556, 'tracts': 10},
'Macon': {'pop': 22248, 'tracts': 4},
'Madison': {'pop': 98294, 'tracts': 27},
'Marion': {'pop': 28237, 'tracts': 6},
'Marshall': {'pop': 30617, 'tracts': 6},
'Maury': {'pop': 80956, 'tracts': 17},
'McMinn': {'pop': 52266, 'tracts': 10},
'McNairy': {'pop': 26075, 'tracts': 7},
'Meigs': {'pop': 11753, 'tracts': 3},
'Monroe': {'pop': 44519, 'tracts': 7},
'Montgomery': {'pop': 172331, 'tracts': 39},
'Moore': {'pop': 6362, 'tracts': 2},
'Morgan': {'pop': 21987, 'tracts': 5},
'Obion': {'pop': 31807, 'tracts': 10},
'Overton': {'pop': 22083, 'tracts': 7},
'Perry': {'pop': 7915, 'tracts': 2},
'Pickett': {'pop': 5077, 'tracts': 1},
'Polk': {'pop': 16825, 'tracts': 5},
'Putnam': {'pop': 72321, 'tracts': 15},
'Rhea': {'pop': 31809, 'tracts': 6},
'Roane': {'pop': 54181, 'tracts': 11},
'Robertson': {'pop': 66283, 'tracts': 14},
'Rutherford': {'pop': 262604, 'tracts': 49},
'Scott': {'pop': 22228, 'tracts': 5},
'Sequatchie': {'pop': 14112, 'tracts': 3},
'Sevier': {'pop': 89889, 'tracts': 18},
'Shelby': {'pop': 927644, 'tracts': 221},
'Smith': {'pop': 19166, 'tracts': 5},
'Stewart': {'pop': 13324, 'tracts': 5},
'Sullivan': {'pop': 156823, 'tracts': 39},
'Sumner': {'pop': 160645, 'tracts': 42},
'Tipton': {'pop': 61081, 'tracts': 13},
'Trousdale': {'pop': 7870, 'tracts': 2},
'Unicoi': {'pop': 18313, 'tracts': 4},
'Union': {'pop': 19109, 'tracts': 4},
'Van Buren': {'pop': 5548, 'tracts': 2},
'Warren': {'pop': 39839, 'tracts': 9},
'Washington': {'pop': 122979, 'tracts': 23},
'Wayne': {'pop': 17021, 'tracts': 4},
'Weakley': {'pop': 35021, 'tracts': 11},
'White': {'pop': 25841, 'tracts': 6},
'Williamson': {'pop': 183182, 'tracts': 37},
'Wilson': {'pop': 113993, 'tracts': 21}},
'TX': {'Anderson': {'pop': 58458, 'tracts': 11},
'Andrews': {'pop': 14786, 'tracts': 4},
'Angelina': {'pop': 86771, 'tracts': 17},
'Aransas': {'pop': 23158, 'tracts': 5},
'Archer': {'pop': 9054, 'tracts': 3},
'Armstrong': {'pop': 1901, 'tracts': 1},
'Atascosa': {'pop': 44911, 'tracts': 8},
'Austin': {'pop': 28417, 'tracts': 6},
'Bailey': {'pop': 7165, 'tracts': 1},
'Bandera': {'pop': 20485, 'tracts': 5},
'Bastrop': {'pop': 74171, 'tracts': 10},
'Baylor': {'pop': 3726, 'tracts': 1},
'Bee': {'pop': 31861, 'tracts': 7},
'Bell': {'pop': 310235, 'tracts': 65},
'Bexar': {'pop': 1714773, 'tracts': 366},
'Blanco': {'pop': 10497, 'tracts': 2},
'Borden': {'pop': 641, 'tracts': 1},
'Bosque': {'pop': 18212, 'tracts': 7},
'Bowie': {'pop': 92565, 'tracts': 18},
'Brazoria': {'pop': 313166, 'tracts': 51},
'Brazos': {'pop': 194851, 'tracts': 42},
'Brewster': {'pop': 9232, 'tracts': 3},
'Briscoe': {'pop': 1637, 'tracts': 1},
'Brooks': {'pop': 7223, 'tracts': 2},
'Brown': {'pop': 38106, 'tracts': 12},
'Burleson': {'pop': 17187, 'tracts': 5},
'Burnet': {'pop': 42750, 'tracts': 8},
'Caldwell': {'pop': 38066, 'tracts': 8},
'Calhoun': {'pop': 21381, 'tracts': 6},
'Callahan': {'pop': 13544, 'tracts': 3},
'Cameron': {'pop': 406220, 'tracts': 86},
'Camp': {'pop': 12401, 'tracts': 3},
'Carson': {'pop': 6182, 'tracts': 2},
'Cass': {'pop': 30464, 'tracts': 7},
'Castro': {'pop': 8062, 'tracts': 3},
'Chambers': {'pop': 35096, 'tracts': 6},
'Cherokee': {'pop': 50845, 'tracts': 12},
'Childress': {'pop': 7041, 'tracts': 2},
'Clay': {'pop': 10752, 'tracts': 3},
'Cochran': {'pop': 3127, 'tracts': 1},
'Coke': {'pop': 3320, 'tracts': 2},
'Coleman': {'pop': 8895, 'tracts': 3},
'Collin': {'pop': 782341, 'tracts': 152},
'Collingsworth': {'pop': 3057, 'tracts': 1},
'Colorado': {'pop': 20874, 'tracts': 5},
'Comal': {'pop': 108472, 'tracts': 24},
'Comanche': {'pop': 13974, 'tracts': 4},
'Concho': {'pop': 4087, 'tracts': 1},
'Cooke': {'pop': 38437, 'tracts': 8},
'Coryell': {'pop': 75388, 'tracts': 19},
'Cottle': {'pop': 1505, 'tracts': 1},
'Crane': {'pop': 4375, 'tracts': 1},
'Crockett': {'pop': 3719, 'tracts': 1},
'Crosby': {'pop': 6059, 'tracts': 3},
'Culberson': {'pop': 2398, 'tracts': 1},
'Dallam': {'pop': 6703, 'tracts': 2},
'Dallas': {'pop': 2368139, 'tracts': 529},
'Dawson': {'pop': 13833, 'tracts': 4},
'DeWitt': {'pop': 20097, 'tracts': 5},
'Deaf Smith': {'pop': 19372, 'tracts': 4},
'Delta': {'pop': 5231, 'tracts': 2},
'Denton': {'pop': 662614, 'tracts': 137},
'Dickens': {'pop': 2444, 'tracts': 1},
'Dimmit': {'pop': 9996, 'tracts': 2},
'Donley': {'pop': 3677, 'tracts': 2},
'Duval': {'pop': 11782, 'tracts': 3},
'Eastland': {'pop': 18583, 'tracts': 5},
'Ector': {'pop': 137130, 'tracts': 28},
'Edwards': {'pop': 2002, 'tracts': 1},
'El Paso': {'pop': 800647, 'tracts': 161},
'Ellis': {'pop': 149610, 'tracts': 31},
'Erath': {'pop': 37890, 'tracts': 8},
'Falls': {'pop': 17866, 'tracts': 6},
'Fannin': {'pop': 33915, 'tracts': 9},
'Fayette': {'pop': 24554, 'tracts': 7},
'Fisher': {'pop': 3974, 'tracts': 2},
'Floyd': {'pop': 6446, 'tracts': 2},
'Foard': {'pop': 1336, 'tracts': 1},
'Fort Bend': {'pop': 585375, 'tracts': 76},
'Franklin': {'pop': 10605, 'tracts': 3},
'Freestone': {'pop': 19816, 'tracts': 7},
'Frio': {'pop': 17217, 'tracts': 3},
'Gaines': {'pop': 17526, 'tracts': 3},
'Galveston': {'pop': 291309, 'tracts': 67},
'Garza': {'pop': 6461, 'tracts': 1},
'Gillespie': {'pop': 24837, 'tracts': 5},
'Glasscock': {'pop': 1226, 'tracts': 1},
'Goliad': {'pop': 7210, 'tracts': 2},
'Gonzales': {'pop': 19807, 'tracts': 6},
'Gray': {'pop': 22535, 'tracts': 7},
'Grayson': {'pop': 120877, 'tracts': 26},
'Gregg': {'pop': 121730, 'tracts': 25},
'Grimes': {'pop': 26604, 'tracts': 6},
'Guadalupe': {'pop': 131533, 'tracts': 29},
'Hale': {'pop': 36273, 'tracts': 9},
'Hall': {'pop': 3353, 'tracts': 1},
'Hamilton': {'pop': 8517, 'tracts': 3},
'Hansford': {'pop': 5613, 'tracts': 2},
'Hardeman': {'pop': 4139, 'tracts': 1},
'Hardin': {'pop': 54635, 'tracts': 11},
'Harris': {'pop': 4092459, 'tracts': 786},
'Harrison': {'pop': 65631, 'tracts': 14},
'Hartley': {'pop': 6062, 'tracts': 1},
'Haskell': {'pop': 5899, 'tracts': 2},
'Hays': {'pop': 157107, 'tracts': 25},
'Hemphill': {'pop': 3807, 'tracts': 1},
'Henderson': {'pop': 78532, 'tracts': 17},
'Hidalgo': {'pop': 774769, 'tracts': 113},
'Hill': {'pop': 35089, 'tracts': 11},
'Hockley': {'pop': 22935, 'tracts': 7},
'Hood': {'pop': 51182, 'tracts': 10},
'Hopkins': {'pop': 35161, 'tracts': 9},
'Houston': {'pop': 23732, 'tracts': 7},
'Howard': {'pop': 35012, 'tracts': 10},
'Hudspeth': {'pop': 3476, 'tracts': 1},
'Hunt': {'pop': 86129, 'tracts': 19},
'Hutchinson': {'pop': 22150, 'tracts': 7},
'Irion': {'pop': 1599, 'tracts': 1},
'Jack': {'pop': 9044, 'tracts': 3},
'Jackson': {'pop': 14075, 'tracts': 3},
'Jasper': {'pop': 35710, 'tracts': 8},
'Jeff Davis': {'pop': 2342, 'tracts': 1},
'Jefferson': {'pop': 252273, 'tracts': 72},
'Jim Hogg': {'pop': 5300, 'tracts': 2},
'Jim Wells': {'pop': 40838, 'tracts': 7},
'Johnson': {'pop': 150934, 'tracts': 28},
'Jones': {'pop': 20202, 'tracts': 6},
'Karnes': {'pop': 14824, 'tracts': 4},
'Kaufman': {'pop': 103350, 'tracts': 18},
'Kendall': {'pop': 33410, 'tracts': 6},
'Kenedy': {'pop': 416, 'tracts': 1},
'Kent': {'pop': 808, 'tracts': 1},
'Kerr': {'pop': 49625, 'tracts': 10},
'Kimble': {'pop': 4607, 'tracts': 2},
'King': {'pop': 286, 'tracts': 1},
'Kinney': {'pop': 3598, 'tracts': 1},
'Kleberg': {'pop': 32061, 'tracts': 6},
'Knox': {'pop': 3719, 'tracts': 2},
'La Salle': {'pop': 6886, 'tracts': 1},
'Lamar': {'pop': 49793, 'tracts': 12},
'Lamb': {'pop': 13977, 'tracts': 5},
'Lampasas': {'pop': 19677, 'tracts': 5},
'Lavaca': {'pop': 19263, 'tracts': 6},
'Lee': {'pop': 16612, 'tracts': 4},
'Leon': {'pop': 16801, 'tracts': 3},
'Liberty': {'pop': 75643, 'tracts': 14},
'Limestone': {'pop': 23384, 'tracts': 8},
'Lipscomb': {'pop': 3302, 'tracts': 2},
'Live Oak': {'pop': 11531, 'tracts': 4},
'Llano': {'pop': 19301, 'tracts': 6},
'Loving': {'pop': 82, 'tracts': 1},
'Lubbock': {'pop': 278831, 'tracts': 68},
'Lynn': {'pop': 5915, 'tracts': 3},
'Madison': {'pop': 13664, 'tracts': 4},
'Marion': {'pop': 10546, 'tracts': 4},
'Martin': {'pop': 4799, 'tracts': 2},
'Mason': {'pop': 4012, 'tracts': 2},
'Matagorda': {'pop': 36702, 'tracts': 10},
'Maverick': {'pop': 54258, 'tracts': 9},
'McCulloch': {'pop': 8283, 'tracts': 3},
'McLennan': {'pop': 234906, 'tracts': 51},
'McMullen': {'pop': 707, 'tracts': 1},
'Medina': {'pop': 46006, 'tracts': 8},
'Menard': {'pop': 2242, 'tracts': 1},
'Midland': {'pop': 136872, 'tracts': 27},
'Milam': {'pop': 24757, 'tracts': 7},
'Mills': {'pop': 4936, 'tracts': 2},
'Mitchell': {'pop': 9403, 'tracts': 2},
'Montague': {'pop': 19719, 'tracts': 6},
'Montgomery': {'pop': 455746, 'tracts': 59},
'Moore': {'pop': 21904, 'tracts': 4},
'Morris': {'pop': 12934, 'tracts': 3},
'Motley': {'pop': 1210, 'tracts': 1},
'Nacogdoches': {'pop': 64524, 'tracts': 13},
'Navarro': {'pop': 47735, 'tracts': 10},
'Newton': {'pop': 14445, 'tracts': 4},
'Nolan': {'pop': 15216, 'tracts': 5},
'Nueces': {'pop': 340223, 'tracts': 81},
'Ochiltree': {'pop': 10223, 'tracts': 3},
'Oldham': {'pop': 2052, 'tracts': 1},
'Orange': {'pop': 81837, 'tracts': 21},
'Palo Pinto': {'pop': 28111, 'tracts': 9},
'Panola': {'pop': 23796, 'tracts': 6},
'Parker': {'pop': 116927, 'tracts': 19},
'Parmer': {'pop': 10269, 'tracts': 2},
'Pecos': {'pop': 15507, 'tracts': 4},
'Polk': {'pop': 45413, 'tracts': 10},
'Potter': {'pop': 121073, 'tracts': 34},
'Presidio': {'pop': 7818, 'tracts': 2},
'Rains': {'pop': 10914, 'tracts': 2},
'Randall': {'pop': 120725, 'tracts': 29},
'Reagan': {'pop': 3367, 'tracts': 1},
'Real': {'pop': 3309, 'tracts': 1},
'Red River': {'pop': 12860, 'tracts': 4},
'Reeves': {'pop': 13783, 'tracts': 5},
'Refugio': {'pop': 7383, 'tracts': 2},
'Roberts': {'pop': 929, 'tracts': 1},
'Robertson': {'pop': 16622, 'tracts': 5},
'Rockwall': {'pop': 78337, 'tracts': 11},
'Runnels': {'pop': 10501, 'tracts': 4},
'Rusk': {'pop': 53330, 'tracts': 13},
'Sabine': {'pop': 10834, 'tracts': 3},
'San Augustine': {'pop': 8865, 'tracts': 3},
'San Jacinto': {'pop': 26384, 'tracts': 4},
'San Patricio': {'pop': 64804, 'tracts': 16},
'San Saba': {'pop': 6131, 'tracts': 2},
'Schleicher': {'pop': 3461, 'tracts': 1},
'Scurry': {'pop': 16921, 'tracts': 4},
'Shackelford': {'pop': 3378, 'tracts': 1},
'Shelby': {'pop': 25448, 'tracts': 6},
'Sherman': {'pop': 3034, 'tracts': 1},
'Smith': {'pop': 209714, 'tracts': 41},
'Somervell': {'pop': 8490, 'tracts': 2},
'Starr': {'pop': 60968, 'tracts': 15},
'Stephens': {'pop': 9630, 'tracts': 3},
'Sterling': {'pop': 1143, 'tracts': 1},
'Stonewall': {'pop': 1490, 'tracts': 1},
'Sutton': {'pop': 4128, 'tracts': 1},
'Swisher': {'pop': 7854, 'tracts': 3},
'Tarrant': {'pop': 1809034, 'tracts': 357},
'Taylor': {'pop': 131506, 'tracts': 38},
'Terrell': {'pop': 984, 'tracts': 1},
'Terry': {'pop': 12651, 'tracts': 3},
'Throckmorton': {'pop': 1641, 'tracts': 1},
'Titus': {'pop': 32334, 'tracts': 8},
'Tom Green': {'pop': 110224, 'tracts': 25},
'Travis': {'pop': 1024266, 'tracts': 218},
'Trinity': {'pop': 14585, 'tracts': 5},
'Tyler': {'pop': 21766, 'tracts': 5},
'Upshur': {'pop': 39309, 'tracts': 7},
'Upton': {'pop': 3355, 'tracts': 2},
'Uvalde': {'pop': 26405, 'tracts': 5},
'Val Verde': {'pop': 48879, 'tracts': 10},
'Van Zandt': {'pop': 52579, 'tracts': 10},
'Victoria': {'pop': 86793, 'tracts': 23},
'Walker': {'pop': 67861, 'tracts': 10},
'Waller': {'pop': 43205, 'tracts': 6},
'Ward': {'pop': 10658, 'tracts': 3},
'Washington': {'pop': 33718, 'tracts': 6},
'Webb': {'pop': 250304, 'tracts': 61},
'Wharton': {'pop': 41280, 'tracts': 11},
'Wheeler': {'pop': 5410, 'tracts': 2},
'Wichita': {'pop': 131500, 'tracts': 37},
'Wilbarger': {'pop': 13535, 'tracts': 4},
'Willacy': {'pop': 22134, 'tracts': 6},
'Williamson': {'pop': 422679, 'tracts': 89},
'Wilson': {'pop': 42918, 'tracts': 11},
'Winkler': {'pop': 7110, 'tracts': 3},
'Wise': {'pop': 59127, 'tracts': 11},
'Wood': {'pop': 41964, 'tracts': 10},
'Yoakum': {'pop': 7879, 'tracts': 2},
'Young': {'pop': 18550, 'tracts': 4},
'Zapata': {'pop': 14018, 'tracts': 3},
'Zavala': {'pop': 11677, 'tracts': 4}},
'UT': {'Beaver': {'pop': 6629, 'tracts': 2},
'Box Elder': {'pop': 49975, 'tracts': 11},
'Cache': {'pop': 112656, 'tracts': 26},
'Carbon': {'pop': 21403, 'tracts': 5},
'Daggett': {'pop': 1059, 'tracts': 1},
'Davis': {'pop': 306479, 'tracts': 54},
'Duchesne': {'pop': 18607, 'tracts': 3},
'Emery': {'pop': 10976, 'tracts': 3},
'Garfield': {'pop': 5172, 'tracts': 2},
'Grand': {'pop': 9225, 'tracts': 2},
'Iron': {'pop': 46163, 'tracts': 8},
'Juab': {'pop': 10246, 'tracts': 2},
'Kane': {'pop': 7125, 'tracts': 2},
'Millard': {'pop': 12503, 'tracts': 3},
'Morgan': {'pop': 9469, 'tracts': 2},
'Piute': {'pop': 1556, 'tracts': 1},
'Rich': {'pop': 2264, 'tracts': 1},
'Salt Lake': {'pop': 1029655, 'tracts': 212},
'San Juan': {'pop': 14746, 'tracts': 4},
'Sanpete': {'pop': 27822, 'tracts': 5},
'Sevier': {'pop': 20802, 'tracts': 5},
'Summit': {'pop': 36324, 'tracts': 13},
'Tooele': {'pop': 58218, 'tracts': 11},
'Uintah': {'pop': 32588, 'tracts': 6},
'Utah': {'pop': 516564, 'tracts': 128},
'Wasatch': {'pop': 23530, 'tracts': 4},
'Washington': {'pop': 138115, 'tracts': 21},
'Wayne': {'pop': 2778, 'tracts': 1},
'Weber': {'pop': 231236, 'tracts': 50}},
'VA': {'Accomack': {'pop': 33164, 'tracts': 11},
'Albemarle': {'pop': 98970, 'tracts': 22},
'Alexandria': {'pop': 139966, 'tracts': 38},
'Alleghany': {'pop': 16250, 'tracts': 6},
'Amelia': {'pop': 12690, 'tracts': 2},
'Amherst': {'pop': 32353, 'tracts': 9},
'Appomattox': {'pop': 14973, 'tracts': 3},
'Arlington': {'pop': 207627, 'tracts': 59},
'Augusta': {'pop': 73750, 'tracts': 13},
'Bath': {'pop': 4731, 'tracts': 1},
'Bedford': {'pop': 68676, 'tracts': 16},
'Bedford City': {'pop': 6222, 'tracts': 1},
'Bland': {'pop': 6824, 'tracts': 2},
'Botetourt': {'pop': 33148, 'tracts': 8},
'Bristol': {'pop': 17835, 'tracts': 4},
'Brunswick': {'pop': 17434, 'tracts': 5},
'Buchanan': {'pop': 24098, 'tracts': 7},
'Buckingham': {'pop': 17146, 'tracts': 4},
'Buena Vista': {'pop': 6650, 'tracts': 1},
'Campbell': {'pop': 54842, 'tracts': 12},
'Caroline': {'pop': 28545, 'tracts': 7},
'Carroll': {'pop': 30042, 'tracts': 7},
'Charles City': {'pop': 7256, 'tracts': 3},
'Charlotte': {'pop': 12586, 'tracts': 3},
'Charlottesville': {'pop': 43475, 'tracts': 12},
'Chesapeake': {'pop': 222209, 'tracts': 41},
'Chesterfield': {'pop': 316236, 'tracts': 71},
'Clarke': {'pop': 14034, 'tracts': 3},
'Colonial Heights': {'pop': 17411, 'tracts': 5},
'Covington': {'pop': 5961, 'tracts': 2},
'Craig': {'pop': 5190, 'tracts': 1},
'Culpeper': {'pop': 46689, 'tracts': 8},
'Cumberland': {'pop': 10052, 'tracts': 2},
'Danville': {'pop': 43055, 'tracts': 16},
'Dickenson': {'pop': 15903, 'tracts': 4},
'Dinwiddie': {'pop': 28001, 'tracts': 7},
'Emporia': {'pop': 5927, 'tracts': 2},
'Essex': {'pop': 11151, 'tracts': 3},
'Fairfax': {'pop': 1081726, 'tracts': 258},
'Fairfax City': {'pop': 22565, 'tracts': 5},
'Falls Church': {'pop': 12332, 'tracts': 3},
'Fauquier': {'pop': 65203, 'tracts': 17},
'Floyd': {'pop': 15279, 'tracts': 3},
'Fluvanna': {'pop': 25691, 'tracts': 4},
'Franklin': {'pop': 56159, 'tracts': 10},
'Franklin City': {'pop': 8582, 'tracts': 2},
'Frederick': {'pop': 78305, 'tracts': 14},
'Fredericksburg': {'pop': 24286, 'tracts': 6},
'Galax': {'pop': 7042, 'tracts': 2},
'Giles': {'pop': 17286, 'tracts': 4},
'Gloucester': {'pop': 36858, 'tracts': 8},
'Goochland': {'pop': 21717, 'tracts': 5},
'Grayson': {'pop': 15533, 'tracts': 5},
'Greene': {'pop': 18403, 'tracts': 3},
'Greensville': {'pop': 12243, 'tracts': 3},
'Halifax': {'pop': 36241, 'tracts': 9},
'Hampton': {'pop': 137436, 'tracts': 34},
'Hanover': {'pop': 99863, 'tracts': 23},
'Harrisonburg': {'pop': 48914, 'tracts': 11},
'Henrico': {'pop': 306935, 'tracts': 64},
'Henry': {'pop': 54151, 'tracts': 14},
'Highland': {'pop': 2321, 'tracts': 1},
'Hopewell': {'pop': 22591, 'tracts': 7},
'Isle of Wight': {'pop': 35270, 'tracts': 8},
'James City': {'pop': 67009, 'tracts': 11},
'King George': {'pop': 23584, 'tracts': 5},
'King William': {'pop': 15935, 'tracts': 4},
'King and Queen': {'pop': 6945, 'tracts': 2},
'Lancaster': {'pop': 11391, 'tracts': 3},
'Lee': {'pop': 25587, 'tracts': 6},
'Lexington': {'pop': 7042, 'tracts': 1},
'Loudoun': {'pop': 312311, 'tracts': 65},
'Louisa': {'pop': 33153, 'tracts': 6},
'Lunenburg': {'pop': 12914, 'tracts': 3},
'Lynchburg': {'pop': 75568, 'tracts': 19},
'Madison': {'pop': 13308, 'tracts': 2},
'Manassas': {'pop': 37821, 'tracts': 7},
'Manassas Park': {'pop': 14273, 'tracts': 2},
'Martinsville': {'pop': 13821, 'tracts': 5},
'Mathews': {'pop': 8978, 'tracts': 2},
'Mecklenburg': {'pop': 32727, 'tracts': 9},
'Middlesex': {'pop': 10959, 'tracts': 4},
'Montgomery': {'pop': 94392, 'tracts': 16},
'Nelson': {'pop': 15020, 'tracts': 3},
'New Kent': {'pop': 18429, 'tracts': 3},
'Newport News': {'pop': 180719, 'tracts': 44},
'Norfolk': {'pop': 242803, 'tracts': 81},
'Northampton': {'pop': 12389, 'tracts': 4},
'Northumberland': {'pop': 12330, 'tracts': 3},
'Norton': {'pop': 3958, 'tracts': 1},
'Nottoway': {'pop': 15853, 'tracts': 4},
'Orange': {'pop': 33481, 'tracts': 5},
'Page': {'pop': 24042, 'tracts': 5},
'Patrick': {'pop': 18490, 'tracts': 4},
'Petersburg': {'pop': 32420, 'tracts': 11},
'Pittsylvania': {'pop': 63506, 'tracts': 16},
'Poquoson': {'pop': 12150, 'tracts': 3},
'Portsmouth': {'pop': 95535, 'tracts': 31},
'Powhatan': {'pop': 28046, 'tracts': 5},
'Prince Edward': {'pop': 23368, 'tracts': 5},
'Prince George': {'pop': 35725, 'tracts': 7},
'Prince William': {'pop': 402002, 'tracts': 83},
'Pulaski': {'pop': 34872, 'tracts': 10},
'Radford': {'pop': 16408, 'tracts': 3},
'Rappahannock': {'pop': 7373, 'tracts': 2},
'Richmond': {'pop': 9254, 'tracts': 2},
'Richmond City': {'pop': 204214, 'tracts': 66},
'Roanoke': {'pop': 92376, 'tracts': 18},
'Roanoke City': {'pop': 97032, 'tracts': 23},
'Rockbridge': {'pop': 22307, 'tracts': 4},
'Rockingham': {'pop': 76314, 'tracts': 19},
'Russell': {'pop': 28897, 'tracts': 7},
'Salem': {'pop': 24802, 'tracts': 5},
'Scott': {'pop': 23177, 'tracts': 6},
'Shenandoah': {'pop': 41993, 'tracts': 9},
'Smyth': {'pop': 32208, 'tracts': 9},
'Southampton': {'pop': 18570, 'tracts': 5},
'Spotsylvania': {'pop': 122397, 'tracts': 30},
'Stafford': {'pop': 128961, 'tracts': 27},
'Staunton': {'pop': 23746, 'tracts': 6},
'Suffolk': {'pop': 84585, 'tracts': 28},
'Surry': {'pop': 7058, 'tracts': 2},
'Sussex': {'pop': 12087, 'tracts': 5},
'Tazewell': {'pop': 45078, 'tracts': 11},
'Virginia Beach': {'pop': 437994, 'tracts': 100},
'Warren': {'pop': 37575, 'tracts': 8},
'Washington': {'pop': 54876, 'tracts': 13},
'Waynesboro': {'pop': 21006, 'tracts': 5},
'Westmoreland': {'pop': 17454, 'tracts': 4},
'Williamsburg': {'pop': 14068, 'tracts': 3},
'Winchester': {'pop': 26203, 'tracts': 5},
'Wise': {'pop': 41452, 'tracts': 11},
'Wythe': {'pop': 29235, 'tracts': 6},
'York': {'pop': 65464, 'tracts': 14}},
'VT': {'Addison': {'pop': 36821, 'tracts': 10},
'Bennington': {'pop': 37125, 'tracts': 12},
'Caledonia': {'pop': 31227, 'tracts': 10},
'Chittenden': {'pop': 156545, 'tracts': 35},
'Essex': {'pop': 6306, 'tracts': 3},
'Franklin': {'pop': 47746, 'tracts': 10},
'Grand Isle': {'pop': 6970, 'tracts': 2},
'Lamoille': {'pop': 24475, 'tracts': 7},
'Orange': {'pop': 28936, 'tracts': 10},
'Orleans': {'pop': 27231, 'tracts': 10},
'Rutland': {'pop': 61642, 'tracts': 20},
'Washington': {'pop': 59534, 'tracts': 19},
'Windham': {'pop': 44513, 'tracts': 18},
'Windsor': {'pop': 56670, 'tracts': 18}},
'WA': {'Adams': {'pop': 18728, 'tracts': 5},
'Asotin': {'pop': 21623, 'tracts': 6},
'Benton': {'pop': 175177, 'tracts': 37},
'Chelan': {'pop': 72453, 'tracts': 14},
'Clallam': {'pop': 71404, 'tracts': 22},
'Clark': {'pop': 425363, 'tracts': 104},
'Columbia': {'pop': 4078, 'tracts': 1},
'Cowlitz': {'pop': 102410, 'tracts': 24},
'Douglas': {'pop': 38431, 'tracts': 8},
'Ferry': {'pop': 7551, 'tracts': 3},
'Franklin': {'pop': 78163, 'tracts': 13},
'Garfield': {'pop': 2266, 'tracts': 1},
'Grant': {'pop': 89120, 'tracts': 16},
'Grays Harbor': {'pop': 72797, 'tracts': 17},
'Island': {'pop': 78506, 'tracts': 22},
'Jefferson': {'pop': 29872, 'tracts': 7},
'King': {'pop': 1931249, 'tracts': 397},
'Kitsap': {'pop': 251133, 'tracts': 55},
'Kittitas': {'pop': 40915, 'tracts': 8},
'Klickitat': {'pop': 20318, 'tracts': 3},
'Lewis': {'pop': 75455, 'tracts': 20},
'Lincoln': {'pop': 10570, 'tracts': 4},
'Mason': {'pop': 60699, 'tracts': 14},
'Okanogan': {'pop': 41120, 'tracts': 10},
'Pacific': {'pop': 20920, 'tracts': 8},
'Pend Oreille': {'pop': 13001, 'tracts': 5},
'Pierce': {'pop': 795225, 'tracts': 172},
'San Juan': {'pop': 15769, 'tracts': 5},
'Skagit': {'pop': 116901, 'tracts': 30},
'Skamania': {'pop': 11066, 'tracts': 5},
'Snohomish': {'pop': 713335, 'tracts': 151},
'Spokane': {'pop': 471221, 'tracts': 105},
'Stevens': {'pop': 43531, 'tracts': 12},
'Thurston': {'pop': 252264, 'tracts': 49},
'Wahkiakum': {'pop': 3978, 'tracts': 1},
'Walla Walla': {'pop': 58781, 'tracts': 12},
'Whatcom': {'pop': 201140, 'tracts': 34},
'Whitman': {'pop': 44776, 'tracts': 10},
'Yakima': {'pop': 243231, 'tracts': 45}},
'WI': {'Adams': {'pop': 20875, 'tracts': 7},
'Ashland': {'pop': 16157, 'tracts': 7},
'Barron': {'pop': 45870, 'tracts': 10},
'Bayfield': {'pop': 15014, 'tracts': 5},
'Brown': {'pop': 248007, 'tracts': 54},
'Buffalo': {'pop': 13587, 'tracts': 5},
'Burnett': {'pop': 15457, 'tracts': 6},
'Calumet': {'pop': 48971, 'tracts': 11},
'Chippewa': {'pop': 62415, 'tracts': 11},
'Clark': {'pop': 34690, 'tracts': 8},
'Columbia': {'pop': 56833, 'tracts': 12},
'Crawford': {'pop': 16644, 'tracts': 6},
'Dane': {'pop': 488073, 'tracts': 107},
'Dodge': {'pop': 88759, 'tracts': 20},
'Door': {'pop': 27785, 'tracts': 9},
'Douglas': {'pop': 44159, 'tracts': 12},
'Dunn': {'pop': 43857, 'tracts': 8},
'Eau Claire': {'pop': 98736, 'tracts': 20},
'Florence': {'pop': 4423, 'tracts': 2},
'Fond du Lac': {'pop': 101633, 'tracts': 20},
'Forest': {'pop': 9304, 'tracts': 4},
'Grant': {'pop': 51208, 'tracts': 12},
'Green': {'pop': 36842, 'tracts': 8},
'Green Lake': {'pop': 19051, 'tracts': 6},
'Iowa': {'pop': 23687, 'tracts': 6},
'Iron': {'pop': 5916, 'tracts': 3},
'Jackson': {'pop': 20449, 'tracts': 5},
'Jefferson': {'pop': 83686, 'tracts': 20},
'Juneau': {'pop': 26664, 'tracts': 7},
'Kenosha': {'pop': 166426, 'tracts': 35},
'Kewaunee': {'pop': 20574, 'tracts': 4},
'La Crosse': {'pop': 114638, 'tracts': 25},
'Lafayette': {'pop': 16836, 'tracts': 5},
'Langlade': {'pop': 19977, 'tracts': 6},
'Lincoln': {'pop': 28743, 'tracts': 10},
'Manitowoc': {'pop': 81442, 'tracts': 19},
'Marathon': {'pop': 134063, 'tracts': 27},
'Marinette': {'pop': 41749, 'tracts': 12},
'Marquette': {'pop': 15404, 'tracts': 5},
'Menominee': {'pop': 4232, 'tracts': 2},
'Milwaukee': {'pop': 947735, 'tracts': 297},
'Monroe': {'pop': 44673, 'tracts': 9},
'Oconto': {'pop': 37660, 'tracts': 10},
'Oneida': {'pop': 35998, 'tracts': 14},
'Outagamie': {'pop': 176695, 'tracts': 40},
'Ozaukee': {'pop': 86395, 'tracts': 18},
'Pepin': {'pop': 7469, 'tracts': 2},
'Pierce': {'pop': 41019, 'tracts': 8},
'Polk': {'pop': 44205, 'tracts': 10},
'Portage': {'pop': 70019, 'tracts': 14},
'Price': {'pop': 14159, 'tracts': 6},
'Racine': {'pop': 195408, 'tracts': 44},
'Richland': {'pop': 18021, 'tracts': 5},
'Rock': {'pop': 160331, 'tracts': 38},
'Rusk': {'pop': 14755, 'tracts': 5},
'Sauk': {'pop': 61976, 'tracts': 13},
'Sawyer': {'pop': 16557, 'tracts': 6},
'Shawano': {'pop': 41949, 'tracts': 11},
'Sheboygan': {'pop': 115507, 'tracts': 26},
'St. Croix': {'pop': 84345, 'tracts': 14},
'Taylor': {'pop': 20689, 'tracts': 6},
'Trempealeau': {'pop': 28816, 'tracts': 8},
'Vernon': {'pop': 29773, 'tracts': 7},
'Vilas': {'pop': 21430, 'tracts': 5},
'Walworth': {'pop': 102228, 'tracts': 22},
'Washburn': {'pop': 15911, 'tracts': 5},
'Washington': {'pop': 131887, 'tracts': 28},
'Waukesha': {'pop': 389891, 'tracts': 86},
'Waupaca': {'pop': 52410, 'tracts': 12},
'Waushara': {'pop': 24496, 'tracts': 7},
'Winnebago': {'pop': 166994, 'tracts': 41},
'Wood': {'pop': 74749, 'tracts': 17}},
'WV': {'Barbour': {'pop': 16589, 'tracts': 4},
'Berkeley': {'pop': 104169, 'tracts': 14},
'Boone': {'pop': 24629, 'tracts': 8},
'Braxton': {'pop': 14523, 'tracts': 3},
'Brooke': {'pop': 24069, 'tracts': 6},
'Cabell': {'pop': 96319, 'tracts': 29},
'Calhoun': {'pop': 7627, 'tracts': 2},
'Clay': {'pop': 9386, 'tracts': 3},
'Doddridge': {'pop': 8202, 'tracts': 2},
'Fayette': {'pop': 46039, 'tracts': 12},
'Gilmer': {'pop': 8693, 'tracts': 2},
'Grant': {'pop': 11937, 'tracts': 3},
'Greenbrier': {'pop': 35480, 'tracts': 7},
'Hampshire': {'pop': 23964, 'tracts': 5},
'Hancock': {'pop': 30676, 'tracts': 8},
'Hardy': {'pop': 14025, 'tracts': 3},
'Harrison': {'pop': 69099, 'tracts': 22},
'Jackson': {'pop': 29211, 'tracts': 6},
'Jefferson': {'pop': 53498, 'tracts': 15},
'Kanawha': {'pop': 193063, 'tracts': 53},
'Lewis': {'pop': 16372, 'tracts': 5},
'Lincoln': {'pop': 21720, 'tracts': 5},
'Logan': {'pop': 36743, 'tracts': 9},
'Marion': {'pop': 56418, 'tracts': 18},
'Marshall': {'pop': 33107, 'tracts': 9},
'Mason': {'pop': 27324, 'tracts': 6},
'McDowell': {'pop': 22113, 'tracts': 8},
'Mercer': {'pop': 62264, 'tracts': 16},
'Mineral': {'pop': 28212, 'tracts': 7},
'Mingo': {'pop': 26839, 'tracts': 7},
'Monongalia': {'pop': 96189, 'tracts': 24},
'Monroe': {'pop': 13502, 'tracts': 3},
'Morgan': {'pop': 17541, 'tracts': 4},
'Nicholas': {'pop': 26233, 'tracts': 7},
'Ohio': {'pop': 44443, 'tracts': 18},
'Pendleton': {'pop': 7695, 'tracts': 3},
'Pleasants': {'pop': 7605, 'tracts': 2},
'Pocahontas': {'pop': 8719, 'tracts': 4},
'Preston': {'pop': 33520, 'tracts': 8},
'Putnam': {'pop': 55486, 'tracts': 10},
'Raleigh': {'pop': 78859, 'tracts': 17},
'Randolph': {'pop': 29405, 'tracts': 7},
'Ritchie': {'pop': 10449, 'tracts': 3},
'Roane': {'pop': 14926, 'tracts': 4},
'Summers': {'pop': 13927, 'tracts': 4},
'Taylor': {'pop': 16895, 'tracts': 4},
'Tucker': {'pop': 7141, 'tracts': 3},
'Tyler': {'pop': 9208, 'tracts': 3},
'Upshur': {'pop': 24254, 'tracts': 6},
'Wayne': {'pop': 42481, 'tracts': 11},
'Webster': {'pop': 9154, 'tracts': 3},
'Wetzel': {'pop': 16583, 'tracts': 5},
'Wirt': {'pop': 5717, 'tracts': 2},
'Wood': {'pop': 86956, 'tracts': 26},
'Wyoming': {'pop': 23796, 'tracts': 6}},
'WY': {'Albany': {'pop': 36299, 'tracts': 10},
'Big Horn': {'pop': 11668, 'tracts': 3},
'Campbell': {'pop': 46133, 'tracts': 7},
'Carbon': {'pop': 15885, 'tracts': 5},
'Converse': {'pop': 13833, 'tracts': 4},
'Crook': {'pop': 7083, 'tracts': 2},
'Fremont': {'pop': 40123, 'tracts': 10},
'Goshen': {'pop': 13249, 'tracts': 4},
'Hot Springs': {'pop': 4812, 'tracts': 2},
'Johnson': {'pop': 8569, 'tracts': 2},
'Laramie': {'pop': 91738, 'tracts': 21},
'Lincoln': {'pop': 18106, 'tracts': 4},
'Natrona': {'pop': 75450, 'tracts': 18},
'Niobrara': {'pop': 2484, 'tracts': 1},
'Park': {'pop': 28205, 'tracts': 5},
'Platte': {'pop': 8667, 'tracts': 2},
'Sheridan': {'pop': 29116, 'tracts': 6},
'Sublette': {'pop': 10247, 'tracts': 2},
'Sweetwater': {'pop': 43806, 'tracts': 12},
'Teton': {'pop': 21294, 'tracts': 4},
'Uinta': {'pop': 21118, 'tracts': 3},
'Washakie': {'pop': 8533, 'tracts': 3},
'Weston': {'pop': 7208, 'tracts': 2}}}
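# Illustrative lookup (an assumption about intended use; ``counties`` is a
# hypothetical name for the mapping above, which is keyed
# state -> county -> {'pop', 'tracts'}):
#
#   wy_tracts = sum(c['tracts'] for c in counties['WY'].values())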
| 49.436526
| 65
| 0.452481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69,441
| 0.446914
|
fbdc81d7845fea02b2d263cc5ca73eecc6dfae8e
| 2,259
|
py
|
Python
|
piecutter/engines/mock.py
|
diecutter/piecutter
|
250a90a4cae1b72ff3c141dffb8c58de74dbedfd
|
[
"BSD-3-Clause"
] | 2
|
2016-05-02T02:22:34.000Z
|
2021-02-08T18:17:30.000Z
|
piecutter/engines/mock.py
|
diecutter/piecutter
|
250a90a4cae1b72ff3c141dffb8c58de74dbedfd
|
[
"BSD-3-Clause"
] | 2
|
2016-03-22T10:09:13.000Z
|
2016-07-01T08:04:43.000Z
|
piecutter/engines/mock.py
|
diecutter/piecutter
|
250a90a4cae1b72ff3c141dffb8c58de74dbedfd
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Mock template engine, for use in tests."""
from piecutter.engines import Engine
#: Default value used as :py:attr:`MockEngine.render_result`
default_render_result = u'RENDER WITH ARGS={args!s} AND KWARGS={kwargs!s}'
class MockEngine(Engine):
"""Template engine mock.
Typical usage:
>>> from piecutter.engines.mock import MockEngine
>>> mock_result = u'this is expected result'
>>> mock = MockEngine(mock_result)
>>> args = ('arg 1', 'arg 2')
>>> kwargs = {'kwarg1': 'kwarg 1', 'kwarg2': 'kwarg 2'}
>>> mock.render(*args, **kwargs) == mock_result
True
>>> mock.args == args
True
>>> mock.kwargs == kwargs
True
You can use ``{args}`` and ``{kwargs}`` in mock result, because render()
uses ``self.render_result.format(args=args, kwargs=kwargs)``.
This feature is used by default:
>>> mock = MockEngine()
>>> mock.render_result
u'RENDER WITH ARGS={args!s} AND KWARGS={kwargs!s}'
>>> mock.render()
u'RENDER WITH ARGS=() AND KWARGS={}'
If you setup an exception as :py:attr:`fail` attribute,
then :py:meth:`render` will raise that exception.
>>> mock = MockEngine(fail=Exception('An error occurred'))
>>> mock.render() # Doctest: +ELLIPSIS
Traceback (most recent call last):
...
Exception: An error occurred
"""
def __init__(self, render_result=default_render_result, fail=None):
#: Value to be returned by :py:meth:`render`.
self.render_result = render_result
#: Exception to be raised by :py:meth:`render`, or ``None`` to
#: render normally.
self.fail = fail
#: Stores positional arguments of the last call to :py:meth:`render`.
self.args = None
#: Stores keyword arguments of the last call to :py:meth:`render`.
self.kwargs = None
def render(self, *args, **kwargs):
"""Return self.render_result + populates args and kwargs.
If self.fail is not None, then raises a TemplateError(self.fail).
"""
if self.fail is not None:
raise self.fail
self.args = args
self.kwargs = kwargs
return self.render_result.format(args=args, kwargs=kwargs)
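# A hedged pytest-style sketch (hypothetical test, not part of the original
# module) showing how the mock records the arguments of the last render call:
#
#   def test_render_records_call():
#       mock = MockEngine(u'ok')
#       assert mock.render('tpl', name='world') == u'ok'
#       assert mock.args == ('tpl',)
#       assert mock.kwargs == {'name': 'world'}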
| 31.375
| 77
| 0.626826
| 2,011
| 0.890217
| 0
| 0
| 0
| 0
| 0
| 0
| 1,687
| 0.746791
|
fbdcdec6f6daabaf8ab83907c267233eb37da60d
| 4,276
|
py
|
Python
|
Groups/Group_ID_18/CWMVFE.py
|
shekhar-sharma/DataScience
|
1fd771f873a9bc0800458fd7c05e228bb6c4e8a0
|
[
"MIT"
] | 5
|
2020-12-13T07:53:22.000Z
|
2020-12-20T18:49:27.000Z
|
Groups/Group_ID_18/CWMVFE.py
|
Gulnaz-Tabassum/DataScience
|
1fd771f873a9bc0800458fd7c05e228bb6c4e8a0
|
[
"MIT"
] | null | null | null |
Groups/Group_ID_18/CWMVFE.py
|
Gulnaz-Tabassum/DataScience
|
1fd771f873a9bc0800458fd7c05e228bb6c4e8a0
|
[
"MIT"
] | 24
|
2020-12-12T11:23:28.000Z
|
2021-10-04T13:09:38.000Z
|
#MCCA (Multiview Canonical Correlation Analysis)
import numpy as np
from scipy import linalg as lin
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
class CWMVFE:
    def __init__(self, n_components=2, reg_param=0.01, L=None):
        self.n_components = n_components
        self.reg_param = reg_param
        self.L = L
        self.dimen = []
        self.C = [[]]  # covariance matrix

    # To normalize data so that mean=0 and std dev=1
    def normalize(self, X):
        return StandardScaler().fit_transform(X)

    # for adding regularization parameter
    def add_reg_param(self, c):
        I = np.identity(c.shape[0])
        R = np.dot(self.reg_param, I)
        c = c + R
        return c

    # for calculating covariance matrix
    def cov_mat(self, X_list):
        views = len(X_list)
        N = len(X_list[0])
        C = [[np.array([]) for i in range(views)] for j in range(views)]
        for i in range(views):
            for j in range(views):
                C[i][j] = np.dot(X_list[i].T, X_list[j])
                C[i][j] = np.divide(C[i][j], float(N))
                if i == j:
                    C[i][j] = self.add_reg_param(C[i][j])
        self.C = C
        return C

    # it will find the k nearest elements
    def k_nearest(self, a_list, b_list, n_neighbors):
        knn = KNeighborsClassifier(n_neighbors)
        knn.fit(a_list, b_list)
        return (a_list, b_list)

    # it will calculate the euclidean distance for the Jensen-Shannon algorithm
    def ecludian_distance(self, x_list):
        size = len(x_list)
        d = [[0] * size for _ in range(size)]
        for i in range(size):
            for j in range(size):
                d[i][j] = np.abs(x_list[i] - x_list[j])
        # i and j are not specified in the algorithm, so i=0 and j=size-1 are used
        upperis = pow(1 + pow(d[0][size - 1], 2), -1)
        loweris = 0
        for i in range(size):
            for j in range(i):
                loweris = loweris + pow(1 + pow(d[i][j], 2), -1)
        q = upperis / loweris
        return q

    def jenson_shannon(self, a_list, b_list, L):
        q_a = self.ecludian_distance(a_list)
        q_b = self.ecludian_distance(b_list)
        mid_q = (q_a + q_b) / 2
        num_a = 0
        num_b = 0
        for i in range(L):
            num_a = num_a + q_a * np.log10(q_a / mid_q)
            num_b = num_b + q_b * np.log10(q_b / mid_q)
        js = 0.5 * (num_a + num_b)
        return js

    def sigh(self, a_list, b_list):
        old_a_list = a_list
        old_b_list = b_list
        sigh_a = [0] * len(a_list)
        sigh_b = [0] * len(b_list)
        self.k_nearest(a_list, b_list, 5)
        for i in range(len(a_list)):
            sigh_a[i] = old_a_list[i] - a_list[i]
            sigh_b[i] = old_b_list[i] - b_list[i]
        return (np.array(sigh_a), np.array(sigh_b))

    def transform(self, X_list):
        views = len(X_list)
        X_list = [self.normalize(x) for x in X_list]
        X_reduced = [[]] * views
        for i in range(views):
            X_reduced[i] = np.dot(X_list[i], self.weights[i].T)
        return X_reduced

    def fit(self, a_list, b_list):
        view = len(a_list)
        # normalize the data
        a_list = [self.normalize(x) for x in a_list]
        b_list = [self.normalize(x) for x in b_list]
        first_term = 0
        second_term = 0
        for i in range(view):
            sigh_a, sigh_b = self.sigh(a_list[i], b_list[i])
            cov_a = self.cov_mat([a_list[i]])[0][0]
            cov_b = self.cov_mat([b_list[i]])[0][0]
            js = self.jenson_shannon(a_list[i], b_list[i], i)
            first_term = first_term + js * np.dot(np.dot(sigh_a.T, sigh_b), np.dot(cov_a.T, cov_b))
            second_term = second_term + js * np.dot(np.dot(sigh_a.T, sigh_a), np.dot(cov_a.T, cov_a))
        # In order to get more generalized flexibility, a parameter gamma > 0 is
        # introduced to balance the above two terms, so lambda is assumed to be 0.5
        lamda = 0.5
        self.final_value = first_term - lamda * second_term
        return self.final_value

    def fit_transform(self, a_list, b_list):
        final_value = self.fit(a_list, b_list)
        final_value = np.transpose(final_value)
        return final_value
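# A minimal usage sketch (illustrative assumption, not part of the original
# file): two synthetic views sharing a feature dimension are fitted and then
# projected with fit_transform.
if __name__ == '__main__':
    a_views = [np.random.rand(20, 4) for _ in range(2)]  # hypothetical view data
    b_views = [np.random.rand(20, 4) for _ in range(2)]
    model = CWMVFE(n_components=2, reg_param=0.01)
    print(model.fit_transform(a_views, b_views))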
| 32.641221
| 149
| 0.563143
| 1,948
| 0.455459
| 0
| 0
| 0
| 0
| 0
| 0
| 545
| 0.127426
|
fbddad4ad03be76385839104dc7ae9091a1e919e
| 1,331
|
py
|
Python
|
scripts/lda.py
|
flatironinstitute/catvae
|
4bfdce83a24c0fb0e55215dd24cda5dcaa9d418a
|
[
"BSD-3-Clause"
] | 6
|
2021-05-23T18:50:48.000Z
|
2022-02-23T20:57:36.000Z
|
scripts/lda.py
|
flatironinstitute/catvae
|
4bfdce83a24c0fb0e55215dd24cda5dcaa9d418a
|
[
"BSD-3-Clause"
] | 24
|
2021-05-19T17:43:33.000Z
|
2022-03-03T21:41:13.000Z
|
scripts/lda.py
|
mortonjt/catvae
|
003a46682fc33e5b0d66c17e85e59e464a465c53
|
[
"BSD-3-Clause"
] | 2
|
2021-05-19T16:21:13.000Z
|
2021-09-23T01:11:29.000Z
|
import argparse
from sklearn.decomposition import LatentDirichletAllocation as LDA
import pickle
from biom import load_table
def main(args):
model = LDA(n_components=args.n_latent, max_iter=args.iterations,
verbose=1, learning_method='online')
table = load_table(args.train_biom)
X = table.matrix_data.T
model.fit(X)
with open(args.model_checkpoint, 'wb') as f:
pickle.dump(model, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-biom', help='Training biom file',
required=True)
parser.add_argument('--n-latent', type=int, help='Number of components')
parser.add_argument('--iterations', type=int,
default=10000, required=False,
help='Number of iterations.')
parser.add_argument('--batch-size', type=int,
default=256, required=False,
help='Batch size')
parser.add_argument('--n-jobs', type=int,
default=-1, required=False,
help='Number of concurrent jobs.')
parser.add_argument('--model-checkpoint',
required=True,
help='Location of saved model.')
args = parser.parse_args()
main(args)
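# Example invocation (hypothetical file paths; the flags are the ones defined
# by the argument parser above):
#
#   python lda.py --train-biom feature-table.biom --n-latent 10 \
#       --iterations 500 --model-checkpoint lda-model.pkl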
| 36.972222
| 76
| 0.596544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.178062
|
fbdf88f92268a5b7f1908449c372b3ef7604c3eb
| 2,984
|
py
|
Python
|
splitjoin.py
|
sonicskye/senarai
|
1d531372b9d290812df97c2be644fe1d4d4ffb1c
|
[
"MIT"
] | 1
|
2018-12-31T02:55:26.000Z
|
2018-12-31T02:55:26.000Z
|
splitjoin.py
|
sonicskye/senarai
|
1d531372b9d290812df97c2be644fe1d4d4ffb1c
|
[
"MIT"
] | null | null | null |
splitjoin.py
|
sonicskye/senarai
|
1d531372b9d290812df97c2be644fe1d4d4ffb1c
|
[
"MIT"
] | null | null | null |
'''
splitjoin.py
sonicskye@2018
The functions are used to split and join files
based on:
https://stonesoupprogramming.com/2017/09/16/python-split-and-join-file/
with modification by adding natural sort
'''
import os
import re
# https://stackoverflow.com/questions/11150239/python-natural-sorting
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key = alphanum_key)
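# For example, natural_sort(['part-10', 'part-2', 'part-1']) returns
# ['part-1', 'part-2', 'part-10'], whereas a plain lexicographic sort would
# place 'part-10' before 'part-2'.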
def split(source, dest_folder, write_size):
# Make a destination folder if it doesn't exist yet
if not os.path.exists(dest_folder):
os.mkdir(dest_folder)
else:
# Otherwise clean out all files in the destination folder
for file in os.listdir(dest_folder):
os.remove(os.path.join(dest_folder, file))
partnum = 0
# Open the source file in binary mode
input_file = open(source, 'rb')
while True:
# Read a portion of the input file
chunk = input_file.read(write_size)
# End the loop if we have hit EOF
if not chunk:
break
# Increment partnum
partnum += 1
# Create a new file name
filename = os.path.join(dest_folder, ('part-' + str(partnum)))
# Create a destination file
dest_file = open(filename, 'wb')
# Write to this portion of the destination file
dest_file.write(chunk)
# Explicitly close
dest_file.close()
# Explicitly close
input_file.close()
# Return the number of files created by the split
return partnum
def join(source_dir, dest_file, read_size):
# Create a new destination file
output_file = open(dest_file, 'wb')
# Get a list of the file parts
parts = os.listdir(source_dir)
# Sort them by name (remember that the order num is part of the file name)
# should use natural sort
#parts.sort()
parts = natural_sort(parts)
# Go through each portion one by one
for file in parts:
# Assemble the full path to the file
path = os.path.join(source_dir, file)
# Open the part
input_file = open(path, 'rb')
while True:
    # Read a chunk of the part (avoid shadowing the built-in ``bytes``)
    chunk = input_file.read(read_size)
    # Break out of loop if we are at end of file
    if not chunk:
        break
    # Write the chunk to the output file
    output_file.write(chunk)
# Close the input file
input_file.close()
# Close the output file
output_file.close()
# example
'''
imageFilePath = os.path.join(os.path.dirname(__file__), 'cryptocurrency.jpg')
destinationFolderPath = os.path.join(os.path.dirname(__file__), 'tmp')
imageFilePath2 = os.path.join(os.path.dirname(__file__), 'cryptocurrency2.jpg')
split(imageFilePath, destinationFolderPath, 2350)
join(destinationFolderPath, imageFilePath2, 4700)
'''
| 25.724138
| 79
| 0.647118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,487
| 0.498324
|
fbdfcde9b6324f3991c172b4af39c20e74cff120
| 9,653
|
py
|
Python
|
daily_leader_board.py
|
bundgus/hadoop-yarn-resource-consumption-report
|
92ef200b4dbd5fd9d7877817b72d4d407126896f
|
[
"MIT"
] | 1
|
2019-04-29T18:32:19.000Z
|
2019-04-29T18:32:19.000Z
|
daily_leader_board.py
|
bundgus/hadoop-yarn-resource-consumption-report
|
92ef200b4dbd5fd9d7877817b72d4d407126896f
|
[
"MIT"
] | null | null | null |
daily_leader_board.py
|
bundgus/hadoop-yarn-resource-consumption-report
|
92ef200b4dbd5fd9d7877817b72d4d407126896f
|
[
"MIT"
] | null | null | null |
# Mark Bundgus 2019
import luigi
import logging
from yarn_api_client import ResourceManager # https://python-client-for-hadoop-yarn-api.readthedocs.io
from datetime import datetime
from datetime import timedelta
import pandas as pd
from tabulate import tabulate
import os
import configuration
log = logging.getLogger("luigi-interface")
class LeaderBoard(luigi.Task):
jobs_year = luigi.Parameter()
jobs_month = luigi.Parameter()
jobs_day = luigi.Parameter()
def output(self):
output_path = os.path.join('daily_leader_boards',
'leader_board_'
+ str(self.jobs_year)
+ '-' + str(self.jobs_month).zfill(2)
+ '-' + str(self.jobs_day).zfill(2) + '.csv')
return luigi.LocalTarget(output_path)
def run(self):
analysis_timestamp = str(datetime.now())
output_path = os.path.join('daily_leader_boards',
'leader_board_'
+ str(self.jobs_year)
+ '-' + str(self.jobs_month).zfill(2)
+ '-' + str(self.jobs_day).zfill(2) + '.csv')
rm = ResourceManager(configuration.yarn_resource_managers)
metrics = rm.cluster_metrics()
cluster_vcores_total = metrics.data['clusterMetrics']['totalVirtualCores']
cluster_daily_vcore_seconds = int(cluster_vcores_total * 60 * 60 * 24)
cluster_memory_total_mb = metrics.data['clusterMetrics']['totalMB']
cluster_daily_megabyte_memory_seconds = int(cluster_memory_total_mb * 60 * 60 * 24)
begin_date = datetime(int(str(self.jobs_year)), int(str(self.jobs_month)), int(str(self.jobs_day)))
end_date = begin_date + timedelta(1)
begin_ms = str(int(begin_date.timestamp() * 1000))
end_ms = str(int(end_date.timestamp() * 1000))
# filter out jobs that started after the end of the analyzed day
apps = rm.cluster_applications(
# finished_time_begin=begin_ms,
started_time_end=end_ms
)
applist = apps.data['apps']['app']
total_vcore_seconds = 0
total_mb_seconds = 0
sum_elapsed_time_ms = 0
overall_started_time_ms = 9999999999999
overall_finished_time_ms = 0
total_yarn_apps = 0
users = {}
app_file = 'app_lists/apps_' + str(self.jobs_year) \
+ '-' + str(self.jobs_month).zfill(2) \
+ '-' + str(self.jobs_day).zfill(2) + '.csv'
apps_df = pd.DataFrame(applist)
apps_df.to_csv(app_file)
for app in applist:
begin_ms_int = int(begin_ms)
end_ms_int = int(end_ms)
started_time = app['startedTime']
finished_time = app['finishedTime']
elapsed_time = app['elapsedTime']
# disregard apps that have not (or not yet) consumed any resources
if app['state'] not in ['FINISHED', 'FAILED', 'KILLED', 'RUNNING']:
continue
# disregard apps that finished before the beginning of the analyzed day
if 0 < finished_time < begin_ms_int:
continue
# for scenario where job began and ended in the same day
percent_within_day = 1.0
# scenario where job began before the beginning of the day and ended before the end of the day
if started_time < begin_ms_int < finished_time < end_ms_int:
percent_within_day = (finished_time - begin_ms_int)/elapsed_time
# scenario where job began before the beginning of the day and continued beyond the end of the day
if started_time < begin_ms_int and (finished_time == 0 or finished_time > end_ms_int):
percent_within_day = 86400000/elapsed_time
# scenario where job began before the end of the day and continued beyond the end of the day
if begin_ms_int < started_time < end_ms_int \
and (finished_time == 0 or end_ms_int < finished_time):
percent_within_day = (end_ms_int-started_time)/elapsed_time
weighted_app_vcore_seconds = int(app['vcoreSeconds'] * percent_within_day)
weighted_app_memory_seconds = int(app['memorySeconds'] * percent_within_day)
user = users.setdefault(app['user'], {'user_first_task_started_time_ms': 9999999999999,
'last_task_finished_time_ms': 0})
total_vcore_seconds += weighted_app_vcore_seconds
total_mb_seconds += weighted_app_memory_seconds
user['user_first_task_started_time_ms'] = app['startedTime'] \
if app['startedTime'] < user['user_first_task_started_time_ms'] \
else user['user_first_task_started_time_ms']
user['last_task_finished_time_ms'] = app['finishedTime'] \
if app['finishedTime'] > user['last_task_finished_time_ms'] \
else user['last_task_finished_time_ms']
overall_started_time_ms = app['startedTime'] if app['startedTime'] < overall_started_time_ms \
else overall_started_time_ms
overall_finished_time_ms = app['finishedTime'] if app['finishedTime'] > overall_finished_time_ms \
else overall_finished_time_ms
sum_elapsed_time_ms += app['elapsedTime']
total_yarn_apps += 1
user_total_vcore_seconds = user.setdefault('total_vcore_seconds', 0)
user['total_vcore_seconds'] = user_total_vcore_seconds + weighted_app_vcore_seconds
user_total_mb_seconds = user.setdefault('total_MB_seconds', 0)
user['total_MB_seconds'] = user_total_mb_seconds + weighted_app_memory_seconds
header = ['jobs_year',
'jobs_month',
'jobs_day',
'cluster_daily_vcore_seconds',
'cluster_daily_megabyte_memory_seconds',
'user',
'used_vcore_seconds',
'percent_used_of_all_used_vcore_seconds',
'percent_used_of_total_cluster_vcore_seconds',
'used_MB_seconds',
'percent_used_of_all_used_MB_seconds',
'percent_used_of_total_cluster_MB_seconds',
'user_first_task_started_time',
'user_last_task_finished_time'
]
table = []
for user in users:
# set last_task_finished_time to None if timestamp == 0 representing that the task hasn't finished yet
if int(users[user]['last_task_finished_time_ms']) == 0:
last_task_finished_time_string = ''
else:
last_task_finished_time_string = \
datetime.fromtimestamp(users[user]['last_task_finished_time_ms'] / 1000.0)\
.strftime('%Y-%m-%d %H:%M')
row = [
self.jobs_year,
self.jobs_month,
self.jobs_day,
cluster_daily_vcore_seconds,
cluster_daily_megabyte_memory_seconds,
user,
round(users[user]['total_vcore_seconds'], 0),
round(100 * users[user]['total_vcore_seconds'] / total_vcore_seconds, 2),
round(100 * users[user]['total_vcore_seconds'] / cluster_daily_vcore_seconds, 2),
round(users[user]['total_MB_seconds'], 0),
round(100 * users[user]['total_MB_seconds'] / total_mb_seconds, 2),
round(100 * users[user]['total_MB_seconds'] / cluster_daily_megabyte_memory_seconds, 2),
datetime.fromtimestamp(users[user]['user_first_task_started_time_ms'] / 1000.0)
.strftime('%Y-%m-%d %H:%M'),
last_task_finished_time_string,
]
table.append(row)
df = pd.DataFrame(table, columns=header)
df = df.sort_values(by='used_MB_seconds', ascending=False)
print()
print('analysis timestamp: ' + analysis_timestamp)
# print('functional account:', job_user)
print('jobs date: ' + begin_date.strftime('%Y-%m-%d'))
print('----------------------')
print('count of yarn apps: ' + str(total_yarn_apps))
print('overall daily jobs started time ',
datetime.fromtimestamp(overall_started_time_ms / 1000.0).strftime('%Y-%m-%d %H:%M'))
print('overall daily jobs finished time',
datetime.fromtimestamp(overall_finished_time_ms / 1000.0).strftime('%Y-%m-%d %H:%M'))
print()
print(tabulate(df, headers='keys', showindex=False))
df.to_csv(output_path,
index=False)
# create leader boards for the last 3 days
class CreateDailyLeaderBoards(luigi.Task):
def complete(self):
return False
def requires(self):
required = []
now = datetime.now()
log.info('Attempting to create leader board')
for days_int in range(1, 3):
date = now - timedelta(days_int)
year = date.year
month = date.month
day = date.day
required.append(
LeaderBoard(
jobs_year=str(year),
jobs_month=str(month),
jobs_day=str(day)))
return required
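# A hedged usage sketch (standard Luigi CLI conventions, not shown in this
# file): the two-day backfill above could be launched with something like:
#
#   luigi --module daily_leader_board CreateDailyLeaderBoards --local-scheduler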
| 43.09375
| 114
| 0.58531
| 9,266
| 0.959909
| 0
| 0
| 0
| 0
| 0
| 0
| 2,461
| 0.254947
|
fbdfdcb0c2a9cd822c60b6faedf5d56ff354a6c6
| 1,071
|
py
|
Python
|
bfxhfindicators/wma.py
|
quadramadery/bfx-hf-indicators-py
|
fe523607ae6c16fc26f1bb1d5e8062a3770b43e4
|
[
"Apache-2.0"
] | 1
|
2022-01-12T09:31:45.000Z
|
2022-01-12T09:31:45.000Z
|
bfxhfindicators/wma.py
|
quadramadery/bfx-hf-indicators-py
|
fe523607ae6c16fc26f1bb1d5e8062a3770b43e4
|
[
"Apache-2.0"
] | null | null | null |
bfxhfindicators/wma.py
|
quadramadery/bfx-hf-indicators-py
|
fe523607ae6c16fc26f1bb1d5e8062a3770b43e4
|
[
"Apache-2.0"
] | null | null | null |
from bfxhfindicators.indicator import Indicator
class WMA(Indicator):
def __init__(self, args = []):
[ period ] = args
d = 0
for i in range(period):
d += (i + 1)
self._d = d
self._p = period
self._buffer = []
super().__init__({
'args': args,
'id': 'wma',
'name': 'WMA(%f)' % period,
'seed_period': period
})
def reset(self):
super().reset()
self._buffer = []
def update(self, v):
if len(self._buffer) == 0:
self._buffer.append(v)
else:
self._buffer[-1] = v
if len(self._buffer) < self._p:
return
n = 0
for i in range(self._p):
n += self._buffer[-i - 1] * (self._p - i)
super().update(n / self._d)
return self.v()
def add(self, v):
self._buffer.append(v)
if len(self._buffer) > self._p:
del self._buffer[0]
elif len(self._buffer) < self._p:
return
n = 0
for i in range(self._p):
n += self._buffer[-i - 1] * (self._p - i)
super().add(n / self._d)
return self.v()
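# A minimal usage sketch (hypothetical prices, not part of the original
# module; assumes the Indicator base class exposes v() as used above):
if __name__ == '__main__':
    wma = WMA([3])
    for price in [10.0, 11.0, 12.0, 13.0]:
        wma.add(price)
    print(wma.v())  # weighted mean with most weight on the latest price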
| 18.152542
| 47
| 0.519141
| 1,021
| 0.953315
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.040149
|
fbe1cc6565e4765aae8322647fc0bb9752036f7c
| 5,021
|
py
|
Python
|
src/scripts/train_image.py
|
paavalipopov/introspection
|
ee486a9e8c8b6ddb7ab257eae9e14aac5d637527
|
[
"Apache-2.0"
] | null | null | null |
src/scripts/train_image.py
|
paavalipopov/introspection
|
ee486a9e8c8b6ddb7ab257eae9e14aac5d637527
|
[
"Apache-2.0"
] | null | null | null |
src/scripts/train_image.py
|
paavalipopov/introspection
|
ee486a9e8c8b6ddb7ab257eae9e14aac5d637527
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from datetime import datetime
import os
from catalyst import dl, utils
from catalyst.contrib.data import AllTripletsSampler
from catalyst.contrib.losses import TripletMarginLossWithSampler
from catalyst.data import BatchBalanceClassSampler
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from src.modules import resnet9
from src.settings import LOGS_ROOT
class CustomRunner(dl.Runner):
def handle_batch(self, batch) -> None:
images, targets = batch
embeddings, logits = self.model(images)
self.batch = {
"embeddings": embeddings,
"targets": targets,
"logits": logits,
}
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"wandb": dl.WandbLogger(project="wandb_test", name="experiment_1"),
}
def main(use_ml: bool = False):
# data
transform_train = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
transform_valid = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
train_dataset = datasets.CIFAR10(
os.getcwd(), train=True, download=True, transform=transform_train
)
valid_dataset = datasets.CIFAR10(
os.getcwd(), train=False, download=True, transform=transform_valid
)
# loaders
labels = train_dataset.targets
sampler = BatchBalanceClassSampler(labels=labels, num_classes=10, num_samples=10)
bs = sampler.batch_size
loaders = {
"train": DataLoader(train_dataset, batch_sampler=sampler, num_workers=4),
"valid": DataLoader(valid_dataset, batch_size=bs, num_workers=4, shuffle=False),
}
# model
model = resnet9(in_channels=3, num_classes=10)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
# optimizer = optim.Adam(model.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [5, 8], gamma=0.3)
criterion_ce = nn.CrossEntropyLoss()
sampler_inbatch = AllTripletsSampler()
criterion_ml = TripletMarginLossWithSampler(margin=0.5, sampler_inbatch=sampler_inbatch)
criterion = {"ce": criterion_ce, "ml": criterion_ml}
# runner
runner = CustomRunner()
# callbacks
callbacks = [
dl.CriterionCallback(
input_key="logits",
target_key="targets",
metric_key="loss_ce",
criterion_key="ce",
),
dl.AccuracyCallback(input_key="logits", target_key="targets", topk=(1, 3, 5)),
dl.BackwardCallback(metric_key="loss" if use_ml else "loss_ce"),
dl.OptimizerCallback(metric_key="loss" if use_ml else "loss_ce"),
dl.SchedulerCallback(),
]
if use_ml:
callbacks.extend(
[
dl.ControlFlowCallbackWrapper(
base_callback=dl.CriterionCallback(
input_key="embeddings",
target_key="targets",
metric_key="loss_ml",
criterion_key="ml",
),
loaders=["train"],
),
dl.ControlFlowCallbackWrapper(
base_callback=dl.MetricAggregationCallback(
metric_key="loss",
metrics=["loss_ce", "loss_ml"],
mode="mean",
),
loaders=["train"],
),
]
)
# train
strtime = datetime.now().strftime("%Y%m%d-%H%M%S")
ml_flag = int(use_ml)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
num_epochs=200,
callbacks=callbacks,
logdir=f"{LOGS_ROOT}/image-ml{ml_flag}-{strtime}",
valid_loader="valid",
valid_metric="accuracy01",
minimize_valid_metric=False,
verbose=True,
load_best_on_end=True,
)
# evaluate
metrics = runner.evaluate_loader(
loader=loaders["valid"],
callbacks=[
dl.AccuracyCallback(input_key="logits", target_key="targets", topk=(1, 3, 5)),
dl.PrecisionRecallF1SupportCallback(
input_key="logits", target_key="targets", num_classes=10
),
],
)
print(metrics)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
utils.boolean_flag(parser, "use-ml", default=False)
args = parser.parse_args()
main(args.use_ml)
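# Example invocation (assuming the package layout implied by the imports;
# --use-ml is registered through catalyst's boolean_flag helper):
#
#   python -m src.scripts.train_image --use-ml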
| 31.778481
| 92
| 0.601075
| 465
| 0.092611
| 0
| 0
| 0
| 0
| 0
| 0
| 568
| 0.113125
|
836bc2bf5ec4d2f69e9599977608bd55913a2fd3
| 22,536
|
py
|
Python
|
aegea/batch.py
|
MrOlm/aegea
|
5599ddaf7947918a5c7a0282ab993cfa304790f8
|
[
"Apache-2.0"
] | null | null | null |
aegea/batch.py
|
MrOlm/aegea
|
5599ddaf7947918a5c7a0282ab993cfa304790f8
|
[
"Apache-2.0"
] | null | null | null |
aegea/batch.py
|
MrOlm/aegea
|
5599ddaf7947918a5c7a0282ab993cfa304790f8
|
[
"Apache-2.0"
] | null | null | null |
"""
Manage AWS Batch jobs, queues, and compute environments.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse, base64, collections, io, subprocess, json, time, re, hashlib, concurrent.futures, itertools
from botocore.exceptions import ClientError
from . import logger
from .ls import register_parser, register_listing_parser
from .ecr import ecr_image_name_completer
from .util import Timestamp, paginate, get_mkfs_command
from .util.crypto import ensure_ssh_key
from .util.cloudinit import get_user_data
from .util.exceptions import AegeaException
from .util.printing import page_output, tabulate, YELLOW, RED, GREEN, BOLD, ENDC
from .util.aws import (resources, clients, ensure_iam_role, ensure_instance_profile, make_waiter, ensure_vpc,
ensure_security_group, ensure_log_group, IAMPolicyBuilder, resolve_ami, instance_type_completer,
expect_error_codes, instance_storage_shellcode)
from .util.aws.spot import SpotFleetBuilder
from .util.aws.logs import CloudwatchLogReader
from .util.aws.batch import ensure_job_definition, get_command_and_env
def complete_queue_name(**kwargs):
return [q["jobQueueName"] for q in paginate(clients.batch.get_paginator("describe_job_queues"))]
def complete_ce_name(**kwargs):
return [c["computeEnvironmentName"] for c in paginate(clients.batch.get_paginator("describe_compute_environments"))]
def batch(args):
batch_parser.print_help()
batch_parser = register_parser(batch, help="Manage AWS Batch resources", description=__doc__)
def queues(args):
page_output(tabulate(paginate(clients.batch.get_paginator("describe_job_queues")), args))
parser = register_listing_parser(queues, parent=batch_parser, help="List Batch queues")
def create_queue(args):
ces = [dict(computeEnvironment=e, order=i) for i, e in enumerate(args.compute_environments)]
logger.info("Creating queue %s in %s", args.name, ces)
queue = clients.batch.create_job_queue(jobQueueName=args.name, priority=args.priority, computeEnvironmentOrder=ces)
make_waiter(clients.batch.describe_job_queues, "jobQueues[].status", "VALID", "pathAny").wait(jobQueues=[args.name])
return queue
parser = register_parser(create_queue, parent=batch_parser, help="Create a Batch queue")
parser.add_argument("name")
parser.add_argument("--priority", type=int, default=5)
parser.add_argument("--compute-environments", nargs="+", required=True)
def delete_queue(args):
clients.batch.update_job_queue(jobQueue=args.name, state="DISABLED")
make_waiter(clients.batch.describe_job_queues, "jobQueues[].status", "VALID", "pathAny").wait(jobQueues=[args.name])
clients.batch.delete_job_queue(jobQueue=args.name)
parser = register_parser(delete_queue, parent=batch_parser, help="Delete a Batch queue")
parser.add_argument("name").completer = complete_queue_name
def compute_environments(args):
page_output(tabulate(paginate(clients.batch.get_paginator("describe_compute_environments")), args))
parser = register_listing_parser(compute_environments, parent=batch_parser, help="List Batch compute environments")
def ensure_launch_template(prefix=__name__.replace(".", "_"), **kwargs):
name = prefix + "_" + hashlib.sha256(json.dumps(kwargs, sort_keys=True).encode()).hexdigest()[:32]
try:
clients.ec2.create_launch_template(LaunchTemplateName=name, LaunchTemplateData=kwargs)
except ClientError as e:
expect_error_codes(e, "InvalidLaunchTemplateName.AlreadyExistsException")
return name
def get_ssm_parameter(name):
return clients.ssm.get_parameter(Name=name)["Parameter"]["Value"]
def create_compute_environment(args):
commands = instance_storage_shellcode.strip().format(mountpoint="/mnt", mkfs=get_mkfs_command()).split("\n")
user_data = get_user_data(commands=commands, mime_multipart_archive=True)
if args.ecs_container_instance_ami:
ecs_ami_id = args.ecs_container_instance_ami
elif args.ecs_container_instance_ami_tags:
# TODO: build ECS CI AMI on demand
ecs_ami_id = resolve_ami(**args.ecs_container_instance_ami_tags)
else:
ecs_ami_id = get_ssm_parameter("/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id")
launch_template = ensure_launch_template(ImageId=ecs_ami_id,
# TODO: add configurable BDM for Docker image cache space
UserData=base64.b64encode(user_data).decode())
batch_iam_role = ensure_iam_role(args.service_role, trust=["batch"], policies=["service-role/AWSBatchServiceRole"])
vpc = ensure_vpc()
ssh_key_name = ensure_ssh_key(args.ssh_key_name, base_name=__name__)
instance_profile = ensure_instance_profile(args.instance_role,
policies={"service-role/AmazonAPIGatewayPushToCloudWatchLogs",
"service-role/AmazonEC2ContainerServiceforEC2Role",
IAMPolicyBuilder(action="sts:AssumeRole", resource="*")})
compute_resources = dict(type=args.compute_type,
minvCpus=args.min_vcpus, desiredvCpus=args.desired_vcpus, maxvCpus=args.max_vcpus,
instanceTypes=args.instance_types,
subnets=[subnet.id for subnet in vpc.subnets.all()],
securityGroupIds=[ensure_security_group("aegea.launch", vpc).id],
instanceRole=instance_profile.name,
bidPercentage=100,
spotIamFleetRole=SpotFleetBuilder.get_iam_fleet_role().name,
ec2KeyPair=ssh_key_name,
launchTemplate=dict(launchTemplateName=launch_template))
logger.info("Creating compute environment %s in %s", args.name, vpc)
compute_environment = clients.batch.create_compute_environment(computeEnvironmentName=args.name,
type=args.type,
computeResources=compute_resources,
serviceRole=batch_iam_role.name)
wtr = make_waiter(clients.batch.describe_compute_environments, "computeEnvironments[].status", "VALID", "pathAny",
delay=2, max_attempts=300)
wtr.wait(computeEnvironments=[args.name])
return compute_environment
cce_parser = register_parser(create_compute_environment, parent=batch_parser, help="Create a Batch compute environment")
cce_parser.add_argument("name")
cce_parser.add_argument("--type", choices={"MANAGED", "UNMANAGED"})
cce_parser.add_argument("--compute-type", choices={"EC2", "SPOT"})
cce_parser.add_argument("--min-vcpus", type=int)
cce_parser.add_argument("--desired-vcpus", type=int)
cce_parser.add_argument("--max-vcpus", type=int)
cce_parser.add_argument("--instance-types", nargs="+").completer = instance_type_completer
cce_parser.add_argument("--ssh-key-name")
cce_parser.add_argument("--instance-role", default=__name__ + ".ecs_container_instance")
cce_parser.add_argument("--service-role", default=__name__ + ".service")
cce_parser.add_argument("--ecs-container-instance-ami")
cce_parser.add_argument("--ecs-container-instance-ami-tags")
def update_compute_environment(args):
update_compute_environment_args = dict(computeEnvironment=args.name, computeResources={})
if args.min_vcpus is not None:
update_compute_environment_args["computeResources"].update(minvCpus=args.min_vcpus)
if args.desired_vcpus is not None:
update_compute_environment_args["computeResources"].update(desiredvCpus=args.desired_vcpus)
if args.max_vcpus is not None:
update_compute_environment_args["computeResources"].update(maxvCpus=args.max_vcpus)
return clients.batch.update_compute_environment(**update_compute_environment_args)
uce_parser = register_parser(update_compute_environment, parent=batch_parser, help="Update a Batch compute environment")
uce_parser.add_argument("name").completer = complete_ce_name
uce_parser.add_argument("--min-vcpus", type=int)
uce_parser.add_argument("--desired-vcpus", type=int)
uce_parser.add_argument("--max-vcpus", type=int)
def delete_compute_environment(args):
clients.batch.update_compute_environment(computeEnvironment=args.name, state="DISABLED")
wtr = make_waiter(clients.batch.describe_compute_environments, "computeEnvironments[].status", "VALID", "pathAny")
wtr.wait(computeEnvironments=[args.name])
clients.batch.delete_compute_environment(computeEnvironment=args.name)
parser = register_parser(delete_compute_environment, parent=batch_parser, help="Delete a Batch compute environment")
parser.add_argument("name").completer = complete_ce_name
def ensure_queue(name):
cq_args = argparse.Namespace(name=name, priority=5, compute_environments=[name])
try:
return create_queue(cq_args)
except ClientError:
create_compute_environment(cce_parser.parse_args(args=[name]))
return create_queue(cq_args)
def submit(args):
if args.job_definition_arn is None:
if not any([args.command, args.execute, args.cwl]):
raise AegeaException("One of the arguments --command --execute --cwl is required")
elif args.name is None:
raise AegeaException("The argument --name is required")
ensure_log_group("docker")
ensure_log_group("syslog")
if args.job_definition_arn is None:
command, environment = get_command_and_env(args)
container_overrides = dict(command=command, environment=environment)
jd_res = ensure_job_definition(args)
args.job_definition_arn = jd_res["jobDefinitionArn"]
args.name = args.name or "{}_{}".format(jd_res["jobDefinitionName"], jd_res["revision"])
else:
container_overrides = {}
if args.command:
container_overrides["command"] = args.command
if args.environment:
container_overrides["environment"] = args.environment
submit_args = dict(jobName=args.name,
jobQueue=args.queue,
dependsOn=[dict(jobId=dep) for dep in args.depends_on],
jobDefinition=args.job_definition_arn,
parameters={k: v for k, v in args.parameters},
containerOverrides=container_overrides)
if args.dry_run:
print("The following command would be run: {0}".format(submit_args))
return {"Dry run succeeded": True}
try:
job = clients.batch.submit_job(**submit_args)
except ClientError as e:
if not re.search("JobQueue .+ not found", str(e)):
raise
ensure_queue(args.queue)
job = clients.batch.submit_job(**submit_args)
if args.watch:
watch(watch_parser.parse_args([job["jobId"]]))
if args.cwl:
job.update(resources.dynamodb.Table("aegea-batch-jobs").get_item(Key={"job_id": job["jobId"]})["Item"])
elif args.wait:
raise NotImplementedError()
return job
submit_parser = register_parser(submit, parent=batch_parser, help="Submit a job to a Batch queue")
submit_parser.add_argument("--name")
submit_parser.add_argument("--queue", default=__name__.replace(".", "_")).completer = complete_queue_name
submit_parser.add_argument("--depends-on", nargs="+", metavar="JOB_ID", default=[])
submit_parser.add_argument("--job-definition-arn")
def add_command_args(parser):
group = parser.add_mutually_exclusive_group()
group.add_argument("--watch", action="store_true", help="Monitor submitted job, stream log until job completes")
group.add_argument("--wait", action="store_true",
help="Block on job. Exit with code 0 if job succeeded, 1 if failed")
group = parser.add_mutually_exclusive_group()
group.add_argument("--command", nargs="+", help="Run these commands as the job (using " + BOLD("bash -c") + ")")
group.add_argument("--execute", type=argparse.FileType("rb"), metavar="EXECUTABLE",
help="Read this executable file and run it as the job")
group.add_argument("--cwl", metavar="CWL_DEFINITION",
help="Read this Common Workflow Language definition file and run it as the job")
parser.add_argument("--cwl-input", type=argparse.FileType("rb"), metavar="CWLINPUT", default=sys.stdin,
help="With --cwl, use this file as the CWL job input (default: stdin)")
parser.add_argument("--environment", nargs="+", metavar="NAME=VALUE",
type=lambda x: dict(zip(["name", "value"], x.split("=", 1))), default=[])
parser.add_argument("--staging-s3-bucket", help=argparse.SUPPRESS)
def add_job_defn_args(parser):
parser.add_argument("--ulimits", nargs="*",
help="Separate ulimit name and value with colon, for example: --ulimits nofile:20000",
default=["nofile:100000"])
img_group = parser.add_mutually_exclusive_group()
img_group.add_argument("--image", default="ubuntu", metavar="DOCKER_IMAGE",
help="Docker image URL to use for running job/task")
ecs_img_help = "Name of Docker image residing in this account's Elastic Container Registry"
ecs_img_arg = img_group.add_argument("--ecs-image", "--ecr-image", "-i", metavar="REPO[:TAG]", help=ecs_img_help)
ecs_img_arg.completer = ecr_image_name_completer
parser.add_argument("--volumes", nargs="+", metavar="HOST_PATH=GUEST_PATH", type=lambda x: x.split("=", 1),
default=[])
parser.add_argument("--memory-mb", dest="memory", type=int, default=1024)
add_command_args(submit_parser)
group = submit_parser.add_argument_group(title="job definition parameters", description="""
See http://docs.aws.amazon.com/batch/latest/userguide/job_definitions.html""")
add_job_defn_args(group)
group.add_argument("--vcpus", type=int, default=1)
group.add_argument("--gpus", type=int, default=0)
group.add_argument("--privileged", action="store_true", default=False)
group.add_argument("--volume-type", choices={"standard", "io1", "gp2", "sc1", "st1"},
help="io1, PIOPS SSD; gp2, general purpose SSD; sc1, cold HDD; st1, throughput optimized HDD")
group.add_argument("--parameters", nargs="+", metavar="NAME=VALUE", type=lambda x: x.split("=", 1), default=[])
group.add_argument("--job-role", metavar="IAM_ROLE", default=__name__ + ".worker",
help="Name of IAM role to grant to the job")
group.add_argument("--storage", nargs="+", metavar="MOUNTPOINT=SIZE_GB",
type=lambda x: x.rstrip("GBgb").split("=", 1), default=[])
group.add_argument("--efs-storage", action="store", dest="efs_storage", default=False,
help="Mount EFS network filesystem to the mount point specified. Example: --efs-storage /mnt")
group.add_argument("--mount-instance-storage", nargs="?", const="/mnt",
help="Assemble (MD RAID0), format and mount ephemeral instance storage on this mount point")
submit_parser.add_argument("--timeout",
help="Terminate (and possibly restart) the job after this time (use suffix s, m, h, d, w)")
submit_parser.add_argument("--retry-attempts", type=int, default=1,
help="Number of times to restart the job upon failure")
submit_parser.add_argument("--dry-run", action="store_true", help="Gather arguments and stop short of submitting job")
def terminate(args):
def terminate_one(job_id):
return clients.batch.terminate_job(jobId=job_id, reason=args.reason)
with concurrent.futures.ThreadPoolExecutor() as executor:
result = list(executor.map(terminate_one, args.job_id))
logger.info("Sent termination requests for %d jobs", len(result))
parser = register_parser(terminate, parent=batch_parser, help="Terminate Batch jobs")
parser.add_argument("job_id", nargs="+")
parser.add_argument("--reason", help="A message to attach to the job that explains the reason for canceling it")
def ls(args, page_size=100):
queues = args.queues or [q["jobQueueName"] for q in clients.batch.describe_job_queues()["jobQueues"]]
def list_jobs_worker(list_jobs_worker_args):
queue, status = list_jobs_worker_args
return [j["jobId"] for j in clients.batch.list_jobs(jobQueue=queue, jobStatus=status)["jobSummaryList"]]
with concurrent.futures.ThreadPoolExecutor() as executor:
job_ids = sum(executor.map(list_jobs_worker, itertools.product(queues, args.status)), [])
def describe_jobs_worker(start_index):
return clients.batch.describe_jobs(jobs=job_ids[start_index:start_index + page_size])["jobs"]
table = sum(executor.map(describe_jobs_worker, range(0, len(job_ids), page_size)), [])
page_output(tabulate(table, args, cell_transforms={"createdAt": Timestamp}))
job_status_colors = dict(SUBMITTED=YELLOW(), PENDING=YELLOW(), RUNNABLE=BOLD() + YELLOW(),
STARTING=GREEN(), RUNNING=GREEN(),
SUCCEEDED=BOLD() + GREEN(), FAILED=BOLD() + RED())
job_states = job_status_colors.keys()
parser = register_listing_parser(ls, parent=batch_parser, help="List Batch jobs")
parser.add_argument("--queues", nargs="+").completer = complete_queue_name
parser.add_argument("--status", nargs="+", default=job_states, choices=job_states)
def describe(args):
return clients.batch.describe_jobs(jobs=[args.job_id])["jobs"][0]
parser = register_parser(describe, parent=batch_parser, help="Describe a Batch job")
parser.add_argument("job_id")
def format_job_status(status):
return job_status_colors[status] + status + ENDC()
def get_logs(args):
for event in CloudwatchLogReader(args.log_stream_name, head=args.head, tail=args.tail):
print(str(Timestamp(event["timestamp"])), event["message"])
def save_job_desc(job_desc):
try:
cprops = dict(image="busybox", vcpus=1, memory=4,
environment=[dict(name="job_desc", value=json.dumps(job_desc))])
jd_name = "{}_job_desc_{}".format(__name__.replace(".", "_"), job_desc["jobId"])
clients.batch.register_job_definition(jobDefinitionName=jd_name, type="container", containerProperties=cprops)
except Exception as e:
logger.debug("Error while saving job description: %s", e)
def get_job_desc(job_id):
try:
return clients.batch.describe_jobs(jobs=[job_id])["jobs"][0]
except IndexError:
jd_name = "{}_job_desc_{}".format(__name__.replace(".", "_"), job_id)
jd = clients.batch.describe_job_definitions(jobDefinitionName=jd_name)["jobDefinitions"][0]
return json.loads(jd["containerProperties"]["environment"][0]["value"])
def watch(args):
job_desc = get_job_desc(args.job_id)
args.job_name = job_desc["jobName"]
logger.info("Watching job %s (%s)", args.job_id, args.job_name)
last_status = None
while last_status not in {"SUCCEEDED", "FAILED"}:
job_desc = get_job_desc(args.job_id)
if job_desc["status"] != last_status:
logger.info("Job %s %s", args.job_id, format_job_status(job_desc["status"]))
last_status = job_desc["status"]
if job_desc["status"] in {"RUNNING", "SUCCEEDED", "FAILED"}:
logger.info("Job %s log stream: %s", args.job_id, job_desc.get("container", {}).get("logStreamName"))
save_job_desc(job_desc)
if job_desc["status"] in {"RUNNING", "SUCCEEDED", "FAILED"} and "logStreamName" in job_desc["container"]:
args.log_stream_name = job_desc["container"]["logStreamName"]
get_logs(args)
if "statusReason" in job_desc:
logger.info("Job %s: %s", args.job_id, job_desc["statusReason"])
if job_desc.get("container", {}).get("exitCode"):
return SystemExit(job_desc["container"]["exitCode"])
time.sleep(1)
get_logs_parser = register_parser(get_logs, parent=batch_parser, help="Retrieve logs for a Batch job")
get_logs_parser.add_argument("log_stream_name")
watch_parser = register_parser(watch, parent=batch_parser, help="Monitor a running Batch job and stream its logs")
watch_parser.add_argument("job_id")
for parser in get_logs_parser, watch_parser:
lines_group = parser.add_mutually_exclusive_group()
lines_group.add_argument("--head", type=int, nargs="?", const=10,
help="Retrieve this number of lines from the beginning of the log (default 10)")
lines_group.add_argument("--tail", type=int, nargs="?", const=10,
help="Retrieve this number of lines from the end of the log (default 10)")
def ssh(args):
job_desc = clients.batch.describe_jobs(jobs=[args.job_id])["jobs"][0]
job_queue_desc = clients.batch.describe_job_queues(jobQueues=[job_desc["jobQueue"]])["jobQueues"][0]
ce = job_queue_desc["computeEnvironmentOrder"][0]["computeEnvironment"]
ce_desc = clients.batch.describe_compute_environments(computeEnvironments=[ce])["computeEnvironments"][0]
ecs_ci_arn = job_desc["container"]["containerInstanceArn"]
ecs_ci_desc = clients.ecs.describe_container_instances(cluster=ce_desc["ecsClusterArn"],
containerInstances=[ecs_ci_arn])["containerInstances"][0]
ecs_ci_ec2_id = ecs_ci_desc["ec2InstanceId"]
for reservation in paginate(clients.ec2.get_paginator("describe_instances"), InstanceIds=[ecs_ci_ec2_id]):
ecs_ci_address = reservation["Instances"][0]["PublicDnsName"]
logger.info("Job {} is on ECS container instance {} ({})".format(args.job_id, ecs_ci_ec2_id, ecs_ci_address))
ssh_args = ["ssh", "-l", "ec2-user", ecs_ci_address,
"docker", "ps", "--filter", "name=" + args.job_id, "--format", "{{.ID}}"]
logger.info("Running: {}".format(" ".join(ssh_args)))
container_id = subprocess.check_output(ssh_args).decode().strip()
subprocess.call(["ssh", "-t", "-l", "ec2-user", ecs_ci_address,
"docker", "exec", "--interactive", "--tty", container_id] + (args.ssh_args or ["/bin/bash", "-l"]))
ssh_parser = register_parser(ssh, parent=batch_parser, help="Log in to a running Batch job via SSH")
ssh_parser.add_argument("job_id")
ssh_parser.add_argument("ssh_args", nargs=argparse.REMAINDER)
| 57.489796
| 120
| 0.693734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,378
| 0.23864
|
836c208587b33a187a0445b8a77d2420c941ff8d
| 10,897
|
py
|
Python
|
src/run_joint_confidence_generator-only.py
|
williamsashbee/Confident_classifier
|
cba3ef862b310afc3af6c4a62b524f032f45549e
|
[
"MIT"
] | null | null | null |
src/run_joint_confidence_generator-only.py
|
williamsashbee/Confident_classifier
|
cba3ef862b310afc3af6c4a62b524f032f45549e
|
[
"MIT"
] | null | null | null |
src/run_joint_confidence_generator-only.py
|
williamsashbee/Confident_classifier
|
cba3ef862b310afc3af6c4a62b524f032f45549e
|
[
"MIT"
] | null | null | null |
##############################################
# This code is based on samples from pytorch #
##############################################
# Writer: Kimin Lee
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import data_loader
import numpy as np
import torchvision.utils as vutils
import models
from torchvision import datasets, transforms
from torch.autograd import Variable
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
# Training settings
parser = argparse.ArgumentParser(description='Training code - joint confidence')
parser.add_argument('--batch-size', type=int, default=128, help='input batch size for training')
parser.add_argument('--save-interval', type=int, default=3, help='save interval')
parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--log-interval', type=int, default=100,
help='how many batches to wait before logging training status')
parser.add_argument('--dataset', default='cifar10', help='mnist | cifar10 | svhn')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--wd', type=float, default=0.0, help='weight decay')
parser.add_argument('--droprate', type=float, default=0.1, help='learning rate decay')
parser.add_argument('--decreasing_lr', default='60', help='decreasing strategy')
parser.add_argument('--num_classes', type=int, default=10, help='the # of classes')
parser.add_argument('--beta', type=float, default=8, help='penalty parameter for KL term')
args = parser.parse_args()
if args.dataset == 'cifar10':
args.beta = 0.1
args.batch_size = 64
print(args)
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("Random Seed: ", args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
print('load data: ', args.dataset)
if args.dataset == 'mnist':
transform = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=True, download=True, transform=transform),
batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=False, download=True, transform=transform),
batch_size=128, shuffle=True)
else:
train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize,
args.dataroot)
print('Load model')
model = models.vgg13()
print(model)
print('load GAN')
nz = 100
G = models.cGenerator(1, nz, 64, 3) # ngpu, nz, ngf, nc
D = models.cDiscriminator(1, 3, 64) # ngpu, nc, ndf
G.weight_init(mean=0.0, std=0.02)
D.weight_init(mean=0.0, std=0.02)
# Initial setup for GAN
real_label = 1
fake_label = 0
criterion = nn.BCELoss()
nz = 100
#fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
# fixed_noise = torch.randn((128, 100)).view(-1, 100, 1, 1)
if args.cuda:
model.cuda()
D.cuda()
G.cuda()
criterion.cuda()
#fixed_noise = fixed_noise.cuda()
#fixed_noise = Variable(fixed_noise)
print('Setup optimizer')
lr = 0.0002
batch_size = 128
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
G_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))
D_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
img_size = 32
num_labels = 10
# os.environ["CUDA_LAUNCH_BLOCKING"]="1"
# Binary Cross Entropy loss
BCE_loss = nn.BCELoss()
# fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
fixed_noise = torch.randn((64, 100)).view(-1, 100, 1, 1).cuda()
fixed_label = 0
first = True
def train(epoch):
model.train()
# D_train_loss = 0
# G_train_loss = 3
trg = 0
trd = 0
for batch_idx, (data, y_labels) in enumerate(train_loader):
global first
global fixed_noise
global fixed_label
if first:
    first = False
fixed_label = y_labels.squeeze()[:64].type(torch.cuda.LongTensor)
assert fixed_label.shape == (64,)
print( "saving fixed_label!")
vutils.save_image(data[:64], '{}/{}2jointConfidencerealReference{}.png'.format(args.outf,args.dataset, epoch), normalize=True)
uniform_dist = torch.Tensor(data.size(0), args.num_classes).fill_((1. / args.num_classes)).cuda()
x_ = data.cuda()
assert x_[0, :, :, :].shape == (3, 32, 32)
# train discriminator D
mini_batch = x_.size()[0]
"""
D.zero_grad()
y_ = y_labels
y_real_ = torch.ones(mini_batch)
y_fake_ = torch.zeros(mini_batch)
y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda())
D_result = D(x_, y_).squeeze()
D_real_loss = BCE_loss(D_result, y_real_)
z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1).cuda()
y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.cuda.LongTensor).squeeze()
z_, y_ = Variable(z_.cuda()), Variable(y_.cuda())
G_result = G(z_, y_.squeeze())
D_result = D(G_result, y_).squeeze()
D_fake_loss = BCE_loss(D_result, y_fake_)
D_fake_score = D_result.data.mean()
D_train_loss = D_real_loss + D_fake_loss
D_train_loss.backward()
# D_losses.append(D_train_loss.item())
"""
# train generator G
G.zero_grad()
#z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1).cuda()
#y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.cuda.LongTensor).squeeze()
#z_, y_ = Variable(z_.cuda()), Variable(y_.cuda())
z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1).cuda()
y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.cuda.LongTensor).squeeze()
z_, y_ = Variable(z_.cuda()), Variable(y_.cuda())
G_result = G(z_, y_.squeeze())
#D_result = D(G_result, y_).squeeze()
#G_train_loss = BCE_loss(D_result, y_real_)
# minimize the true distribution
KL_fake_output = F.log_softmax(model(G_result))
errG_KL = F.kl_div(KL_fake_output, uniform_dist)*args.num_classes
#generator_loss = G_train_loss + args.beta * errG_KL # 12.0, .65, 0e-8
generator_loss = errG_KL # 12.0, .65, 0e-8
generator_loss.backward()
###########################
# (3) Update classifier #
###########################
# cross entropy loss
optimizer.zero_grad()
x_ = Variable(x_)
output = F.log_softmax(model(x_))
loss = F.nll_loss(output.cuda(), y_labels.type(torch.cuda.LongTensor).squeeze())
# KL divergence
####
# z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1).cuda()
# y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.cuda.LongTensor).squeeze()
# z_, y_ = Variable(z_.cuda()), Variable(y_.cuda())
G_result = G(z_, y_.squeeze())
# !!!#D_result = D(G_result, y_fill_).squeeze()
####
KL_fake_output = F.log_softmax(model(G_result))
KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes
total_loss = loss + args.beta * KL_loss_fake
# total_loss = loss
total_loss.backward()
trg += 1
trd += 1
D_optimizer.step()
G_optimizer.step()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))
fake = G(fixed_noise, fixed_label)
vutils.save_image(fake.data, '{}/{}2jointConfidenceCDCgan_samples_epoch_{}.png'.format(args.outf,args.dataset, epoch), normalize=True)
def test(epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
for data, target in test_loader:
total += data.size(0)
if args.cuda:
data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)  # pre-0.4 idiom; torch.no_grad() is the modern equivalent
        output = F.log_softmax(model(data), dim=1)
target = target.type(
torch.LongTensor) # https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/4
if args.cuda:
output = output.cuda()
target = target.cuda()
target = torch.squeeze(target)
test_loss += F.nll_loss(output, target).data.item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
    test_loss /= len(test_loader)  # the loss function already averages over batch size
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, total,
100. * correct / total))
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
if epoch in decreasing_lr:
G_optimizer.param_groups[0]['lr'] *= args.droprate
D_optimizer.param_groups[0]['lr'] *= args.droprate
optimizer.param_groups[0]['lr'] *= args.droprate
if epoch % 20 == 0:
# do checkpointing
torch.save(G.state_dict(), '%s/2netG_epoch_%d.pth' % (args.outf, epoch))
torch.save(D.state_dict(), '%s/2netD_epoch_%d.pth' % (args.outf, epoch))
torch.save(model.state_dict(), '%s/2model_epoch_%d.pth' % (args.outf, epoch))
| 35.727869
| 146
| 0.626962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,011
| 0.368083
|
836ce39bcde99da750d6f14d0f025be31157aa0f
| 302
|
py
|
Python
|
450/Nagendra/greedy/Swap and Maximize .py
|
Nagendracse1/Competitive-Programming
|
325e151b9259dbc31d331c8932def42e3ab09913
|
[
"MIT"
] | 3
|
2020-12-20T10:23:11.000Z
|
2021-06-16T10:34:18.000Z
|
450/Nagendra/greedy/Swap and Maximize .py
|
Spring-dot/Competitive-Programming
|
98add277a8b029710c749d1082de25c524e12408
|
[
"MIT"
] | null | null | null |
450/Nagendra/greedy/Swap and Maximize .py
|
Spring-dot/Competitive-Programming
|
98add277a8b029710c749d1082de25c524e12408
|
[
"MIT"
] | null | null | null |
# code: https://practice.geeksforgeeks.org/problems/swap-and-maximize/0
for _ in range(int(input())):
    n = int(input())
    arr = list(map(int, input().split()))
    arr.sort()
    total = 0  # renamed from `max` so the builtin is not shadowed
    for i in range(n//2):
        total -= 2*arr[i]
        total += 2*arr[n-i-1]
    print(total)
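# Why the greedy works (illustrative example, values assumed): for
# arr = [4, 3, 2, 5, 1], sorting gives [1, 2, 3, 4, 5]; each of the n//2
# smallest values is subtracted twice and each of the n//2 largest added
# twice, so total = -2*1 - 2*2 + 2*5 + 2*4 = 12, the maximum achievable
# circular sum of |a[i] - a[i+1]| after swaps.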
| 21.571429
| 69
| 0.523179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.228477
|
836d607bb4f413a3224c84e9b615d0b74908dbb0
| 2,304
|
py
|
Python
|
ga4stpg/tree/generate.py
|
GiliardGodoi/ppgi-stpg-gpx
|
2097b086111e1cde423031c9a9d58f45b2b96353
|
[
"MIT"
] | null | null | null |
ga4stpg/tree/generate.py
|
GiliardGodoi/ppgi-stpg-gpx
|
2097b086111e1cde423031c9a9d58f45b2b96353
|
[
"MIT"
] | 5
|
2021-01-26T17:28:32.000Z
|
2021-03-14T13:46:48.000Z
|
ga4stpg/tree/generate.py
|
GiliardGodoi/ppgi-stpg-gpx
|
2097b086111e1cde423031c9a9d58f45b2b96353
|
[
"MIT"
] | 1
|
2021-01-25T16:35:59.000Z
|
2021-01-25T16:35:59.000Z
|
from random import sample, shuffle
from ga4stpg.graph import UGraph
from ga4stpg.graph.disjointsets import DisjointSets
class GenerateBasedPrimRST:
def __init__(self, stpg):
self.stpg = stpg
def __call__(self):
result = UGraph()
terminals = self.stpg.terminals.copy()
GRAPH = self.stpg.graph
edges = set() # or is it better a list?
vi = sample(range(1, self.stpg.nro_nodes+1), k=1)[0]
terminals.discard(vi)
for w in GRAPH.adjacent_to(vi):
edges.add((vi, w))
while terminals and edges:
            edge = sample(tuple(edges), k=1)[0]  # sample() requires a sequence; passing a set raises TypeError on Python >= 3.11
v, w = edge
if w not in result:
terminals.discard(w)
result.add_edge(v, w)
for u in GRAPH.adjacent_to(w):
if u not in result:
edges.add((w, u))
edges.remove(edge) # to remove from a list it can take O(n)
return result
class GenerateBasedKruskalRST:
def __init__(self, stpg):
self.stpg = stpg
def __call__(self):
result = UGraph()
done = DisjointSets()
edges = [(u, v) for u, v in self.stpg.graph.gen_undirect_edges()]
shuffle(edges)
for v in self.stpg.terminals:
done.make_set(v)
while edges and len(done.get_disjoint_sets()) > 1:
edge = edges.pop()
y, z = edge[0], edge[1]
if y not in done: done.make_set(y)
if z not in done: done.make_set(z)
if done.find(y) != done.find(z):
result.add(y, z)
done.union(y, z)
return result
class GenerateBasedRandomWalk:
def __init__(self, stpg):
self.stpg = stpg
def __call__(self):
GRAPH = self.stpg.graph
terminals = self.stpg.terminals.copy()
result = UGraph()
v = terminals.pop()
while terminals:
adjacents = GRAPH.adjacent_to(v, lazy=False)
u = sample(adjacents, k=1)[0]
if u not in result:
result.add_edge(v, u)
terminals.discard(u)
v = u
return result
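# Minimal usage sketch (assumes an `stpg` instance exposing the `graph`,
# `terminals` and `nro_nodes` attributes the constructors above expect):
#
#   from ga4stpg.tree.generate import GenerateBasedPrimRST
#   generator = GenerateBasedPrimRST(stpg)
#   tree = generator()  # a UGraph random tree spanning the terminals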
| 27.105882
| 74
| 0.521267
| 2,163
| 0.938802
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.041233
|
83703f7128768cbf161b1e6803712f5beeb2dcb8
| 10,284
|
py
|
Python
|
Probabilistic_Matching.py
|
Data-Linkage/Rwandan_linkage
|
2c9e504b510f5ec54d207779e20b5ad491c4c5df
|
[
"MIT"
] | null | null | null |
Probabilistic_Matching.py
|
Data-Linkage/Rwandan_linkage
|
2c9e504b510f5ec54d207779e20b5ad491c4c5df
|
[
"MIT"
] | null | null | null |
Probabilistic_Matching.py
|
Data-Linkage/Rwandan_linkage
|
2c9e504b510f5ec54d207779e20b5ad491c4c5df
|
[
"MIT"
] | null | null | null |
import pandas as pd
import rapidfuzz
import math
import numpy as np
# ------------------------- #
# --------- DATA ---------- #
# ------------------------- #
# Read in mock census and PES data
CEN = pd.read_csv('Data/Mock_Rwanda_Data_Census.csv')
PES = pd.read_csv('Data/Mock_Rwanda_Data_Pes.csv')
# select needed columns
CEN = CEN[['id_indi_cen', 'firstnm_cen', 'lastnm_cen', 'age_cen', 'month_cen', 'year_cen', 'sex_cen', 'province_cen']]
PES = PES[['id_indi_pes', 'firstnm_pes', 'lastnm_pes', 'age_pes', 'month_pes', 'year_pes', 'sex_pes', 'province_pes']]
# ----------------------------- #
# --------- BLOCKING ---------- #
# ----------------------------- #
# Block on province geographic variable
BP1 = 'province'
# Combine
for i, BP in enumerate([BP1], 1):
if i == 1:
combined_blocks = PES.merge(CEN, left_on = BP + '_pes', right_on = BP + '_cen', how = 'inner').drop_duplicates(['id_indi_cen', 'id_indi_pes'])
print("1" + str(combined_blocks.count()))
# Count
len(combined_blocks) # 50042
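# Blocking keeps only census-PES pairs that share a province, so the
# candidate set above (50042 pairs) is far smaller than the full cross
# join; with (for illustration) k roughly equal-sized provinces, blocking
# cuts the number of comparisons by about a factor of k.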
# -------------------------------------------------- #
# --------------- AGREEMENT VECTORS ---------------- #
# -------------------------------------------------- #
# An agreement vector is created for each candidate pair, which is then fed into the EM algorithm.
# Set v1, v2,... vn as the agreement variables
# Select agreement variables
v1 = 'firstnm'
v2 = 'lastnm'
v3 = 'month'
v4 = 'year'
v5 = 'sex'
# All agreement variables used to calculate match weights & probabilities
all_variables = [v1, v2, v3, v4, v5]
# Variables using partial agreement (string similarity)
edit_distance_variables = [v1, v2]
dob_variables = [v3, v4]
remaining_variables = [v5]
# Cut off values for edit distance variables
cutoff_values = [0.45, 0.45]
# Replace NaN with empty strings to ensure the right data types for the string similarity metrics
for variable in edit_distance_variables:
cen_var = variable+ '_cen'
pes_var = variable + '_pes'
combined_blocks[cen_var] = combined_blocks[cen_var].fillna("")
combined_blocks[pes_var] = combined_blocks[pes_var].fillna("")
def SLD(s, t):
    # Compute the standardised Levenshtein edit distance between two strings
    # using the rapidfuzz string-matching library for its fast string comparisons.
    # Dividing the result by 100 gives a similarity score between 0 and 1.
    standardised = rapidfuzz.string_metric.normalized_levenshtein(s, t) / 100
    return standardised
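# Illustrative values (assumed inputs, not from the data): SLD("JEAN", "JEAN")
# returns 1.0, while SLD("JEAN", "JOHN") returns 0.5 -- two substitutions
# over a maximum length of four characters.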
# Create forename/ last name Edit Distance score columns for all pairs
combined_blocks['firstnm_agreement'] = combined_blocks.apply(lambda x: SLD(x['firstnm_pes'], x['firstnm_cen']), axis=1)
combined_blocks['lastnm_agreement'] = combined_blocks.apply(lambda x: SLD(x['lastnm_pes'], x['lastnm_cen']), axis=1)
# --------------------------------------------------------- #
# ---------------- INITIAL M & U VALUES ------------------- #
# --------------------------------------------------------- #
# Read in M and U values
m_values = pd.read_csv('Data/m_values.csv')
u_values = pd.read_csv('Data/u_values.csv')
# Save individual M values from file
FN_M = m_values[m_values.variable == 'firstnm'].iloc[0][1]
SN_M = m_values[m_values.variable == 'lastnm'].iloc[0][1]
SEX_M = m_values[m_values.variable == 'sex'].iloc[0][1]
MONTH_M = m_values[m_values.variable == 'month'].iloc[0][1]
YEAR_M = m_values[m_values.variable == 'year'].iloc[0][1]
# Save individual U values from file
FN_U = u_values[u_values.variable == 'firstnm'].iloc[0][1]
SN_U = u_values[u_values.variable == 'lastnm'].iloc[0][1]
SEX_U = u_values[u_values.variable == 'sex'].iloc[0][1]
MONTH_U = u_values[u_values.variable == 'month'].iloc[0][1]
YEAR_U = u_values[u_values.variable == 'year'].iloc[0][1]
# Add M values to unlinked data
combined_blocks['firstnm_m'] = FN_M
combined_blocks['lastnm_m'] = SN_M
combined_blocks['sex_m'] = SEX_M
combined_blocks['month_m'] = MONTH_M
combined_blocks['year_m'] = YEAR_M
# Add U values to unlinked data
combined_blocks['firstnm_u'] = FN_U
combined_blocks['lastnm_u'] = SN_U
combined_blocks['sex_u'] = SEX_U
combined_blocks['month_u'] = MONTH_U
combined_blocks['year_u'] = YEAR_U
# Add Agreement / Disagreement Weights
for var in all_variables:
# apply calculations: agreement weight = log base 2 (m/u)
combined_blocks[var + "_agreement_weight"] = combined_blocks.apply(lambda x: (math.log2(x[var + "_m"] / x[var + "_u"])), axis = 1)
# disagreement weight = log base 2 ((1-m)/(1-u))
combined_blocks[var + "_disagreement_weight"] = combined_blocks.apply(lambda x: (math.log2((1 - x[var + "_m"]) / (1 - x[var + "_u"]))), axis = 1)
# show sample of agreement/disagreement weights calculated
print(combined_blocks[[var + "_m", var + "_u", var + "_agreement_weight", var + "_disagreement_weight"]].head(1))
'''
Alter the M and U values above (i.e. FN_M, FN_U etc. currently lines 100 - 112) to see the effect on variable agreement/disagreement weights
'''
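# Worked example with assumed values (not taken from m_values.csv):
# m = 0.9 and u = 0.1 give an agreement weight of log2(0.9/0.1) ~= 3.17
# and a disagreement weight of log2(0.1/0.9) ~= -3.17, so agreement on
# such a variable raises a pair's match score by roughly as much as
# disagreement lowers it.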
# --------------------------------------------------- #
# ------------------ MATCH SCORES ------------------ #
# --------------------------------------------------- #
''' An agreement value between 0 and 1 is calculated for each agreement variable '''
''' This is done for every candidate record pair '''
# --------------------------------------- #
# ------------- DOB SCORE -------------- #
# --------------------------------------- #
# Partial scores
combined_blocks['month_agreement'] = np.where(combined_blocks['month_pes'] == combined_blocks['month_cen'], 1/3, 0)
combined_blocks['year_agreement'] = np.where(combined_blocks['year_pes'] == combined_blocks['year_cen'], 1/2, 0)
# Compute final Score and drop extra score columns
dob_score_columns = ['month_agreement', 'year_agreement']
combined_blocks['DOB_agreement'] = combined_blocks[dob_score_columns].sum(axis=1)
# combined_blocks = combined_blocks.drop(dob_score_columns, axis = 1)
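# Arithmetic check of the DOB score: month-only agreement scores 1/3 ~= 0.33,
# year-only 1/2 = 0.5, and both together 1/3 + 1/2 = 5/6 ~= 0.83 (the
# remaining 1/6 would presumably correspond to a day-of-birth component,
# which is not available in these data -- an assumption, not stated above).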
# ---------------------------------------- #
# ---------- PARTIAL CUT OFFS ------------ #
# ---------------------------------------- #
# All partial variables except DOB
for variable, cutoff in zip(edit_distance_variables, cutoff_values):
    # If agreement is below the cut-off, set it to 0; otherwise leave the agreement as it is
combined_blocks[variable + '_agreement'] = np.where(combined_blocks[variable + "_agreement"] <= cutoff, 0, combined_blocks[variable + "_agreement"])
# Remaining variables (no partial scores)
for variable in remaining_variables:
# Calculate 1/0 Agreement Score (no partial scoring)
combined_blocks[variable + '_agreement'] = np.where(combined_blocks[variable + "_cen"] == combined_blocks[variable + "_pes"], 1, 0)
# ------------------------------------------------------------------ #
# ------------------------- WEIGHTS ------------------------------- #
# ------------------------------------------------------------------ #
# Start by giving all records agreement weights
for variable in all_variables:
combined_blocks[variable + "_weight"] = combined_blocks[variable + "_agreement_weight"]
# Update for partial agreement / disagreement (only when agreement < 1)
# source: https://www.census.gov/content/dam/Census/library/working-papers/1991/adrm/rr91-9.pdf
# weight = Agreement_Weight if Agreement = 1, and
# MAX{(Agreement_Weight - (Agreement_Weight - Disagreement_Weight)*(1-Agreement)*(9/2)), Disagreement_Weight} if 0 <= Agreement < 1.
for variable in all_variables:
combined_blocks[variable + "_weight"] = np.where(combined_blocks[variable + "_agreement"] < 1,
np.maximum(((combined_blocks[variable + "_agreement_weight"]) -
((combined_blocks[variable + "_agreement_weight"] - combined_blocks[variable + "_disagreement_weight"]) *
(1 - combined_blocks[variable + "_agreement"]) * (9/2))),
combined_blocks[variable + "_disagreement_weight"]),
combined_blocks[variable + "_weight"])
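# Worked example with assumed weights: for agreement_weight = 3.0,
# disagreement_weight = -3.0 and a partial agreement score of 0.9, the
# update gives max(3.0 - (3.0 - (-3.0)) * (1 - 0.9) * (9/2), -3.0)
# = max(3.0 - 2.7, -3.0) = 0.3, so a near-miss keeps a small positive
# weight instead of dropping to the full disagreement penalty.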
# Set weights to 0 (instead of disagreement_weight) if there is missingness in the PES or CEN variable
# (the agreement == 0 condition is needed for DOB); the parentheses make | apply before &, as intended
for variable in all_variables:
    combined_blocks[variable + "_weight"] = np.where((combined_blocks[variable + '_pes'].isnull() | combined_blocks[variable + '_cen'].isnull()) &
                                                     (combined_blocks[variable + '_agreement'] == 0), 0,
                                                     combined_blocks[variable + '_weight'])
# Sum column wise across the above columns - create match score
combined_blocks["match_score"] = combined_blocks[['firstnm_weight', 'lastnm_weight', 'month_weight', 'year_weight', 'sex_weight']].sum(axis=1)
# ------------------------------------------------------------------ #
# ----------------------- ADJUSTMENTS ----------------------------- #
# ------------------------------------------------------------------ #
# To reduce false matches going to clerical review: when both ages are present but differ by more than 5 years, set the score to 0
combined_blocks['match_score'] = np.where(combined_blocks['age_pes'].notnull() &
                                          combined_blocks['age_cen'].notnull() &
                                          ((combined_blocks['age_pes'] - combined_blocks['age_cen']).abs() > 5),
                                          0, combined_blocks['match_score'])
''' View some example candidate pairs to check that the scores assigned are sensible '''
# high-scoring candidate record pairs
from IPython.display import display  # display() is a builtin only inside notebooks; import it for script use
cen_vars = [s + '_cen' for s in all_variables]
pes_vars = [s + '_pes' for s in all_variables]
display(combined_blocks[cen_vars + pes_vars + ['match_score']].sort_values(by=['match_score'], ascending=False).head(50))
# and low-scoring candidate pairs
display(combined_blocks[cen_vars + pes_vars + ['match_score']].sort_values(by=['match_score']).head(50))
# -------------------------------------- #
# -------------- SAVE ----------------- #
# -------------------------------------- #
combined_blocks.to_csv('Data/Probabilistic_Scores.csv')
| 46.533937
| 160
| 0.587417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,533
| 0.53802
|