content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
"""Tests for the key interactiveshell module.
Authors
-------
* Julian Taylor
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import sys
import types
import unittest
from IPython.core.inputtransformer import InputTransformer
from IPython.testing.decorators import skipif
from IPython.utils import py3compat
from IPython.testing import tools as tt
# Decorator for interaction loop tests -----------------------------------
class mock_input_helper(object):
"""Machinery for tests of the main interact loop.
Used by the mock_input decorator.
"""
def mock_input(testfunc):
"""Decorator for tests of the main interact loop.
Write the test as a generator, yield-ing the input strings, which IPython
will see as if they were typed in at the prompt.
"""
return test_method
# Test classes -----------------------------------------------------------
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
51,
3558,
329,
262,
1994,
9427,
1083,
12758,
8265,
13,
198,
198,
30515,
669,
198,
26866,
198,
9,
18322,
8121,
198,
37811,
198,
2,
10097,
32501,
198,
2,
220,
150... | 4.388013 | 317 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definitions for modified MobileNet models used in LSTD."""
import tensorflow as tf
from nets import mobilenet_v1
from nets.mobilenet import conv_blocks as mobilenet_convs
from nets.mobilenet import mobilenet
slim = tf.contrib.slim
def mobilenet_v1_lite_def(depth_multiplier, low_res=False):
"""Conv definitions for a lite MobileNet v1 model.
Args:
depth_multiplier: float depth multiplier for MobileNet.
low_res: An option of low-res conv input for interleave model.
Returns:
Array of convolutions.
Raises:
ValueError: On invalid channels with provided depth multiplier.
"""
conv = mobilenet_v1.Conv
sep_conv = mobilenet_v1.DepthSepConv
return [
conv(kernel=[3, 3], stride=2, depth=32),
sep_conv(kernel=[3, 3], stride=1, depth=64),
sep_conv(kernel=[3, 3], stride=2, depth=128),
sep_conv(kernel=[3, 3], stride=1, depth=128),
sep_conv(kernel=[3, 3], stride=2, depth=256),
sep_conv(kernel=[3, 3], stride=1, depth=256),
sep_conv(kernel=[3, 3], stride=2, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1, depth=512),
sep_conv(kernel=[3, 3], stride=1 if low_res else 2, depth=1024),
sep_conv(
kernel=[3, 3],
stride=1,
depth=int(_find_target_depth(1024, depth_multiplier)))
]
def mobilenet_v2_lite_def(reduced=False, is_quantized=False, low_res=False):
"""Conv definitions for a lite MobileNet v2 model.
Args:
reduced: Determines the scaling factor for expanded conv. If True, a factor
of 6 is used. If False, a factor of 3 is used.
is_quantized: Whether the model is trained in quantized mode.
low_res: Whether the input to the model is of half resolution.
Returns:
Array of convolutions.
"""
expanded_conv = mobilenet_convs.expanded_conv
expand_input = mobilenet_convs.expand_input_by_factor
op = mobilenet.op
return dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {
'center': True,
'scale': True
},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm,
'activation_fn': tf.nn.relu6
},
(expanded_conv,): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {
'padding': 'SAME'
}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=2,
num_outputs=24),
op(expanded_conv,
expansion_size=(expand_input(3, divisible_by=1)
if reduced else expand_input(6)),
stride=1,
num_outputs=24),
op(expanded_conv, stride=2, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=1, num_outputs=32),
op(expanded_conv, stride=2, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=64),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1, num_outputs=96),
op(expanded_conv, stride=1 if low_res else 2, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv, stride=1, num_outputs=160),
op(expanded_conv,
stride=1,
num_outputs=320,
project_activation_fn=(tf.nn.relu6
if is_quantized else tf.identity))
],
)
| [
2,
15069,
13130,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.182728 | 2,397 |
import sys
sys.path.append("../pipeline")
import mysql.connector
import pickle
import argparse
import json
import itertools
from collections import defaultdict,Counter
from collections.abc import Iterable
import numpy as np
import time
import os
from scipy import stats
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare data for active learning')
#parser.add_argument('--db',required=True,type=str,help='JSON with database settings')
parser.add_argument('--inDir',required=True,type=str,help='Output dir to put matrices')
parser.add_argument('--negThreshold',required=False,default=0.3,type=float,help='Threshold below which is a confident negative (default=0.25)')
parser.add_argument('--posThreshold',required=False,default=0.7,type=float,help='Threshold above which is a confident positive (default=0.75)')
#parser.add_argument('--outFile',required=True,type=str,help='Output file')
args = parser.parse_args()
X_annotated = np.load(os.path.join(args.inDir,'X_annotated.npy'))
y_annotated = np.load(os.path.join(args.inDir,'y_annotated.npy'))
X_undecided = np.load(os.path.join(args.inDir,'X_undecided.npy'))
undecided_scores = np.load(os.path.join(args.inDir,'undecided_scores.npy'))
with open(os.path.join(args.inDir,'undecided_docs.pickle'),'rb') as f:
undecided_docs = pickle.load(f)
if False:
with open(args.db) as f:
database = json.load(f)
mydb = mysql.connector.connect(
host=database['host'],
user=database['user'],
passwd=database['passwd'],
database=database['database']
)
mycursor = mydb.cursor()
#loadDocumentIDMapping(mycursor,undecided_docs)
#baselineConfNumber = getConfidenceNumbers(X_annotated,y_annotated[:,args.label_index],X_undecided,args.posThreshold,args.negThreshold)
#print("baselineConfNumber=",baselineConfNumber)
#outcomes = searchForBestDocumentToAnnotate(X_annotated,y_annotated,X_undecided,args.posThreshold)
current_y = np.copy(y_annotated)
current_train_X = np.copy(X_annotated)
current_unknown_X = np.copy(X_undecided)
num_iter = current_unknown_X.shape[0]
prev_done = []
start_time = time.time()
for i in range(num_iter):
multi_scores = getMultiScores(current_train_X, current_y, current_unknown_X)
np.savetxt('multi_scores_%04d.csv' % i, multi_scores, delimiter=',', fmt="%f")
min_scores = multi_scores.min(axis=1)
min_score_percentiles = stats.rankdata(min_scores,"average") / min_scores.shape[0]
#print(min_score_percentiles.shape)
#print(min_score_percentiles[409])
current_outcomes = searchForBestDocumentToAnnotate(current_train_X,current_y,current_unknown_X,args.posThreshold,show_time=False)
for j in prev_done:
current_outcomes[j,:] = -1
np.savetxt('current_outcomes_%04d.csv' % i, current_outcomes, delimiter=',', fmt="%d")
best_doc_change = current_outcomes.min(axis=1).max()
best_doc_index = current_outcomes.min(axis=1).argmax()
best_min_score_percentile = min_score_percentiles[best_doc_index]
print("# best_doc_index=%d, best_doc_change=%d, train_size=%d" % (best_doc_index,best_doc_change,current_train_X.shape[0]))
print("# best_min_score_percentile = %f" % best_min_score_percentile)
which_label_was_min = current_outcomes[best_doc_index,:].argmin()
label_score_percentiles = stats.rankdata(multi_scores[:,which_label_was_min],"average") / multi_scores.shape[0]
label_score_percentile_for_doc = label_score_percentiles[best_doc_index]
num_where_label_was_min = (current_outcomes.min(axis=1) == current_outcomes[:,which_label_was_min]).sum()
print("which_label_was_min = %d" % which_label_was_min)
print("num_where_label_was_min = %d/%d (%.1f%%)" % (num_where_label_was_min,current_outcomes.shape[0],100*num_where_label_was_min/current_outcomes.shape[0]))
print("label_score_percentile_for_doc = %f" % label_score_percentile_for_doc)
prev_done.append(best_doc_index)
current_train_X = np.vstack([current_train_X,current_unknown_X[best_doc_index,:]])
#current_unknown_X = np.delete(current_unknown_X,best_doc_index,0)
current_y = np.vstack([current_y,np.zeros((1,current_y.shape[1]))])
current_y[current_y.shape[0]-1,current_outcomes[best_doc_index,:].argmax()] = 1
outputTimeEstimates(i,num_iter,start_time)
#break
np.savetxt('undecided_scores.csv', undecided_scores, delimiter=',', fmt="%f")
| [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
40720,
79,
541,
4470,
4943,
198,
198,
11748,
48761,
13,
8443,
273,
198,
11748,
2298,
293,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
340,
861,
10141,
198,
6738,
17268,
13... | 2.582307 | 1,786 |
import string
import PIL.Image
from .printable import Printable
class _ImageHandler:
"""Convert PIL images to ZPL
Based on Java example from:
http://www.jcgonzalez.com/java-image-to-zpl-example
"""
@staticmethod
@staticmethod
@property
@property
| [
11748,
4731,
198,
11748,
350,
4146,
13,
5159,
198,
198,
6738,
764,
4798,
540,
1330,
12578,
540,
628,
198,
4871,
4808,
5159,
25060,
25,
198,
220,
220,
220,
37227,
3103,
1851,
350,
4146,
4263,
284,
1168,
6489,
628,
220,
220,
220,
13403,... | 2.759615 | 104 |
from .context import uspto
import unittest
class AdvancedTestSuite(unittest.TestCase):
"""Advanced test cases."""
print "here we go"
if __name__ == '__main__':
unittest.main()
| [
6738,
764,
22866,
1330,
514,
457,
78,
198,
198,
11748,
555,
715,
395,
198,
198,
4871,
13435,
14402,
5606,
578,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
220,
37227,
28809,
1332,
2663,
526,
15931,
198,
220,
220,
220,
3... | 2.690141 | 71 |
import discord
from discord.ext import commands
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
628
] | 4.9 | 10 |
import logging
from playhouse import migrate
from alcazar_logging import BraceAdapter
logger = BraceAdapter(logging.getLogger(__name__))
| [
11748,
18931,
198,
198,
6738,
711,
4803,
1330,
32492,
198,
198,
6738,
435,
66,
29413,
62,
6404,
2667,
1330,
1709,
558,
47307,
198,
198,
6404,
1362,
796,
1709,
558,
47307,
7,
6404,
2667,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
4008,
... | 3.2 | 45 |
from uuid import UUID
from aio_pika import Exchange
from tentacruel import HeosClientProtocol
HEOS_NS = UUID('003df636-ad90-11e9-aca1-9eb6d06a70c5')
attributes = {
"/player_volume_changed": {
"device_id": "pid",
"name": "heos.volume",
"subattributes": ["level", "mute"]
},
"/player_now_playing_progress": {
"device_id": "pid",
"name": "heos.progress",
"subattributes": ["cur_pos", "duration"]
}
}
| [
6738,
334,
27112,
1330,
471,
27586,
198,
198,
6738,
257,
952,
62,
79,
9232,
1330,
12516,
198,
6738,
11105,
330,
622,
417,
1330,
679,
418,
11792,
19703,
4668,
198,
198,
13909,
2640,
62,
8035,
796,
471,
27586,
10786,
11245,
7568,
21,
26... | 2.163551 | 214 |
import sys
import cPickle as pickle
from collections import OrderedDict
argv = sys.argv[1:]
if len(argv) < 1:
print "usage: create_span_concept_dict.py <span_concept_dataset.p> <output_filename>"
sys.exit()
span_concept_dataset = pickle.load(open(argv[0], "rb"))
output_filename = argv[1]
output_file = open(output_filename, 'w')
span_concept_dict = {}
for id, span_concept_data in span_concept_dataset.iteritems():
for [span, pos, concept, name, ner, nx_root, concept_idx] in span_concept_data:
if span_concept_dict.has_key(span):
if span_concept_dict[span].has_key(concept_idx):
span_concept_dict[span][concept_idx] += 1
else:
span_concept_dict[span][concept_idx] = 1
else:
span_concept_dict[span] = {concept_idx:1}
#Sort the concepts for each span by their frequency
for span, concepts in span_concept_dict.iteritems():
span_concept_dict[span] = OrderedDict(sorted(concepts.items(), key=lambda concepts: concepts[1], reverse=True))
for span, concepts in span_concept_dict.iteritems():
line = span.replace(" ", "_") + " "
for (concept_idx, count) in concepts.iteritems():
line += str(concept_idx) + ":" + str(count) + " "
output_file.write(line+"\n")
pickle.dump(span_concept_dict, open(output_filename + ".p", "wb"))
| [
11748,
25064,
198,
11748,
269,
31686,
293,
355,
2298,
293,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
853,
85,
796,
25064,
13,
853,
85,
58,
16,
47715,
198,
361,
18896,
7,
853,
85,
8,
1279,
352,
25,
198,
197,
4798,
366... | 2.663136 | 472 |
# -*- coding: utf-8 -*-
from queue import Queue
import random
import socket
import threading
import unittest
from coapclient import HelperClient
from coapforwardproxy import CoAPForwardProxy
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
16834,
1330,
4670,
518,
198,
11748,
4738,
198,
11748,
17802,
198,
11748,
4704,
278,
198,
11748,
555,
715,
395,
198,
198,
6738,
763,
499,
16366,
1330,
5053,
52... | 3.151163 | 172 |
from datetime import datetime
import pandas as pd
import websocket
from tests import tests_path
from crypto_data.shared.utils import exclude_values
from crypto_data.binance.extract import get_candles, get_latest_candle_timestamp
from crypto_data.binance.schema import (
OPEN_TIME,
OPEN_PRICE,
CLOSE_PRICE,
HIGH_PRICE,
LOW_PRICE,
VOLUME,
COLUMNS,
)
from crypto_data.shared.candle_db import CandleDB
# candle_stream(
# symbol="btcusdt",
# interval="1h",
# candles=candles,
# on_open=on_open,
# on_close=on_close,
# on_candle=on_candle,
# on_candle_close=on_candle_close,
# )
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2639,
5459,
198,
198,
6738,
5254,
1330,
5254,
62,
6978,
198,
6738,
21473,
62,
7890,
13,
28710,
13,
26791,
1330,
19607,
62,
27160,
198,
6738,
21... | 2.261905 | 294 |
import numpy as np
from NiLBS.skinning.util import redistribute_weights
| [
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11556,
43,
4462,
13,
20407,
768,
13,
22602,
1330,
17678,
4163,
62,
43775,
198
] | 3.217391 | 23 |
import cv2
import youtube_dl
import numpy as np
import os
import time
FLASH_MINIMUM = 3
tmp_dir = 'temp/'
ex = {'format': 'worstvideo[vcodec^=avc1][fps=30]/worst[vcodec^=avc1][fps=30]/worstvideo[vcodec=vp9][fps=30]/worst[vcodec=vp9][fps=30]', 'outtmpl': 'temp/temp.%(ext)s', 'recode_video': 'webm'}
ytdl = youtube_dl.YoutubeDL(ex)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
# https://www.youtube.com/watch?v=atkD-beZ9oI # baseline test
# https://www.youtube.com/watch?v=Yw_YDvLWKnY # surreal video
# https://www.youtube.com/watch?v=OCpzajWSp6I # mlg video
# https://www.youtube.com/watch?v=FVY5uZ18-x8 #pokemon video
if __name__ == '__main__':
main()
| [
11748,
269,
85,
17,
198,
11748,
35116,
62,
25404,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
640,
198,
198,
3697,
11211,
62,
23678,
3955,
5883,
796,
513,
198,
198,
22065,
62,
15908,
796,
705,
29510,
14,
6,
198,... | 2.179487 | 312 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception V4 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
def block_inception_a(inputs, scope=None, reuse=None):
"""Builds Inception-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def block_reduction_a(inputs, scope=None, reuse=None):
"""Builds Reduction-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat([branch_0, branch_1, branch_2], 3)
def block_inception_b(inputs, scope=None, reuse=None):
"""Builds Inception-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat([branch_0, branch_1, branch_2], 3)
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat([
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')], 3)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat([
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')], 3)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
"""Creates the Inception V4 network up to the given final endpoint.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
final_endpoint: specifies the endpoint to construct the network up to.
It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
'Mixed_7d']
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV4', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 299 x 299 x 3
net = slim.conv2d(inputs, 32, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 149 x 149 x 32
net = slim.conv2d(net, 32, [3, 3], padding='VALID',
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 147 x 147 x 64
with tf.variable_scope('Mixed_3a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_0a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
scope='Conv2d_0a_3x3')
net = tf.concat([branch_0, branch_1], 3)
if add_and_check_final('Mixed_3a', net): return net, end_points
# 73 x 73 x 160
with tf.variable_scope('Mixed_4a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
net = tf.concat([branch_0, branch_1], 3)
if add_and_check_final('Mixed_4a', net): return net, end_points
# 71 x 71 x 192
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1], 3)
if add_and_check_final('Mixed_5a', net): return net, end_points
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):
block_scope = 'Mixed_5' + chr(ord('b') + idx)
net = block_inception_a(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net, 'Mixed_6a')
if add_and_check_final('Mixed_6a', net): return net, end_points
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):
block_scope = 'Mixed_6' + chr(ord('b') + idx)
net = block_inception_b(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net, 'Mixed_7a')
if add_and_check_final('Mixed_7a', net): return net, end_points
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
block_scope = 'Mixed_7' + chr(ord('b') + idx)
net = block_inception_c(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v4(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionV4',
create_aux_logits=True):
"""Creates the Inception V4 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxilliary logits.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v4_base(inputs, scope=scope)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# Auxiliary Head logits
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
# 17 x 17 x 1024
aux_logits = end_points['Mixed_6h']
aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
scope='Conv2d_1b_1x1')
aux_logits = slim.conv2d(aux_logits, 768,
aux_logits.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a')
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes,
activation_fn=None,
scope='Aux_logits')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
# 8 x 8 x 1536
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a')
# 1 x 1 x 1536
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
net = slim.flatten(net, scope='PreLogitsFlatten')
end_points['PreLogitsFlatten'] = net
# 1536
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_v4.default_image_size = 299
inception_v4_arg_scope = inception_utils.inception_arg_scope
| [
2,
15069,
1584,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.030542 | 7,596 |
from queue import Queue
from queue import Empty
from threading import Thread
from pyopentsdb import errors
from pyopentsdb.utils import request_post
from pyopentsdb.conf import QueryPointer
class IterableQueue(object):
""" Transform standard python Queue instance to iterable one"""
def __init__(self, source_queue):
"""
:param source_queue: queue.Queue, (mandatory)
"""
self.source_queue = source_queue
def tsdb_query_metrics_validation(**kwargs):
"""
looking for metric and all related and required arguments in kwargs specified in OpenTSDB http api
:param kwargs: dict
:return:
"""
# tsdb query kwargs have to contain 'metrics' argument
if not kwargs.get('metrics'):
raise errors.MissingArgumentError("Missing argument 'metrics' in query")
# metrics can contain more than one metric in list
for metric_object in kwargs['metrics']:
# each metric in metrics has to specify aggregator function
if not metric_object.get('metric') or not metric_object.get('aggregator'):
raise errors.MissingArgumentError("Missing argument 'metric' or 'aggregator' in metrics object")
# each metric can contain filters
if metric_object.get('filters'):
for metric_filter in metric_object['filters']:
# if filter is presented , it has contain 'type', 'tagk' and 'filter' (filter definition)
if not metric_filter.get('type') or not metric_filter.get('tagk') or \
metric_filter.get('filter') is None:
raise errors.MissingArgumentError(
"Missing argument 'type', 'tagk' or 'filter' in filters object")
def query(host, r_session, **kwargs):
    """
    Run a single OpenTSDB ``/api/query`` request.

    Pops the OpenTSDB-specific keys out of ``kwargs`` and builds the JSON
    payload; whatever remains in ``kwargs`` is forwarded to the HTTP layer.

    :param host: str; OpenTSDB host
    :param r_session: requests.Session
    :param kwargs: dict; must contain 'start' (datetime-like) and 'metrics'
    :return: whatever ``request_post`` returns -- presumably the parsed
        JSON response; confirm in ``pyopentsdb.utils``
    :raises errors.MissingArgumentError: if 'start' or a mandatory
        metrics key is missing
    """
    # todo: make sure kwargs of tsdb are not colliding kwargs of requests
    try:
        start = kwargs.pop('start')
    except KeyError:
        raise errors.MissingArgumentError("'start' is a required argument")
    try:
        tsdb_query_metrics_validation(**kwargs)
    except errors.MissingArgumentError as e:
        raise errors.MissingArgumentError(str(e))
    # general driven arguments
    end = kwargs.pop('end', None)
    ms_resolution = bool(kwargs.pop('ms', False))
    show_tsuids = bool(kwargs.pop('show_tsuids', False))
    no_annotations = bool(kwargs.pop('no_annotations', False))
    global_annotations = bool(kwargs.pop('global_annotations', False))
    show_summary = bool(kwargs.pop('show_summary', False))
    show_stats = bool(kwargs.pop('show_stats', False))
    show_query = bool(kwargs.pop('show_query', False))
    delete_match = bool(kwargs.pop('delete', False))
    timezone = kwargs.pop('timezone', 'UTC')
    use_calendar = bool(kwargs.pop('use_calendar', False))
    queries = kwargs.pop('metrics')
    # 'start' is converted via .timestamp(), so it is assumed to be a
    # datetime-like object -- TODO confirm against callers.
    # NOTE(review): 'start' is sent as a string while 'end' below is sent
    # as an int; OpenTSDB accepts both forms, but consider unifying.
    params = {
        'start': '{}'.format(int(start.timestamp())),
        'msResolution': ms_resolution,
        'showTSUIDs': show_tsuids,
        'noAnnotations': no_annotations,
        'globalAnnotations': global_annotations,
        'showSummary': show_summary,
        'showStats': show_stats,
        'showQuery': show_query,
        'delete': delete_match,
        'timezone': timezone,
        'useCalendar': use_calendar,
        'queries': list(),
    }
    if end:
        params.update({'end': int(end.timestamp())})
    params.update({'queries': queries})
    # Remaining kwargs become request options; the payload travels as 'data'.
    kwargs.update(dict(data=params))
    return request_post(api_url(host, pointer=QueryPointer.QUERY), r_session, **kwargs)
def multiquery(host, r_session, query_chunks, max_tsdb_concurrency=40, **kwargs):
    """
    OpenTSDB /api/query/ concurrency wrapper.

    Validates every chunk, feeds them to worker threads through a queue,
    waits for all workers, then merges the per-chunk results.

    :param host: str (mandatory); OpenTSDB host
    :param r_session: requests.Session
    :param query_chunks: list (mandatory); list of json serializable dicts representing OpenTSDB query
    :param max_tsdb_concurrency: int (optional), default=40; maximum number of concurrency
    threads hitting OpenTSDB api
    :return: list; concatenation of all worker results (see the final
        ``sum(..., list())``)
    """
    # NOTE(review): tsdb_worker is not defined in this chunk -- presumably a
    # worker reading query_queue elsewhere in the module; verify.
    # NOTE(review): __WORKER_RUN__ is a *local* variable here, so flipping it
    # to False cannot be observed by a module-level tsdb_worker -- confirm
    # how workers are actually told to stop (the "TERMINATOR" sentinels
    # below appear to be the real mechanism).
    __WORKER_RUN__ = True
    # todo: optimize, in case one of worker fail, terminate execution
    n_threads = min(len(query_chunks), max_tsdb_concurrency)
    query_queue = Queue(maxsize=len(query_chunks) + n_threads)
    result_queue = Queue(maxsize=len(query_chunks) + n_threads)
    error_queue = Queue()
    threads = list()
    try:
        for q in query_chunks:
            # validate all queries in query_chunks
            tsdb_query_metrics_validation(**q)
            # add query kwargs to queue for future execution in threads
            query_queue.put(q)
        # one sentinel per worker so each thread sees end-of-work
        for _ in range(n_threads):
            query_queue.put("TERMINATOR")
        for _ in range(n_threads):
            t = Thread(target=tsdb_worker)
            threads.append(t)
            t.daemon = True
            t.start()
        for t in threads:
            t.join()
    except KeyboardInterrupt:
        raise
    finally:
        __WORKER_RUN__ = False
    if not error_queue.empty():
        # if not empty, error_queue has to contain exception from tsdb_worker
        raise error_queue.get()
    if result_queue.qsize() != len(query_chunks):
        # this statement is probably not necessary
        raise errors.TsdbError("Number of queries and responses is not the same")
    # make sure any other kind of response code won't be propagated to this place and will be catched and processed
    # in previous part of code
    return sum([val for val in IterableQueue(result_queue)], list())
| [
6738,
16834,
1330,
4670,
518,
198,
6738,
16834,
1330,
33523,
198,
6738,
4704,
278,
1330,
14122,
198,
198,
6738,
12972,
404,
658,
9945,
1330,
8563,
198,
6738,
12972,
404,
658,
9945,
13,
26791,
1330,
2581,
62,
7353,
198,
6738,
12972,
404,... | 2.509267 | 2,266 |
import numpy as np
import torch
import torch.nn.functional as F
import sparseconvnet as scn
import data_util
UNK_THRESH = 2
#UNK_THRESH = 3
UNK_ID = -1
# note: weight_missing_geo must be > 1
# hierarchical loss
| [
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
11748,
29877,
42946,
3262,
355,
629,
77,
198,
198,
11748,
1366,
62,
22602,
198,
198,
4944,
42,
62,
4221,
19535,
39,
79... | 2.611765 | 85 |
r"""Integrator functions used when no closed forms are available.
Lead author: Nicolas Guigui.
These are designed for first order ODE written of a variable x and a time
variable t:
.. math::
\frac{dx}{dt} = force(x, t)
where :math: `x` is called the state variable. It may represent many
variables by stacking arrays, e.g. position and velocity in a geodesic
equation.
"""
from geomstats.errors import check_parameter_accepted_values
STEP_FUNCTIONS = {
"euler": "euler_step",
"symp_euler": "symplectic_euler_step",
"leapfrog": "leapfrog_step",
"rk4": "rk4_step",
"rk2": "rk2_step",
}
def euler_step(force, state, time, dt):
    """Advance the state by one explicit Euler step.

    Parameters
    ----------
    force : callable
        Vector field that is being integrated.
    state : array-like, shape=[2, dim]
        State at time t (position and velocity variables).
    time : float
        Time variable.
    dt : float
        Time-step in the integration.

    Returns
    -------
    new_state : array-like, shape=[2, dim]
        State at time t + dt.
    """
    # x_{t+dt} = x_t + f(x_t, t) * dt
    return state + force(state, time) * dt
def symplectic_euler_step(force, state, time, dt):
    """Compute one step of the symplectic euler approximation.

    Not implemented yet: registered in ``STEP_FUNCTIONS`` but calling it
    always raises.

    Parameters
    ----------
    state : array-like, shape=[2, dim]
        State at time t, corresponds to position and velocity variables at
        time t.
    force : callable
        Vector field that is being integrated.
    time : float
        Time variable.
    dt : float
        Time-step in the integration.

    Returns
    -------
    point_new : array-like, shape=[,,,, {dim, [n, n]}]
        First variable at time t + dt.
    vector_new : array-like, shape=[,,,, {dim, [n, n]}]
        Second variable at time t + dt.

    Raises
    ------
    NotImplementedError
        Always; no implementation is provided here.
    """
    raise NotImplementedError
def leapfrog_step(force, state, time, dt):
    """Compute one step of the leapfrog approximation.

    Not implemented yet: registered in ``STEP_FUNCTIONS`` but calling it
    always raises.

    Parameters
    ----------
    state : array-like, shape=[2, dim]
        State at time t, corresponds to position and velocity variables at
        time t.
    force : callable
        Vector field that is being integrated.
    time : float
        Time variable.
    dt : float
        Time-step in the integration.

    Returns
    -------
    point_new : array-like, shape=[,,,, {dim, [n, n]}]
        First variable at time t + dt.
    vector_new : array-like, shape=[,,,, {dim, [n, n]}]
        Second variable at time t + dt.

    Raises
    ------
    NotImplementedError
        Always; no implementation is provided here.
    """
    raise NotImplementedError
def rk2_step(force, state, time, dt):
    """Advance the state by one Runge-Kutta 2 (midpoint) step.

    Parameters
    ----------
    force : callable
        Vector field that is being integrated.
    state : array-like, shape=[2, dim]
        State at time t (position and velocity variables).
    time : float
        Time variable.
    dt : float
        Time-step in the integration.

    Returns
    -------
    new_state : array-like, shape=[2, dim]
        State at time t + dt.

    See Also
    --------
    https://en.wikipedia.org/wiki/Runge–Kutta_methods
    """
    half_dt = dt / 2
    # Slope at the start of the interval, then at its midpoint.
    slope_start = force(state, time)
    slope_mid = force(state + half_dt * slope_start, time + half_dt)
    # The midpoint slope drives the full step.
    return state + dt * slope_mid
def rk4_step(force, state, time, dt):
    """Advance the state by one classical Runge-Kutta 4 step.

    Parameters
    ----------
    force : callable
        Vector field that is being integrated.
    state : array-like, shape=[2, dim]
        State at time t (position and velocity variables).
    time : float
        Time variable.
    dt : float
        Time-step in the integration.

    Returns
    -------
    new_state : array-like, shape=[2, dim]
        State at time t + dt.

    See Also
    --------
    https://en.wikipedia.org/wiki/Runge–Kutta_methods
    """
    half_dt = dt / 2
    mid_time = time + half_dt
    # Four slope evaluations: start, two midpoint refinements, endpoint.
    slope_1 = force(state, time)
    slope_2 = force(state + half_dt * slope_1, mid_time)
    slope_3 = force(state + half_dt * slope_2, mid_time)
    slope_4 = force(state + dt * slope_3, time + dt)
    # Weighted average with the classical 1-2-2-1 weights.
    weighted_slope = slope_1 + 2 * slope_2 + 2 * slope_3 + slope_4
    return state + dt / 6 * weighted_slope
def integrate(function, initial_state, end_time=1.0, n_steps=10, step="euler"):
    """Compute the flow of a vector field on a regular time grid.

    Integrates the first-order ODE ``dx/dt = function(x, t)`` from 0 to
    ``end_time`` starting at ``initial_state``, recording the state after
    every elementary step.

    Parameters
    ----------
    function : callable
        Vector field to integrate.
    initial_state : tuple of arrays
        Initial position and speed.
    end_time : float
        Final integration time.
        Optional, default : 1.
    n_steps : int
        Number of integration steps to use.
        Optional, default : 10.
    step : str, {'euler', 'symp_euler', 'leapfrog', 'rk4', 'rk2'}
        Numerical scheme to use for elementary integration steps; must be a
        key of ``STEP_FUNCTIONS`` ('symp_euler' and 'leapfrog' are declared
        but their steppers currently raise ``NotImplementedError``).
        Optional, default : 'euler'.

    Returns
    -------
    final_state : list
        Sequence of solutions, one every ``end_time / n_steps``; the first
        element is ``initial_state`` itself, so the list has ``n_steps + 1``
        entries, each shaped like ``initial_state``.
    """
    check_parameter_accepted_values(step, "step", STEP_FUNCTIONS)
    dt = end_time / n_steps
    states = [initial_state]
    current_state = initial_state
    # Resolve the stepper by name from this module's globals;
    # STEP_FUNCTIONS maps the public scheme name to the function name.
    step_function = globals()[STEP_FUNCTIONS[step]]
    for i in range(n_steps):
        current_state = step_function(
            state=current_state, force=function, time=i * dt, dt=dt
        )
        states.append(current_state)
    return states
| [
81,
37811,
34500,
12392,
5499,
973,
618,
645,
4838,
5107,
389,
1695,
13,
198,
198,
20451,
1772,
25,
29737,
1962,
328,
9019,
13,
198,
198,
4711,
389,
3562,
329,
717,
1502,
440,
7206,
3194,
286,
257,
7885,
2124,
290,
257,
640,
198,
45... | 2.547081 | 2,347 |
"""
This module schedules all the tasks according to config.rules.
"""
import click
import logging
import multiprocessing
import schedule
import time
from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from haipproxy.client import SquidClient
from haipproxy.config.rules import CRAWLER_TASKS, CRAWLER_QUEUE_MAPS
from haipproxy.crawler.spiders import SPIDER_MAP
from haipproxy.settings import (
SPIDER_AJAX_Q,
SPIDER_GFW_Q,
SPIDER_AJAX_GFW_Q,
TIMER_RECORDER,
)
from haipproxy.utils import get_redis_conn, acquire_lock, release_lock
DEFAULT_CRAWLER_QS = [SPIDER_AJAX_Q, SPIDER_GFW_Q, SPIDER_AJAX_GFW_Q]
logger = logging.getLogger(__name__)
def scheduler_start(tasks):
    """Start the crawler scheduler: run all tasks now, then on a delay.

    NOTE(review): the ``tasks`` argument is currently ignored --
    ``CRAWLER_TASKS`` is always used instead; confirm whether ``tasks``
    should be passed through to the scheduler.
    NOTE(review): ``CrawlerScheduler`` is neither imported nor defined in
    this chunk; it is presumably defined elsewhere in the module -- verify.
    """
    default_tasks = CRAWLER_TASKS
    SchedulerCls = CrawlerScheduler
    scheduler = SchedulerCls(default_tasks)
    scheduler.schedule_all_right_now()
    scheduler.schedule_with_delay()
| [
37811,
198,
1212,
8265,
24025,
477,
262,
8861,
1864,
284,
4566,
13,
38785,
13,
198,
37811,
198,
11748,
3904,
198,
11748,
18931,
198,
11748,
18540,
305,
919,
278,
198,
11748,
7269,
198,
11748,
640,
198,
198,
6738,
15881,
88,
13,
66,
39... | 2.734247 | 365 |
import os
import subprocess as sp
from .srbColour import Colour
| [
11748,
28686,
198,
11748,
850,
14681,
355,
599,
198,
198,
6738,
764,
82,
26145,
5216,
454,
1330,
38773,
628
] | 3.473684 | 19 |
#!/usr/bin/env python
# coding: utf8
from __future__ import unicode_literals
import random
import operator
from typing import Dict
categories = {'FAULT': 0, 'INFO': 0, 'TOXIC': 0, 'REPAIR': 0}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
4738,
198,
11748,
10088,
198,
6738,
19720,
1330,
360,
713,
198,
198,
66,
... | 2.8 | 70 |
input_num = '22235253534090'
reverse(input_num)
| [
198,
15414,
62,
22510,
796,
705,
1828,
22370,
1495,
2327,
23601,
3829,
6,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
198,
50188,
7,
15414,
62,
22510,
8,
628,
198
] | 1.90625 | 32 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""cct model"""
import mindspore.common.initializer as weight_init
import mindspore.nn as nn
from src.models.cct.tokenizer import Tokenizer
from src.models.cct.transformers import TransformerClassifier
from src.models.cct.var_init import KaimingNormal
class CCT(nn.Cell):
    """CCT (Compact Convolutional Transformer) model.

    NOTE(review): only ``init_weights`` is visible in this chunk, yet
    ``_cct`` instantiates ``CCT(num_layers=..., ...)`` with keyword
    arguments -- the constructor (tokenizer + transformer assembly) is
    presumably defined elsewhere; confirm before using this class alone.
    """

    def init_weights(self):
        """Initialise weights of all sub-cells in-place.

        Conv2d weights: Kaiming normal, fan-in mode.
        Dense weights: truncated normal (sigma=0.02); Dense biases: zero.
        """
        for _, cell in self.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                cell.weight.set_data(
                    weight_init.initializer(
                        KaimingNormal(
                            mode='fan_in'),
                        cell.weight.shape,
                        cell.weight.dtype))
            elif isinstance(cell, nn.Dense):
                cell.weight.set_data(
                    weight_init.initializer(
                        weight_init.TruncatedNormal(
                            sigma=0.02),
                        cell.weight.shape,
                        cell.weight.dtype))
                # Dense layers may be created without bias.
                if cell.bias is not None:
                    cell.bias.set_data(
                        weight_init.initializer(
                            weight_init.Zero(),
                            cell.bias.shape,
                            cell.bias.dtype))
def _cct(arch,
         num_layers,
         num_heads,
         mlp_ratio,
         embedding_dim,
         kernel_size=3,
         stride=None,
         padding=None,
         **kwargs):
    """Build a CCT model from hyper-parameters.

    ``arch`` is only used for logging.  When ``stride``/``padding`` are
    None they are derived from the tokenizer kernel size:
    stride = max(1, k//2 - 1), padding = max(1, k//2).
    Remaining ``kwargs`` (e.g. img_size, positional_embedding,
    num_classes, n_conv_layers) are forwarded to the CCT constructor.
    """
    print(f'=> using arch: {arch}')
    # Derive default conv tokenizer stride/padding from the kernel size.
    stride = stride if stride is not None else max(1, (kernel_size // 2) - 1)
    padding = padding if padding is not None else max(1, (kernel_size // 2))
    model = CCT(num_layers=num_layers,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                embedding_dim=embedding_dim,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                **kwargs)
    return model
def cct_2(arch, **kwargs):
"""cct_2"""
return _cct(
arch,
num_layers=2,
num_heads=2,
mlp_ratio=1,
embedding_dim=128,
**kwargs)
def cct_4(arch, **kwargs):
"""cct_4"""
return _cct(
arch,
num_layers=4,
num_heads=2,
mlp_ratio=1,
embedding_dim=128,
**kwargs)
def cct_6(arch, **kwargs):
"""cct_6"""
return _cct(
arch,
num_layers=6,
num_heads=4,
mlp_ratio=2,
embedding_dim=256,
**kwargs)
def cct_7(arch, **kwargs):
"""cct_7"""
return _cct(
arch,
num_layers=7,
num_heads=4,
mlp_ratio=2,
embedding_dim=256,
**kwargs)
def cct_14(arch, **kwargs):
"""cct_14"""
return _cct(
arch,
num_layers=14,
num_heads=6,
mlp_ratio=3,
embedding_dim=384,
**kwargs)
def cct_2_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_2_3x2_32"""
return cct_2(
'cct_2_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_2_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_2_3x2_32_sine"""
return cct_2(
'cct_2_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_4_3x2_32(
        img_size=32,
        positional_embedding='learnable',
        num_classes=10,
        **kwargs):
    """cct_4_3x2_32: CCT-4, 3x3 kernel, 2 conv layers, 32x32 input."""
    return cct_4(
        'cct_4_3x2_32',
        kernel_size=3,
        n_conv_layers=2,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_4_3x2_32_sine(
        img_size=32,
        positional_embedding='sine',
        num_classes=10,
        **kwargs):
    """cct_4_3x2_32_sine: CCT-4, 3x3 kernel, 2 conv layers, sine embedding."""
    return cct_4(
        'cct_4_3x2_32_sine',
        kernel_size=3,
        n_conv_layers=2,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x1_32(img_size=32, positional_embedding='learnable', num_classes=10,
                 **kwargs):
    """cct_6_3x1_32: CCT-6, 3x3 kernel, 1 conv layer, 32x32 input."""
    return cct_6(
        'cct_6_3x1_32',
        kernel_size=3,
        n_conv_layers=1,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x1_32_sine(
        img_size=32,
        positional_embedding='sine',
        num_classes=10,
        **kwargs):
    """cct_6_3x1_32_sine: CCT-6, 3x3 kernel, 1 conv layer, sine embedding."""
    return cct_6(
        'cct_6_3x1_32_sine',
        kernel_size=3,
        n_conv_layers=1,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x2_32(
        img_size=32,
        positional_embedding='learnable',
        num_classes=10,
        **kwargs):
    """cct_6_3x2_32: CCT-6, 3x3 kernel, 2 conv layers, 32x32 input."""
    return cct_6(
        'cct_6_3x2_32',
        kernel_size=3,
        n_conv_layers=2,
        img_size=img_size,
        positional_embedding=positional_embedding,
        num_classes=num_classes,
        **kwargs)
def cct_6_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_6_3x2_32_sine"""
return cct_6(
'cct_6_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_7_3x1_32"""
return cct_7(
'cct_7_3x1_32',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_7_3x1_32_sine"""
return cct_7(
'cct_7_3x1_32_sine',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_c100(
img_size=32,
positional_embedding='learnable',
num_classes=100,
**kwargs):
"""cct_7_3x1_32_c100"""
return cct_7(
'cct_7_3x1_32_c100',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x1_32_sine_c100(
img_size=32,
positional_embedding='sine',
num_classes=100,
**kwargs):
"""cct_7_3x1_32_sine_c100"""
return cct_7(
'cct_7_3x1_32_sine_c100',
kernel_size=3,
n_conv_layers=1,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x2_32(
img_size=32,
positional_embedding='learnable',
num_classes=10,
**kwargs):
"""cct_7_3x2_32"""
return cct_7(
'cct_7_3x2_32',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_3x2_32_sine(
img_size=32,
positional_embedding='sine',
num_classes=10,
**kwargs):
"""cct_7_3x2_32_sine"""
return cct_7(
'cct_7_3x2_32_sine',
kernel_size=3,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_7_7x2_224(
img_size=224,
positional_embedding='learnable',
num_classes=102):
"""cct_7_7x2_224"""
return cct_7(
'cct_7_7x2_224',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes)
def cct_7_7x2_224_sine(
img_size=224,
positional_embedding='sine',
num_classes=102,
**kwargs):
"""cct_7_7x2_224_sine"""
return cct_7(
'cct_7_7x2_224_sine',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_224(
img_size=224,
positional_embedding='learnable',
num_classes=1000,
**kwargs):
"""cct_14_7x2_224"""
return cct_14(
'cct_14_7x2_224',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_384(
img_size=384,
positional_embedding='learnable',
num_classes=1000,
**kwargs):
"""cct_14_7x2_384"""
return cct_14(
'cct_14_7x2_384',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
def cct_14_7x2_384_fl(
img_size=384,
positional_embedding='learnable',
num_classes=102,
**kwargs):
"""cct_14_7x2_384_fl"""
return cct_14(
'cct_14_7x2_384_fl',
kernel_size=7,
n_conv_layers=2,
img_size=img_size,
positional_embedding=positional_embedding,
num_classes=num_classes,
**kwargs)
| [
2,
15069,
33448,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 1.813442 | 5,907 |
from array import array
def _copytobuffer(x):
"""
return a copy of x as an object that supports the python Buffer
API (python array if input is float, list or tuple, numpy array
if input is a numpy array). returns copyofx, isfloat, islist,
istuple (islist is True if input is a list, istuple is true if
input is a tuple, isfloat is true if input is a float).
"""
# make sure x supports Buffer API and contains doubles.
isfloat = False
islist = False
istuple = False
# first, if it's a numpy array scalar convert to float
# (array scalars don't support buffer API)
if hasattr(x, "shape"):
if x.shape == ():
return _copytobuffer_return_scalar(x)
else:
try:
# typecast numpy arrays to double.
# (this makes a copy - which is crucial
# since buffer is modified in place)
x.dtype.char
# Basemap issue
# https://github.com/matplotlib/basemap/pull/223/files
# (deal with input array in fortran order)
inx = x.copy(order="C").astype("d")
# inx,isfloat,islist,istuple
return inx, False, False, False
except:
try: # perhaps they are Numeric/numarrays?
# sorry, not tested yet.
# i don't know Numeric/numarrays has `shape'.
x.typecode()
inx = x.astype("d")
# inx,isfloat,islist,istuple
return inx, False, False, False
except:
raise TypeError("input must be an array, list, tuple or scalar")
else:
# perhaps they are regular python arrays?
if hasattr(x, "typecode"):
# x.typecode
inx = array("d", x)
# try to convert to python array
# a list.
elif type(x) == list:
inx = array("d", x)
islist = True
# a tuple.
elif type(x) == tuple:
inx = array("d", x)
istuple = True
# a scalar?
else:
return _copytobuffer_return_scalar(x)
return inx, isfloat, islist, istuple
| [
6738,
7177,
1330,
7177,
628,
198,
198,
4299,
4808,
30073,
83,
672,
13712,
7,
87,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1441,
257,
4866,
286,
2124,
355,
281,
2134,
326,
6971,
262,
21015,
47017,
198,
220,
220,
220,
7824... | 2.011576 | 1,123 |
import importlib

# Package that hosts one spider module per bookmaker.
package = 'aao.spiders.bookmakers'

# Resolve each spider class by relative module name so that adding a
# bookmaker only needs a new module plus an entry in `spiders` below.
SpiderBet365 = importlib.import_module(
    '.bet365', package).SpiderBet365
SpiderBwin = importlib.import_module(
    '.bwin', package).SpiderBwin
Spider888sport = importlib.import_module(
    '.888sport', package).Spider888sport
SpiderWilliamhill = importlib.import_module(
    '.williamhill', package).SpiderWilliamhill

# Registry mapping bookmaker key -> spider class.
spiders = {
    'bet365': SpiderBet365,
    'bwin': SpiderBwin,
    '888sport': Spider888sport,
    'williamhill': SpiderWilliamhill,
}
| [
11748,
1330,
8019,
198,
198,
26495,
796,
705,
64,
5488,
13,
2777,
4157,
13,
2070,
6620,
6,
198,
198,
41294,
13056,
24760,
796,
1330,
8019,
13,
11748,
62,
21412,
7,
198,
220,
220,
220,
45302,
11181,
24760,
3256,
5301,
737,
41294,
13056... | 2.824176 | 182 |
#!/usr/bin/env python3
from reporting.category import Category
from statsSend.jenkins.jenkinsBuild import JenkinsBuild | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
6447,
13,
22872,
1330,
21743,
198,
6738,
9756,
25206,
13,
48796,
5331,
13,
48796,
5331,
15580,
1330,
21835,
15580
] | 3.83871 | 31 |
#! /usr/bin/env python3
# SPDX-FileCopyrightText: 2022 geisserml <geisserml@gmail.com>
# SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
# Download the PDFium binaries and generate ctypes bindings
import os
import sys
import shutil
import tarfile
import argparse
import traceback
from urllib import request
from os.path import join, abspath, dirname
from concurrent.futures import ThreadPoolExecutor
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from pl_setup.packaging_base import (
DataTree,
VerNamespace,
PlatformNames,
run_cmd,
call_ctypesgen,
set_version,
)
ReleaseRepo = "https://github.com/bblanchon/pdfium-binaries"
ReleaseURL = ReleaseRepo + "/releases/download/chromium%2F"
ReleaseExtension = "tgz"
ReleaseNames = {
PlatformNames.darwin_x64 : "pdfium-mac-x64",
PlatformNames.darwin_arm64 : "pdfium-mac-arm64",
PlatformNames.linux_x64 : "pdfium-linux-x64",
PlatformNames.linux_x86 : "pdfium-linux-x86",
PlatformNames.linux_arm64 : "pdfium-linux-arm64",
PlatformNames.linux_arm32 : "pdfium-linux-arm",
PlatformNames.musllinux_x64 : "pdfium-linux-musl-x64",
PlatformNames.musllinux_x86 : "pdfium-linux-musl-x86",
PlatformNames.windows_x64 : "pdfium-win-x64",
PlatformNames.windows_x86 : "pdfium-win-x86",
PlatformNames.windows_arm64 : "pdfium-win-arm64",
}
if __name__ == "__main__":
run_cli()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33160,
4903,
747,
263,
4029,
1279,
469,
747,
263,
4029,
31,
14816,
13,
785,
29,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
... | 2.535587 | 562 |
from gin.i_o.from_smiles import to_mols
import pandas as pd

# Load SMILES strings from the Delaney (ESOL) solubility dataset and parse
# them into molecule objects, printing each one.
df = pd.read_csv('data/delaney-processed.csv')
smiles_array = df[['smiles']].values.flatten()
mols = to_mols(smiles_array)
for mol in mols:
    print(mol)
| [
6738,
39733,
13,
72,
62,
78,
13,
6738,
62,
5796,
2915,
1330,
284,
62,
76,
10220,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
7568,
796,
279,
67,
13,
961,
62,
40664,
10786,
7890,
14,
12381,
22297,
12,
14681,
276,
13,
40664,
115... | 2.333333 | 93 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-01-16 17:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
24,
319,
2177,
12,
486,
12,
1433,
1596,
25,
1065,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
19... | 2.724638 | 69 |
# Scan "name:number" entries for a digit.
PhoneDirectory = ['John:009878788677' , 'Jefrey:67654654645' , 'Maria:8787677766']
for entry in PhoneDirectory:
    # NOTE(review): '7' is searched in the *whole* "name:number" string, so
    # a '7' in the name part would also match -- confirm that is intended.
    if '7' in entry:
        print('yeah')
| [
6132,
43055,
796,
37250,
7554,
25,
405,
4089,
41019,
3459,
40179,
6,
837,
705,
41,
891,
4364,
25,
3134,
39111,
2996,
3510,
2231,
6,
837,
705,
46827,
25,
23,
3695,
32059,
3324,
2791,
20520,
628,
198,
1640,
5726,
287,
14484,
43055,
25,
... | 2.378788 | 66 |
# Advent of Code 2021 day 15, part 2: minimum-risk path through a cave grid
# built by tiling the input 5x5 with wrapped-around risk values.
FILENAME = './puzzle15/data/input'
small_cave = []
with open(FILENAME) as file:
    for line in file:
        small_cave.append([int(x) for x in list(line.strip())])
# Expand the tile 5x in each direction; each copy adds its tile distance
# (change_i + change_j) to every cell, wrapping values above 9 back to 1..9.
small_n = len(small_cave)
large_n = small_n * 5
cave = [[ 0 for _ in range(large_n)] for _ in range(large_n)]
for i in range(large_n):
    for j in range(large_n):
        change_i, i_l = divmod(i, small_n)
        change_j, j_l = divmod(j, small_n)
        if small_cave[i_l][j_l] + change_i + change_j > 9:
            cave[i][j] = small_cave[i_l][j_l] - 9 + change_i + change_j
        else:
            cave[i][j] = small_cave[i_l][j_l] + change_i + change_j
# Seed scores[i][j] with the cost of a right/down-only path from (i, j) to
# the bottom-right corner, computed in one backward sweep.
scores = [[ 0 for _ in range(len(cave))] for _ in range(len(cave))]
for i in range(len(cave) - 1, -1 , -1):
    for j in range(len(cave) - 1, -1 , -1):
        if i < len(cave) - 1 and j < len(cave) - 1:
            scores[i][j] = cave[i][j] + min([scores[i + 1][j], scores[i][j + 1]])
        elif i < len(cave) - 1 and j == len(cave) - 1:
            scores[i][j] = cave[i][j] + scores[i + 1][j]
        elif i == len(cave) - 1 and j < len(cave) - 1:
            scores[i][j] = cave[i][j] + scores[i][j + 1]
        elif i == len(cave) - 1 and j == len(cave) - 1:
            scores[i][j] = cave[i][j]
# Relax estimates using all four neighbours (value iteration) until the
# grid total stops changing, so up/left detours are accounted for.
# Neighbour labels used below:
# b
#   a   c
# d
prev_value = 1000000000
current_value = 100000000
while current_value != prev_value:
    prev_value = current_value
    for i in range(0, len(cave)):
        for j in range(0, len(cave)):
            a, b, c, d = 100000, 100000, 100000, 100000
            if i > 0:
                a = scores[i - 1][j]
            if j > 0:
                b = scores[i][j - 1]
            if i < len(cave) - 1:
                d = scores[i + 1][j]
            if j < len(cave) - 1:
                c = scores[i][j + 1]
            if i < len(cave) - 1 and j < len(cave) - 1:
                scores[i][j] = cave[i][j] + min([a, b, c, d])
    current_value = sum([sum(x) for x in scores])
    print(current_value)
# Answer: total path risk excluding the starting cell's own risk.
print(scores[0][0] - cave[0][0])
46700,
1677,
10067,
796,
705,
19571,
79,
9625,
1314,
14,
7890,
14,
15414,
6,
201,
198,
201,
198,
17470,
62,
66,
1015,
796,
17635,
201,
198,
4480,
1280,
7,
46700,
1677,
10067,
8,
355,
2393,
25,
201,
198,
220,
220,
220,
329,
1627,
2... | 1.804214 | 1,139 |
#!/bin/python3
# Print a greeting to stdout.
print("Hello, World!")
2,
48443,
8800,
14,
29412,
18,
198,
198,
4798,
7203,
15496,
11,
2159,
2474,
8
] | 2.533333 | 15 |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# https://developer.apple.com/library/archive/documentation/Security/Conceptual/CodeSigningGuide/Procedures/Procedures.html
#------------------------------------------------------------------------------
import sys, os, subprocess
#------------------------------------------------------------------------------
# FOR PRINTING IN COLOR
#------------------------------------------------------------------------------
BLACK = '\033[90m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
MAGENTA = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BLINK = '\033[5m'
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
#--- Get script absolute path
scriptDir = os.path.dirname (os.path.abspath (sys.argv [0]))
#--- Free routing dir
FREEROUTING_DIR = scriptDir + "/freerouting"
APP_VERSION = "1.4.4-pm"
#--- Goto Freerouting dir
os.chdir (FREEROUTING_DIR)
#--- Compile for distribution
runCommand (["bash", "gradlew", "dist"])
print (BLUE + BOLD + "DONE" + ENDC)
#--- Download and install JDK
# https://jdk.java.net/14/
JPACKAGE_JVM="https://download.java.net/java/GA/jdk14/076bab302c7b4508975440c56f6cc26a/36/GPL/openjdk-14_osx-x64_bin.tar.gz"
JPKG_DIR = scriptDir + "/jdk14"
JPKG_HOME = JPKG_DIR + "/jdk-14.jdk/Contents/Home"
JPKG_ARCHIVE = "jdk14.tar.gz"
if os.path.exists (JPKG_HOME) :
print (BLUE + BOLD + "JDK already installed" + ENDC)
else:
if not os.path.exists (JPKG_DIR) :
runCommand (["mkdir", "-p", JPKG_DIR])
os.chdir (JPKG_DIR)
#--- Download ?
if not os.path.exists (JPKG_ARCHIVE) :
print (BLUE + "Download JDK" + ENDC)
runCommand (["curl", "-o", JPKG_ARCHIVE, JPACKAGE_JVM])
#--- Install ?
if not os.path.exists (JPKG_DIR + "/runtime") :
print (BLUE + "Unpack JDK" + ENDC)
runCommand (["tar", "xvzf", JPKG_ARCHIVE])
print (BLUE + "Create runtime image" + ENDC)
runCommand ([
JPKG_HOME + "/bin/jlink",
"--module-path", JPKG_HOME + "/jmods",
"--add-modules", "java.desktop",
"--strip-debug",
"--no-header-files",
"--no-man-pages",
"--strip-native-commands",
"--vm=server",
"--compress=2",
"--output", "runtime"
])
#--- Build executable
os.chdir (scriptDir)
FREE_ROUTING_NAME = "Freerouting-" + APP_VERSION
runCommand (["rm", "-fr", FREE_ROUTING_NAME + ".app"])
runCommand ([
JPKG_HOME + "/bin/jpackage",
"--input", FREEROUTING_DIR + "/build/dist/",
"--name", FREE_ROUTING_NAME,
"--main-jar", "freerouting-executable.jar",
"--type", "app-image",
"--runtime-image", "jdk14/runtime",
# "--mac-sign",
# "--mac-signing-key-user-name", "pierre@pcmolinaro.name",
"--app-version", APP_VERSION
])
runCommand ([
"/usr/bin/codesign",
"--force",
"--sign", "Apple Development: pierre@pcmolinaro.name",
"--deep",
FREE_ROUTING_NAME + ".app"
])
runCommand ([
"/usr/bin/codesign",
"-dv",
"--verbose=4",
FREE_ROUTING_NAME + ".app"
])
runCommand ([
"/usr/bin/codesign",
"--verify",
"--deep",
"--strict",
"--verbose=2",
FREE_ROUTING_NAME + ".app"
])
# runCommand ([
# "spctl",
# "-a",
# FREE_ROUTING_NAME + ".app"
# ])
# runCommand ([
# "spctl",
# "--assess",
# "--verbose=4",
# "--type", "execute",
# FREE_ROUTING_NAME + ".app"
# ])
#--- Build DMG
PACKAGE_FILE = FREE_ROUTING_NAME + ".pkg"
runCommand (["/usr/bin/productbuild", "--component-compression", "auto", "--component", FREE_ROUTING_NAME + ".app", "/Applications", PACKAGE_FILE])
DISTRIBUTION_DIR = "Freerouting-" + APP_VERSION
runCommand (["/bin/rm", "-rf", DISTRIBUTION_DIR])
runCommand (["/bin/rm", "-f", FREE_ROUTING_NAME + ".dmg"])
runCommand (["/bin/mkdir", DISTRIBUTION_DIR])
runCommand (["/bin/cp", PACKAGE_FILE, DISTRIBUTION_DIR])
runCommand (["/usr/bin/hdiutil", "create", "-srcfolder", FREE_ROUTING_NAME, FREE_ROUTING_NAME + ".dmg", "-fs", "HFS+"])
runCommand (["/bin/rm", PACKAGE_FILE])
runCommand (["/bin/rm", "-rf", DISTRIBUTION_DIR])
#------------------------------------------------------------------------------
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
2,
10097,
26171,
198,
2,
3740,
1378,
16244,
263,
13,
18040,
13,
785,
14,
32016,
14,
17474,
14,
22897,
341,
14,
... | 2.616447 | 1,666 |
from __future__ import print_function
import argparse
from cProfile import label
from dis import dis
import os
import random
from socket import MSG_DONTROUTE
from sklearn import cluster
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import LidarDataset, BoxDataset
from pointnet.box_model import BoxNet
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
from model_utils import BoxNetLoss, parse_output_to_tensors, get_box3d_corners_helper, get_box3d_corners
import open3d as o3d
from provider import angle2class, size2class, class2angle, class2size, compute_box3d_iou, size2class2, give_pred_box_corners, get_3d_box
#from viz_util import draw_lidar, draw_lidar_simple
# Shared loss module and geometry constants for the box-regression head.
Loss = BoxNetLoss()
NUM_HEADING_BIN = 12  # heading (yaw) angle is classified into 12 bins
NUM_SIZE_CLUSTER = 3 # one cluster for each type
NUM_OBJECT_POINT = 512  # presumably points per object proposal — TODO confirm against dataset
def boxes_to_corners_3d(boxes3d):
    """
    Expand (N, 7) boxes into their eight 3-D corner points.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d: (N, 7) array [x, y, z, dx, dy, dz, heading];
            (x, y, z) is the box center.
    Returns:
        corners3d: (N, 8, 3) array of corner coordinates.
    """
    # Corner offsets of an axis-aligned unit cube centered at the origin,
    # ordered to match the diagram above.
    unit_corners = np.array([
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    ]) / 2
    # Stretch the unit cube by each box's (dx, dy, dz) extents.
    scaled = boxes3d[:, None, 3:6] * unit_corners[None, :, :]
    # Spin the corners around z by the heading, then translate to the center.
    rotated = rotate_points_along_z(scaled, boxes3d[:, 6]).reshape(-1, 8, 3)
    return rotated + boxes3d[:, None, 0:3]
def rotate_points_along_z(points, angle):
    """
    Rotate a batch of point sets about the z-axis.

    Args:
        points: (B, N, 3) array of coordinates.
        angle: (B,) angles in radians; a positive angle turns +x toward +y.

    Returns:
        (B, N, 3) array of rotated coordinates.
    """
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    one = np.ones_like(angle, dtype=np.float32)
    zero = np.zeros_like(angle, dtype=np.float32)
    # Assemble one row-major 3x3 rotation matrix per batch element: (B, 3, 3).
    rot = np.stack(
        (cos_a, sin_a, zero, -sin_a, cos_a, zero, zero, zero, one),
        axis=1,
    ).reshape(-1, 3, 3)
    # Points are row vectors, so the matrix multiplies on the right.
    return np.matmul(points, rot)
# ---- Command-line options ----
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--num_points', type=int, default=128, help='input size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset', type=str, required=False, help="dataset path")
parser.add_argument('--dataset_type', type=str, default='bbox', help="dataset type bbox|lidar")
opt = parser.parse_args()
print(opt)
# ANSI escape wrapper: render a string in blue on the terminal.
blue = lambda x: '\033[94m' + x + '\033[0m'
# Seed all RNGs for reproducibility (seed is random but printed for replay).
opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
# ---- Datasets ----
# NOTE(review): the --dataset path is ignored; dataset roots are hard-coded.
if opt.dataset_type == 'bbox':
    box_dataset = BoxDataset(
        #root=opt.dataset,
        root='train_unbbox_dataset',
        classification=True,
        npoints=opt.num_points,
        data_augmentation=False)
    test_box_dataset = BoxDataset(
        #root=opt.dataset,
        root='test_unbbox_dataset',
        classification=True,
        split='test',
        npoints=opt.num_points,
        data_augmentation=False)
else:
    exit('wrong dataset type')
box_dataloader = torch.utils.data.DataLoader(
    box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))
testboxdataloader = torch.utils.data.DataLoader(
    test_box_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))
print(len(box_dataset), len(test_box_dataset))
num_classes = len(box_dataset.classes)
print('classes', num_classes)
# Create the checkpoint output folder if it does not already exist.
try:
    os.makedirs(opt.outf)
except OSError:
    pass
# ---- Model, optimizer, learning-rate schedule ----
classifier = BoxNet(n_classes=num_classes, n_channel=3)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999),eps=1e-08, weight_decay=0.0)
#scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=20, gamma=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
#optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()
num_batch = len(box_dataset) / opt.batchSize  # float; used only in progress prints
# ---- Live loss plot (interactive matplotlib) ----
plt.ion()
figure = plt.figure()
ax = figure.add_subplot(111)
idx = []
test_loss = []
train_loss = []
plot1, = ax.plot(idx, test_loss, label='test')
plot2, = ax.plot(idx, train_loss, label='train')
plt.ylim(0, 10)
plt.xlim(0, 158200)
plt.xlabel("i")
plt.ylabel("loss")
plt.legend(loc="lower left")
plt.title("loss-iteration")
# ---- Training loop ----
# Each epoch: iterate training batches; every 10th batch, evaluate one test
# batch and refresh the live loss plot. A checkpoint is saved per epoch.
for epoch in range(opt.nepoch):
    # NOTE(review): scheduler.step() runs at the top of the epoch, before any
    # optimizer.step() — recent PyTorch expects the opposite order; confirm.
    scheduler.step()
    for i, data in enumerate(box_dataloader, 0):
        points, bbox_target, target, _, dist, cluster_center, voxel = data
        # points are relative to their cluster center; points1 is in world
        # coordinates (only used by the debug visualization below).
        points1 = points + cluster_center[:, None]
        target = target[:, 0]
        dist = dist[:, None]
        voxel = voxel[:, :, None]
        # transform target scalar to 3x one hot vector
        hot1 = torch.zeros(len(data[0]))
        hot1[target == 0] = 1
        hot2 = torch.zeros(len(data[0]))
        hot2[target == 2] = 1
        hot3 = torch.zeros(len(data[0]))
        hot3[target == 1] = 1
        one_hot = torch.vstack((hot1, hot2, hot3))
        one_hot = one_hot.transpose(1, 0)
        points = points.transpose(2, 1)
        points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
        optimizer.zero_grad()
        classifier = classifier.train()
        # NN
        box_pred, center_delta = classifier(points, one_hot, dist, voxel)
        center_boxnet, \
        heading_scores, heading_residual_normalized, heading_residual, \
        size_scores, size_residual_normalized, size_residual = \
            parse_output_to_tensors(box_pred)
        #box3d_center = center_boxnet + center_delta
        stage1_center = cluster_center + center_delta # original cluster center in the world
        box3d_center = center_boxnet + stage1_center
        # heading_scores (32, 12) which bin is the heading
        # heading_residual (32, 12) residual angle
        # size_scores (32, 3) which bin is the size
        # size_residual (32, 3, 3) residual size
        '''
        2.Center
        center: torch.Size([32, 3]) torch.float32
        stage1_center: torch.Size([32, 3]) torch.float32
        center_label:[32,3]
        3.Heading
        heading_scores: torch.Size([32, 12]) torch.float32
        heading_residual_normalized: torch.Size([32, 12]) torch.float32
        heading_residual: torch.Size([32, 12]) torch.float32
        heading_class_label:(32)
        heading_residual_label:(32)
        4.Size
        size_scores: torch.Size([32, 8]) torch.float32
        size_residual_normalized: torch.Size([32, 8, 3]) torch.float32
        size_residual: torch.Size([32, 8, 3]) torch.float32
        size_class_label:(32)
        size_residual_label:(32,3)'''
        # compute GT
        bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
        box3d_center_label = bbox_target[:,:3]
        angle = bbox_target[:, 6]
        heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
        size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
        #print(' ')
        #print(heading_class_label)
        #print(heading_scores.data.max(1)[1])
        #print(heading_residual_label)
        #print(heading_residual)
        #print(size_class_label)
        #print(size_scores.data.max(1)[1])
        #print(size_residual_label)
        #scls_onehot = torch.eye(NUM_SIZE_CLUSTER)[size_class_label.long()].cuda() # 32,8
        #scls_onehot_repeat = scls_onehot.view(-1, NUM_SIZE_CLUSTER, 1).repeat(1, 1, 3) # 32,8,3
        #predicted_size_residual = torch.sum( \
        #    size_residual * scls_onehot_repeat.cuda(), dim=1)#32,3
        #print(size_residual_label-predicted_size_residual)
        #print(size_residual_label-size_residual)
        #print(box3d_center_label)
        #print(box3d_center)
        #print(' ')
        # losses
        losses = Loss(box3d_center, box3d_center_label, stage1_center, \
                      heading_scores, heading_residual_normalized, \
                      heading_residual, \
                      heading_class_label, heading_residual_label, \
                      size_scores, size_residual_normalized, \
                      size_residual, \
                      size_class_label, size_residual_label)
        loss = losses['total_loss']
        # accuracy (FIX: flipped box results in IOU = 0 maybe)
        ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
            heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
            box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
            heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
            size_residual_label.cpu().detach().numpy())
        # matplotlib viz
        pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
            heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
        np_bbox_target = bbox_target.cpu().detach().numpy()
        gt_corners = boxes_to_corners_3d(np_bbox_target)
        # Debug visualization, currently disabled (epoch == -1 never holds).
        # NOTE(review): the `i, j = ...` assignments below clobber the batch
        # index i if this block is ever re-enabled — rename before enabling.
        if i > 0 and epoch == -1:
            for cc in range(32):
                fig = plt.figure()
                ax = fig.add_subplot(111, projection='3d')
                np_points = points1.cpu().detach().numpy()
                pts = np_points[cc]
                gt_b = gt_corners[cc] # (8, 3)
                b = pred_box_corners[cc]
                ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
                for k in range(0, 4):
                    xx = 0
                    yy = 1
                    zz = 2
                    # pred
                    i, j = k, (k + 1) % 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    i, j = k + 4, (k + 1) % 4 + 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    i, j = k, k + 4
                    ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                            color='r')
                    # gt
                    i, j = k, (k + 1) % 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                    i, j = k + 4, (k + 1) % 4 + 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                    i, j = k, k + 4
                    ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                            color='g')
                #visual_right_scale(corners3d.reshape(-1, 3), ax)
                ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
                ax.view_init(elev=30., azim=-45)
                ax.set_box_aspect([1,1,1])
                #ax.set_xlim3d(-3, 3)
                #ax.set_ylim3d(-3, 3)
                #ax.set_zlim3d(-3, 3)
                ax.set_xlabel('x')
                ax.set_ylabel('y')
                ax.set_zlabel('z')
                plt.show()
        '''# Our lines span from points 0 to 1, 1 to 2, 2 to 3, etc...
        lines = [[0, 1], [1, 2], [2, 3], [0, 3],
                 [4, 5], [5, 6], [6, 7], [4, 7],
                 [0, 4], [1, 5], [2, 6], [3, 7]]
        # Use the same color for all lines
        colors = [[1, 0, 0] for _ in range(len(lines))]
        colors1 = [[0, 1, 0] for _ in range(len(lines))]
        line_set = o3d.geometry.LineSet()
        line_set.points = o3d.utility.Vector3dVector(np_pred_box[0])
        line_set.lines = o3d.utility.Vector2iVector(lines)
        line_set.colors = o3d.utility.Vector3dVector(colors)
        line_set1 = o3d.geometry.LineSet()
        line_set1.points = o3d.utility.Vector3dVector(np_gt_box[0])
        line_set1.lines = o3d.utility.Vector2iVector(lines)
        line_set1.colors = o3d.utility.Vector3dVector(colors1)
        # Create a visualization object and window
        #vis = o3d.visualization.Visualizer()
        #vis.create_window()
        # Display the bounding boxes:
        #vis.add_geometry(line_set)
        #o3d.visualization.draw_geometries([line_set,line_set1,pcd])
        #o3d.visualization.draw_geometries([line_set1])
        #np_points = points1.cpu().detach().numpy()
        #np_points = np.transpose(np_points)
        #pcd = o3d.geometry.PointCloud()
        #pcd.points = o3d.utility.Vector3dVector(np_points)
        #o3d.visualization.draw_geometries([pcd])
        o3d.visualization.draw_geometries([line_set, line_set1])'''
        loss.backward()
        optimizer.step()
        print('[%d: %d/%d] train loss: %f MIOU: %f' % (epoch, i, num_batch, loss.item(), np.mean(iou3dbox)))
        #print('[%d: %d/%d] train loss: %f' % (epoch, i, num_batch, loss.item()))
        loss_train = loss.item()
        # Every 10th batch: score a single test batch and update the live plot.
        if i % 10 == 0:
            j, data = next(enumerate(testboxdataloader, 0))
            points, bbox_target, target, _, dist, cluster_center, voxel = data
            points1 = points + cluster_center[:, None]
            target = target[:, 0]
            dist = dist[:, None]
            voxel = voxel[:, :, None]
            # transform target scalar to 3x one hot vector
            hot1 = torch.zeros(len(data[0]))
            hot1[target == 0] = 1
            hot2 = torch.zeros(len(data[0]))
            hot2[target == 2] = 1
            hot3 = torch.zeros(len(data[0]))
            hot3[target == 1] = 1
            one_hot = torch.vstack((hot1, hot2, hot3))
            one_hot = one_hot.transpose(1, 0)
            points = points.transpose(2, 1)
            points, target, bbox_target, one_hot, dist, cluster_center, voxel = points.cuda(), target.cuda(), bbox_target.cuda(), one_hot.cuda(), dist.cuda().float(), cluster_center.cuda(), voxel.cuda().float()
            classifier = classifier.eval()
            # NN
            box_pred, center_delta = classifier(points, one_hot, dist, voxel)
            center_boxnet, \
            heading_scores, heading_residual_normalized, heading_residual, \
            size_scores, size_residual_normalized, size_residual = \
                parse_output_to_tensors(box_pred)
            stage1_center = cluster_center + center_delta # original cluster center in the world
            box3d_center = center_boxnet + stage1_center
            # compute GT, probably wrong setup
            bbox_target[:,:3] = bbox_target[:,:3] + cluster_center
            box3d_center_label = bbox_target[:,:3]
            angle = bbox_target[:, 6] #+ 3/2*np.pi
            heading_class_label, heading_residual_label = angle2class(angle, NUM_HEADING_BIN)
            size_class_label, size_residual_label = size2class2(bbox_target[:,3:6], target)
            # losses
            losses = Loss(box3d_center, box3d_center_label, stage1_center, \
                          heading_scores, heading_residual_normalized, \
                          heading_residual, \
                          heading_class_label, heading_residual_label, \
                          size_scores, size_residual_normalized, \
                          size_residual, \
                          size_class_label, size_residual_label)
            loss = losses['total_loss']
            # accuracy
            ioubev, iou3dbox = compute_box3d_iou(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
                heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy(), \
                box3d_center_label.cpu().detach().numpy(), heading_class_label.cpu().detach().numpy(), \
                heading_residual_label.cpu().detach().numpy(), size_class_label.cpu().detach().numpy(), \
                size_residual_label.cpu().detach().numpy())
            # matplotlib viz
            pred_box_corners = give_pred_box_corners(box3d_center.cpu().detach().numpy(), heading_scores.cpu().detach().numpy(), \
                heading_residual.cpu().detach().numpy(), size_scores.cpu().detach().numpy(), size_residual.cpu().detach().numpy())
            np_bbox_target = bbox_target.cpu().detach().numpy()
            gt_corners = boxes_to_corners_3d(np_bbox_target)
            # Same disabled debug visualization as the training branch above.
            if i > 0 and epoch == -1:
                for cc in range(32):
                    fig = plt.figure()
                    ax = fig.add_subplot(111, projection='3d')
                    np_points = points1.cpu().detach().numpy()
                    pts = np_points[cc]
                    gt_b = gt_corners[cc] # (8, 3)
                    b = pred_box_corners[cc]
                    ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=5, c='b', lw=0, alpha=1)
                    for k in range(0, 4):
                        xx = 0
                        yy = 1
                        zz = 2
                        # pred
                        i, j = k, (k + 1) % 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        i, j = k + 4, (k + 1) % 4 + 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        i, j = k, k + 4
                        ax.plot([b[i, xx], b[j, xx]], [b[i, yy], b[j, yy]], [b[i, zz], b[j, zz]],
                                color='r')
                        # gt
                        i, j = k, (k + 1) % 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                        i, j = k + 4, (k + 1) % 4 + 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                        i, j = k, k + 4
                        ax.plot([gt_b[i, xx], gt_b[j, xx]], [gt_b[i, yy], gt_b[j, yy]], [gt_b[i, zz], gt_b[j, zz]],
                                color='g')
                    #visual_right_scale(corners3d.reshape(-1, 3), ax)
                    ax.title.set_text('IOU: {}'.format(iou3dbox[cc]))
                    ax.view_init(elev=30., azim=-45)
                    ax.set_box_aspect([1,1,1])
                    #ax.set_xlim3d(-3, 3)
                    #ax.set_ylim3d(-3, 3)
                    #ax.set_zlim3d(-3, 3)
                    ax.set_xlabel('x')
                    ax.set_ylabel('y')
                    ax.set_zlabel('z')
                    plt.show()
            print('[%d: %d/%d] %s loss: %f MIOU: %f' % (epoch, i, num_batch, blue('test'), loss.item(), np.mean(iou3dbox)))
            test_loss.append(loss.item())
            train_loss.append(loss_train)
            #loss_list[epoch*791 + i] = loss.item()
            # NOTE(review): 791 looks like a hard-coded batches-per-epoch count
            # used as the plot x-coordinate stride — confirm it matches the data.
            idx.append(epoch*791 + i)
            plot1.set_xdata(idx)
            plot1.set_ydata(test_loss)
            plot2.set_xdata(idx)
            plot2.set_ydata(train_loss)
            figure.canvas.draw()
            figure.canvas.flush_events()
            time.sleep(0.01)
    torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))
# Dead code: a full-test-set accuracy sweep, disabled by being wrapped in a
# string literal. Kept verbatim (it references testdataloader, which is not
# defined in this script).
'''total_correct = 0
total_testset = 0
for i,data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
    target = target[:, 0]
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    classifier = classifier.eval()
    pred, _, _, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    total_correct += correct.item()
    total_testset += points.size()[0]
print("final accuracy {}".format(total_correct / float(total_testset)))'''
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1822,
29572,
198,
6738,
269,
37046,
1330,
6167,
198,
6738,
595,
1330,
595,
198,
11748,
28686,
198,
11748,
4738,
198,
6738,
17802,
1330,
49064,
62,
41173,
5446,
2606,
9328,
198,
... | 1.881277 | 11,211 |
'''
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
'''
| [
7061,
6,
198,
15056,
257,
4947,
286,
7310,
37014,
11,
1441,
477,
1744,
9943,
32855,
13,
198,
198,
16281,
25,
198,
198,
20560,
25,
685,
16,
11,
17,
11,
18,
60,
198,
26410,
25,
198,
58,
198,
220,
685,
16,
11,
17,
11,
18,
4357,
1... | 1.947917 | 96 |
import os
import sys
sys.path.append("..")
import argparse
from pathlib import Path
# Import teaching utils
import pandas as pd
import numpy as np
from utils.neuralnetwork import NeuralNetwork
# Import sklearn metrics
from sklearn import metrics
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
# Script entry point: parse CLI options and launch training.
# NOTE(review): main() is not defined in this excerpt — presumably defined
# earlier in the module; confirm before running.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = "train neural network on the full MNIST dataset and view the classifier metrics")
    parser.add_argument("-d", "--data_path", default = Path('../data/'), type = Path, help = "path to where the MNIST csv-files dataset is saved or where to save it")
    parser.add_argument("-e", "--epochs", default = 5, type = int, help = "numbers of epochs to train")
    args = parser.parse_args()
    main(data_path = args.data_path, epochs = args.epochs)
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
492,
4943,
198,
11748,
1822,
29572,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
2,
17267,
7743,
3384,
4487,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,... | 3.230769 | 286 |
import re
from oelint_adv.cls_item import Variable
from oelint_adv.cls_rule import Rule
| [
11748,
302,
198,
198,
6738,
267,
417,
600,
62,
32225,
13,
565,
82,
62,
9186,
1330,
35748,
198,
6738,
267,
417,
600,
62,
32225,
13,
565,
82,
62,
25135,
1330,
14330,
628
] | 2.8125 | 32 |
from google.oauth2 import service_account
from google.cloud import bigquery
from datetime import datetime
| [
6738,
23645,
13,
12162,
1071,
17,
1330,
2139,
62,
23317,
201,
198,
6738,
23645,
13,
17721,
1330,
1263,
22766,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201
] | 3.857143 | 28 |
#
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base class for configurable processing components. Processing components are
designed to be pipelined.
"""
| [
2,
198,
2,
220,
15069,
1946,
10,
33976,
49808,
2059,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,... | 3.774869 | 191 |
"""yamlip - A yaml interpolation tool"""
__version__ = '0.0.1'
__author__ = 'Jan Murre <jan.murre@catalyz.nl>'
__all__ = []
import functools
from string import Template
from attrdict import AttrDict
import yaml
import click
@click.command()
@click.argument("source_yaml_file")
@click.option("-o", "--output")
| [
37811,
88,
321,
40712,
532,
317,
331,
43695,
39555,
341,
2891,
37811,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
15,
13,
16,
6,
198,
834,
9800,
834,
796,
705,
12128,
5921,
260,
1279,
13881,
13,
28582,
260,
31,
9246,
3400,
89,
13,... | 2.700855 | 117 |
import unittest
from datastructure.links.PositionList import PositionList
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
4818,
459,
5620,
13,
28751,
13,
26545,
8053,
1330,
23158,
8053,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 3 | 42 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2018 Julien Danjou <jd@mergify.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pyparsing
import pytest
from mergify_engine.rules import parser
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
10673,
2864,
5979,
2013,
6035,
73,
280,
1279,
73,
67,
31,
647,
70,
1958,
13,
952,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
3... | 3.455446 | 202 |
from melodb.loggers import ILogger, ConsoleLogger, MongoLogger
from typing import List
| [
198,
6738,
7758,
375,
65,
13,
6404,
5355,
1330,
314,
11187,
1362,
11,
24371,
11187,
1362,
11,
42591,
11187,
1362,
198,
6738,
19720,
1330,
7343,
628
] | 3.423077 | 26 |
import math
import itertools
import operator
import numpy
import pylab
import scipy.fftpack
import overlap
def autocorrelation(signal):
    """Magnitude-compressed autocorrelation of each row of `signal`.

    Zero-pads each row to twice its length, takes the square root of the
    normalized magnitude spectrum, and transforms back, keeping the first
    N lags. (Per the original author's note, this matches Marsyas exactly.)
    """
    num_samples = signal.shape[1]
    padded_len = 2 * num_samples
    # Normalized spectrum of the zero-padded rows.
    spectrum = scipy.fftpack.fft(signal, padded_len, axis=1) / padded_len
    # Square-root compression of the magnitude spectrum.
    compressed = abs(spectrum) ** 0.5
    # Back to the lag domain; only the first N lags are meaningful.
    lags = scipy.fftpack.ifft(compressed, axis=1).real * padded_len
    return lags[:, :num_samples]
# Tuning constants for tempo detection.
GCD_TOLERANCE = 0.1  # presumably slack when matching periods to a common divisor — TODO confirm against callers
TOLERANCE = 1.04  # presumably a ratio tolerance for comparing tempo candidates — TODO confirm
MAX_BPM = 1000  # upper bound on detectable beats per minute
| [
11748,
10688,
198,
11748,
340,
861,
10141,
198,
11748,
10088,
198,
198,
11748,
299,
32152,
198,
11748,
279,
2645,
397,
198,
11748,
629,
541,
88,
13,
487,
83,
8002,
198,
198,
11748,
21721,
198,
198,
4299,
1960,
420,
273,
49501,
7,
1268... | 2.121849 | 238 |
import numpy as np
from sklearn.decomposition import PCA
from scipy.stats import zscore
import time
import csv
import os
import nibabel
from sklearn.metrics.pairwise import euclidean_distances
from scipy.ndimage.filters import gaussian_filter
from utils.ridge_tools import cross_val_ridge, corr
import time as tm
import sys
# train/test is the full NLP feature
# train/test_pca is the NLP feature reduced to 10 dimensions via PCA that has been fit on the training data
# feat_dir is the directory where the NLP features are stored
# train_indicator is an array of 0s and 1s indicating whether the word at this index is in the training set
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
198,
6738,
629,
541,
88,
13,
34242,
1330,
1976,
26675,
198,
11748,
640,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
33272,
9608,
198... | 3.378238 | 193 |
import pandas as pd
import os
import numpy as np
import datetime
import csv
from Code.create_collector import vti_init
from Code.preprocessing import vector_merge
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
198,
11748,
269,
21370,
198,
6738,
6127,
13,
17953,
62,
33327,
273,
1330,
410,
20259,
62,
15003,
198,
6738,
6127,
13,
3866,
... | 3.288462 | 52 |
import time
from datetime import datetime as dt
"""
host files for windows windows c:\windows\system32\drivers\etc
host files for linux & Mac /ect/hosts
"""
# Path of the system hosts file (Windows location; see module docstring for
# the Linux/Mac equivalent).
hosts_path_system = r"C:\Windows\System32\drivers\etc\hosts"
host_dir = hosts_path_system
#host_dir = "hosts"  # local copy, useful for testing without admin rights
# Loopback address the blocked sites are redirected to.
redir = "127.0.0.1"
# Sites to block during working hours.
websites_list =[
    "www.facebook.com",
    "www.youtube.com",
    "www.google.com.mx"
    ]
# Working hours (24h clock): block between from_hour and to_hour.
from_hour = 7
to_hour = 13
# Main program: poll once per second and keep the hosts file in the state
# matching the current time of day.
while True:
    if dt(dt.now().year, dt.now().month, dt.now().day, from_hour) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, to_hour):
        print("En hora de trabajar: Bloqueo Activo ")
        # Working hours: append a redirect entry for each site not yet listed.
        with open(host_dir, 'r+') as file:
            content = file.read()
            for website in websites_list:
                if website in content:
                    pass
                else:
                    file.write(redir + " " + website + "\n")
    else:
        # Off hours: rewrite the file, dropping every line that mentions a
        # blocked site, then truncate the leftover tail.
        with open(host_dir, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
                if not any(website in line for website in websites_list):
                    file.write(line)
            file.truncate()
            print("Es hora de relajarse: Bloqueo Desactivado")
    time.sleep(1) #Seconds
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
220,
198,
37811,
198,
220,
220,
220,
2583,
3696,
329,
9168,
220,
9168,
269,
7479,
28457,
59,
10057,
2624,
59,
36702,
59,
14784,
198,
220,
220,
220,
2583,
3696,
329,
... | 2.1 | 640 |
import importlib
import pkgutil
import aurora.drivers
| [
11748,
1330,
8019,
198,
11748,
279,
10025,
22602,
198,
198,
11748,
45714,
5799,
13,
36702,
628,
628,
628
] | 3.333333 | 18 |
from functools import partial
from typing import Tuple
import chika
import homura
import torch
import torch.nn.functional as F
from homura import lr_scheduler, reporters, trainers
from homura.vision import DATASET_REGISTRY, MODEL_REGISTRY
from sam import SAMSGD as _SAMSGD
@chika.config
@chika.config
@chika.main(cfg_cls=Config, strict=True)
if __name__ == '__main__':
main()
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
442,
9232,
198,
11748,
3488,
5330,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
3488,
5330,
1330,
300,
81,
62,... | 2.868613 | 137 |
import csv
import io
from flask import (
current_app,
flash,
redirect,
render_template,
request,
Response,
url_for,
)
from flask_login import current_user
from . import admin_bp
from .forms import (
CancelCarpoolAdminForm,
DeleteDestinationForm,
DestinationForm,
ProfilePurgeForm,
)
from geoalchemy2.shape import to_shape
from .. import db
from ..email import send_email
from ..carpool.views import (
cancel_carpool,
email_driver_rider_cancelled_request,
)
from ..models import (
Carpool,
Destination,
Person,
Role,
PersonRole,
RideRequest,
)
@admin_bp.route('/admin/')
@admin_bp.route('/admin/stats/')
@admin_bp.route('/admin/users/<uuid>')
@admin_bp.route('/admin/users/<uuid>/purge', methods=['GET', 'POST'])
@admin_bp.route('/admin/users/<user_uuid>/togglerole', methods=['POST'])
@admin_bp.route('/admin/users')
@admin_bp.route('/admin/drivers_and_riders')
@admin_bp.route('/admin/users.csv')
@admin_bp.route('/admin/carpools')
@admin_bp.route('/admin/carpools.csv')
@admin_bp.route('/admin/destinations')
@admin_bp.route('/admin/destinations/new', methods=['GET', 'POST'])
@admin_bp.route('/admin/destinations/<uuid>', methods=['GET', 'POST'])
@admin_bp.route('/admin/destinations/<uuid>/delete', methods=['GET', 'POST'])
@admin_bp.route('/admin/destinations/<uuid>/togglehidden', methods=['POST'])
@admin_bp.route('/admin/emailpreview/<template>')
@admin_bp.route('/admin/<uuid>/cancel', methods=['GET', 'POST'])
| [
11748,
269,
21370,
198,
11748,
33245,
198,
6738,
42903,
1330,
357,
198,
220,
220,
220,
1459,
62,
1324,
11,
198,
220,
220,
220,
7644,
11,
198,
220,
220,
220,
18941,
11,
198,
220,
220,
220,
8543,
62,
28243,
11,
198,
220,
220,
220,
2... | 2.527273 | 605 |
import sys
import os
import json
import glob
import pandas as pd
import plotly
import plotly.graph_objs as go
# ---- Collect Ray Tune trial results ----
# Expects one <trial>/result.json file per trial under <result_dir>.
if len(sys.argv) != 2:
    print("Usage: python tune_plot.py <result_dir>")
    print("Example: python tune_plot.py ~/ray_results/objective_mean_2021-04-08_00-07-44/")
    # Fixed: previously execution fell through after printing usage and
    # crashed on sys.argv[1] with an IndexError.
    sys.exit(1)
result_dir = sys.argv[1]
# Name of the tuning run, used in the output plot filenames.
tune_run = os.path.basename(os.path.normpath(result_dir))
results = glob.glob(os.path.join(result_dir, "*", "result.json"))
score = []
kp = []
ki = []
kd = []
alpha = []
fullPID = False
for results_file in results:
    print(results_file)
    with open(results_file) as f:
        try:
            d = json.load(f)
        # Narrowed from a bare except: json.JSONDecodeError is a ValueError,
        # so malformed files are skipped without masking KeyboardInterrupt etc.
        except ValueError:
            continue
    score.append(d['score'])
    kp.append(d['config']['kp'])
    ki.append(d['config']['ki'])
    if 'kd' in d['config']:
        kd.append(d['config']['kd'])
        fullPID = True
    alpha.append(d['config']['alpha'])
    # NOTE(review): if only some trials define 'kd', kd ends up shorter than
    # kp/ki while fullPID is still True — confirm all trials share one config.
# ---- Render the tuning results ----
# Full-PID runs get a 5-D scatter: position = (kp, ki, kd), marker size =
# alpha, marker colour = score. PI-only runs have no kd, so alpha moves to
# the z-axis and the marker size is fixed.
if fullPID:
    # Marker size encodes alpha, colour encodes the objective score.
    markersize = [x * 20 for x in alpha]
    markercolor = score
    fig1 = go.Scatter3d(x=kp,
                        y=ki,
                        z=kd,
                        marker=dict(size=markersize,
                                    color=markercolor,
                                    opacity=0.5,
                                    line=dict(width=2,
                                              color='DarkSlateGrey'),
                                    reversescale=False,
                                    colorscale='blues'),
                        line=dict(width=0.02),
                        mode='markers')
    kp_range = [min(kp), max(kp)]
    # Fixed: these ranges previously mixed up ki and kd
    # (ki_range = [min(ki), max(kd)], kd_range = [min(ki), max(kd)]),
    # which distorted both axes whenever the two gains had different scales.
    ki_range = [min(ki), max(ki)]
    kd_range = [min(kd), max(kd)]
    #ki_range = [0, 6e-6]
    #kd_range = [0, 6e-6]
    mylayout = go.Layout(scene=dict(xaxis=dict(title="kp", range=kp_range, showexponent='all', exponentformat='e'),
                                    yaxis=dict(title="ki", range=ki_range, showexponent='all', exponentformat='e'),
                                    zaxis=dict(title="kd", range=kd_range, showexponent='all', exponentformat='e')))
    # Write an interactive HTML file (and a PNG snapshot), then open it.
    plotly.offline.plot({"data": [fig1],
                         "layout": mylayout},
                        image='png',
                        image_filename='tune_analyze_PID.png',
                        auto_open=True,
                        filename=("PID Scores Plot " + tune_run + ".html"))
else:
    # PI-only sweep: fixed marker size, alpha shown on the z-axis instead.
    markersize = [10 for x in alpha]
    markercolor = score
    fig1 = go.Scatter3d(x=kp,
                        y=ki,
                        z=alpha,
                        marker=dict(size=markersize,
                                    color=markercolor,
                                    opacity=0.5,
                                    line=dict(width=2,
                                              color='DarkSlateGrey'),
                                    reversescale=False,
                                    colorscale='blues'),
                        line=dict(width=0.02),
                        mode='markers')
    mylayout = go.Layout(scene=dict(xaxis=dict(title="kp", showexponent='all', exponentformat='e'),
                                    yaxis=dict(title="ki", showexponent='all', exponentformat='e'),
                                    zaxis=dict(title="alpha", showexponent='all', exponentformat='e')))
    plotly.offline.plot({"data": [fig1],
                         "layout": mylayout},
                        image='png',
                        image_filename='tune_analyze_PI.png',
                        auto_open=True,
                        filename=("PI Scores Plot " + tune_run + ".html"))
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
15095,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
306,
198,
11748,
7110,
306,
13,
34960,
62,
672,
8457,
355,
467,
198,
198,
361,
18896,
7,
17597,
13,
853,
85... | 1.727273 | 2,266 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:softmaxMnist
Description : mnist data sets, softmax model
pytorch 不需要进行 one-hot 编码, 使用类别即可
Email : autuanliu@163.com
Date:18-1-16
"""
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.nn import Module, functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
# Network model is defined above; this guard runs training when the module
# is executed as a script.
# NOTE(review): Network, get_data, train_m and test_m are not visible in this
# excerpt — presumably defined earlier in the module; confirm.
if __name__ == '__main__':
    # some config
    config = {'batch_size': 64, 'epoch_num': 100, 'lr': 0.001, 'in_feature': 28 * 28, 'out_feature': 10}
    train_loader, test_loader = get_data(), get_data(flag=False)
    # model instance, loss function and optimizer
    model = Network()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=config['lr'], momentum=0.9)
    # train and evaluate
    for epoch in range(config['epoch_num']):
        train_m(model, train_loader)
        test_m(model, test_loader)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
47232,
12,
198,
220,
220,
9220,
6530,
171,
120,
248,
4215,
9806,
44,
77,
396,
198,
220,
220,
12489,
1058,... | 2.261641 | 451 |
from sys import path; path += [".", ".."] # hacky...
from utils import *
if __name__ == "__main__":
ciphertexts = map(dehex, load_data("4.txt").split("\n"))
keyspace = list(range(0x100))
plaintexts = reduce(op.add, [
[xor(ct, [key]) for key in keyspace]
for ct in ciphertexts
])
best_plaintext = min(plaintexts, key=englishness) # I like this code
message = best_plaintext.decode()
assert(message == "Now that the party is jumping\n")
print(message.strip())
| [
6738,
25064,
1330,
3108,
26,
3108,
15853,
14631,
33283,
366,
492,
8973,
1303,
8156,
88,
986,
198,
6738,
3384,
4487,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
66,
10803,
5239,
82,
796,
397... | 2.620879 | 182 |
# Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import docker
import pytest
from pytest_mock import MockFixture
from odahuflow.sdk.local import packaging
from odahuflow.sdk.local.packaging import start_package
from odahuflow.sdk.models import K8sPackager, ModelPackaging, ModelPackagingSpec, PackagingIntegration, \
PackagingIntegrationSpec
# Format: ['artifact_name', 'artifact_path',
# 'expected_artifact_name', expected_artifact_path]
test_data = [
(
'wine-1.0', '/odahu/training',
'wine-1.0', '/odahu/training'
),
(
'wine-1.0.zip', '/odahu/training',
'wine-1.0', '/odahu/training'
),
(
'wine-1.0.zip.zip', None,
'wine-1.0.zip', '/odahu/default_output'
)
]
DEFAULT_OUTPUT_DIR = '/odahu/default_output'
@pytest.mark.parametrize(['artifact_name', 'artifact_path',
'expected_artifact_name', 'expected_artifact_path'],
test_data)
| [
2,
220,
15069,
12131,
14724,
2390,
11998,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 2.624357 | 583 |
# Generated by Django 3.0.7 on 2020-06-29 22:25
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
3312,
12,
1959,
2534,
25,
1495,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import rospy
from yaw_controller import YawController
from lowpass import LowPassFilter
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
| [
11748,
686,
2777,
88,
198,
6738,
331,
707,
62,
36500,
1330,
575,
707,
22130,
198,
6738,
1877,
6603,
1330,
7754,
14478,
22417,
198,
6738,
46514,
1330,
37022,
628,
198,
38,
1921,
62,
35,
16938,
9050,
796,
362,
13,
23,
3365,
198,
11651,
... | 2.867925 | 53 |
"""Unit tests for github4.api."""
import unittest.mock
import github4
class TestAPI(unittest.TestCase):
"""All tests for the github4.api module."""
def test_enterprise_login(self):
"""Show that github4.enterprise_login returns GitHubEnterprise."""
args = ("login", "password", None, "https://url.com/", None)
with unittest.mock.patch.object(github4.GitHubEnterprise, "login") as login:
g = github4.enterprise_login(*args)
assert isinstance(g, github4.GitHubEnterprise)
login.assert_called_once_with("login", "password", None, None)
def test_login(self):
"""Show that github4.login proxies to GitHub."""
args = ("login", "password", None, None)
with unittest.mock.patch.object(github4.GitHub, "login") as login:
g = github4.login(*args)
assert isinstance(g, github4.GitHub)
assert not isinstance(g, github4.GitHubEnterprise)
login.assert_called_once_with(*args)
| [
37811,
26453,
5254,
329,
33084,
19,
13,
15042,
526,
15931,
198,
11748,
555,
715,
395,
13,
76,
735,
198,
198,
11748,
33084,
19,
628,
198,
4871,
6208,
17614,
7,
403,
715,
395,
13,
14402,
20448,
2599,
628,
220,
220,
220,
37227,
3237,
5... | 2.41866 | 418 |
import json
import pytest
import responses
from filepreviews import API_URL, FilePreviews, exceptions
file_previews = FilePreviews(api_key="DUMMY_API_KEY", api_secret="DUMMY_SECRET_KEY")
@responses.activate
@responses.activate
@responses.activate
@responses.activate
@responses.activate
| [
11748,
33918,
198,
198,
11748,
12972,
9288,
198,
11748,
9109,
198,
198,
6738,
2393,
3866,
33571,
1330,
7824,
62,
21886,
11,
9220,
6719,
33571,
11,
13269,
198,
198,
7753,
62,
3866,
33571,
796,
9220,
6719,
33571,
7,
15042,
62,
2539,
2625,... | 3.061224 | 98 |
from ursina import *
from ursina import curve
from particles import ParticleSystem
sign = lambda x: -1 if x < 0 else (1 if x > 0 else 0) | [
6738,
220,
1834,
1437,
1330,
1635,
198,
6738,
220,
1834,
1437,
1330,
12133,
198,
6738,
13166,
1330,
2142,
1548,
11964,
198,
198,
12683,
796,
37456,
2124,
25,
532,
16,
611,
2124,
1279,
657,
2073,
357,
16,
611,
2124,
1875,
657,
2073,
65... | 3.186047 | 43 |
import numpy as np
DISTRIBUTION_SIZE = 10
NEGATIVE_THRESHOLD = -2.5
| [
11748,
299,
32152,
355,
45941,
198,
198,
26288,
5446,
9865,
35354,
62,
33489,
796,
838,
198,
45,
7156,
37045,
62,
4221,
19535,
39,
15173,
796,
532,
17,
13,
20,
198
] | 2.3 | 30 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from rlkit.torch.core import PyTorchModule
from rlkit.torch.networks import Mlp, identity
from rlkit.torch import pytorch_util as ptu
from copy import deepcopy
# self.V_part = V_net
# # this is a hack so it's not added as a submodule
# self.target_V_part = [deepcopy(V_net)]
# self.soft_target_V_tau = soft_target_V_tau
# def cuda(self, *args, **kwargs):
# super().cuda(*args, **kwargs)
# self.target_V_part[0].cuda()
# def forward(self, obs_batch, act_batch, z_batch=None, pol_log_prob=None, next_obs_batch=None):
# obs_batch = self.obs_processor(obs_batch, False, z_batch)
# next_obs_batch = self.obs_processor(next_obs_batch, False, z_batch)
# r = self.r_part(obs_batch)
# V_s = self.V_part(obs_batch)
# V_s_prime = self.target_V_part[0](next_obs_batch).detach()
# shaping = self.gamma*V_s_prime - V_s
# f = r + shaping
# disc_logits = f - pol_log_prob
# clamped_disc_logits = torch.clamp(disc_logits, min=-1.0*self.clamp_magnitude, max=self.clamp_magnitude)
# return clamped_disc_logits, r, shaping
# def _update_target_V_part(self):
# ptu.soft_update_from_to(self.V_part, self.target_V_part[0], self.soft_target_V_tau)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
198,
6738,
374,
75,
15813,
13,
13165,
354,
13,
7295,
1330,
9485,
158... | 2.153274 | 672 |
import argparse
import sys
import json
if __name__ == "__main__":
main()
| [
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
33918,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
220,
220,
1388,
3419,
198
] | 2.612903 | 31 |
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
sourcefiles = ['array_tools.pyx', '_sum.cpp']
extra_compile_args = []
libraries = []
ext = [Extension('*',
sourcefiles,
extra_compile_args=extra_compile_args,
libraries=[],
language='c++')
]
setup(ext_modules=cythonize(ext), include_dirs=[np.get_include()])
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
327,
7535,
13,
15580,
1330,
3075,
400,
261,
1096,
198,
6738,
1233,
26791,
13,
2302,
3004,
1330,
27995,
198,
11748,
299,
32152,
355,
45941,
198,
198,
10459,
16624,
796,
37250,
18747,
62... | 2.517442 | 172 |
import textwrap
from typing import Iterator, Any
from primehub import Helpful, cmd, Module
from primehub.utils.display import display_tree_like_format
| [
11748,
2420,
37150,
198,
6738,
19720,
1330,
40806,
1352,
11,
4377,
198,
198,
6738,
6994,
40140,
1330,
21656,
11,
23991,
11,
19937,
198,
6738,
6994,
40140,
13,
26791,
13,
13812,
1330,
3359,
62,
21048,
62,
2339,
62,
18982,
628,
198
] | 3.85 | 40 |
from django.shortcuts import render
from django.views.generic import TemplateView
from django.http import HttpResponse, JsonResponse, HttpResponseForbidden, HttpResponseBadRequest
import ccxt
# Create your views here.
exchangeIns = {}
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
449,
1559,
31077,
11,
367,
29281,
31077,
1890,
37978,... | 3.621212 | 66 |
/usr/lib/python3.4/tokenize.py | [
14,
14629,
14,
8019,
14,
29412,
18,
13,
19,
14,
30001,
1096,
13,
9078
] | 2.142857 | 14 |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict, Optional
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.polyflow import V1CompiledOperation
from polyaxon.polypod.compiler.resolver.base import BaseResolver
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
2864,
12,
42334,
12280,
897,
261,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
... | 3.647826 | 230 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" IOB bits are more complicated than can be easily expressed to segmaker.
There are couple cases that need to be handled here:
- There are some bits that are always set for IN-only ports, but are cleared
selectively for OUT and INOUT ports.
- There are bits per each IOSTANDARD, in addition to drive patterns. These
can be merged to provide unique "(IOSTANDARD, DRIVE)" bit sets.
"""
import argparse
def filter_bits(site, bits):
""" Seperate top and bottom bits.
Some IOSTANDARD bits are tile wide, but really only apply to a half.
It is hard to write a fuzzer for this, but it is easy to filter by site,
and all bits appear to have a nice hard halve seperatation in the bitidx.
"""
if site == 'IOB_Y0':
min_bitidx = 64
max_bitidx = 127
elif site == 'IOB_Y1':
min_bitidx = 0
max_bitidx = 63
else:
assert False, site
return frozenset(inner())
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
2177,
12,
42334,
220,
383,
4935,
1395,
12,
19591,
46665,
13,
198,
2,
198,
2,
57... | 2.824719 | 445 |
import feedparser
import urllib.parse
from random import shuffle, seed
UKR_NEWS = ["https://news.yandex.ua/index.rss", "http://www.ukr-portal.com/php/rss_1.xml", "http://news.finance.ua/ru/rss", "http://www.ua.rian.ru/export/rss2/index.xml", "http://feeds.feedburner.com/zaxid/rss_ua", "http://www.dt.ua/export.rss", "https://malina-mix.com/anekdots.xml"]
def lookup(geo, lang="us"):
"""Looks up articles for geo."""
# check cache for geo
if geo in lookup.cache:
if lookup.query_counter[geo] < 10:
lookup.query_counter[geo] += 1
return lookup.cache[geo]
else:
del lookup.cache[geo]
del lookup.query_counter[geo]
if geo == "H++":
lookup.cache[geo] = {"link": "http://programming.kr.ua/ru", "title": "Главная"}, {"link": "http://programming.kr.ua/ru/news", "title": "News"}, {"link": "http://programming.kr.ua/ru/potential", "title": "Возможности"}, {"link": "http://programming.kr.ua/ru/about#contacts", "title": "Контакты"}
lookup.query_counter[geo] = 1
return lookup.cache[geo]
url = "http://news.google.com/news?ned=" + lang+ "&geo={}&output=rss"
# get feed from Google
feed = feedparser.parse(url.format(urllib.parse.quote(geo, safe="")))
# if no items in feed, get feed from other
if not feed["items"]:
if lang == "ru_ua":
# get random UKR_NEWS
seed()
shuffle(UKR_NEWS)
feed = feedparser.parse(UKR_NEWS[0])
if not feed["items"]:
# there is always news
feed = feedparser.parse("http://feeds.feedburner.com/zaxid/rss_ua")
else:
# get from Onion
feed = feedparser.parse("http://www.theonion.com/feeds/rss")
# cache results
lookup.cache[geo] = [{"link": item["link"], "title": item["title"]} for item in feed["items"]]
# add counter
lookup.query_counter[geo] = 1
# return results
return lookup.cache[geo]
# initialize cache
lookup.cache = {}
# initialize query counter
lookup.query_counter = {}
| [
11748,
3745,
48610,
198,
11748,
2956,
297,
571,
13,
29572,
198,
6738,
4738,
1330,
36273,
11,
9403,
198,
198,
15039,
49,
62,
49597,
796,
14631,
5450,
1378,
10827,
13,
88,
392,
1069,
13,
6413,
14,
9630,
13,
42216,
1600,
366,
4023,
1378,... | 2.121951 | 1,025 |
# -*- coding: utf-8 -*-
"""
Single Molecule Molecular Dynamics Code
Created 2018 by David of Theoretically Speaking
Please Modify!
"""
from __future__ import print_function
import os
import sys
import numpy as np
# Global variables for unit conversions
hartree = 4.35974465e-18 # J, atomic unit of energy
emass = 5.486e-4 # kg
dalton = 1.660539040e-27 # kg
avo = 6.02214086e23 # mol^-1
emass = 9.109534e-28 # g, atomic unit of mass
boltz = 1.38064852e-23 / hartree # E_h K^-1
bohr = 0.52917721067 # Angstroms
hbar = 6.626070040e-34 # Js
atomic_time = hbar / hartree
# Global files to prevent constant opening/closing
xyz_file = open("coordinates.xyz", "w")
energy_file = open("energies.dat", "w")
def display_header():
"""Write opening message to screen"""
print_dashed_line()
print("Welcome to the Theoretically Speaking molecular dynamics code")
print_dashed_line()
def print_dashed_line(length = 65):
"""Write --- line of given length to screen"""
line = "-" * length
print(line)
def string_to_boolean(string):
"""Converts input string of True or False to a boolean True or False"""
string = string.lower().strip()
true_strings = ["true", "t"]
false_strings = ["false", "f"]
if string in true_strings: return True
elif string in false_strings: return False
raise ValueError("Bad Boolean Value: " + string)
def get_input_parameters():
"""Ask user for input file name, read input parameters and store in dictionary"""
# Get list of available input files
input_files = get_recursive_file_list("inpt")
# Ask user to select input file from list
if len(input_files) == 0: # If cannot find any input files close program
print("No available input files. Exiting.")
sys.exit()
else:
while True:
print("Select an input file from the list:")
for i, file in enumerate(input_files):
print("[{0}] {1}".format(i, file))
try:
user_selection = int(input())
input_file = input_files[user_selection]
print("Input file selected: {0}".format(input_file))
print_dashed_line()
break
except: pass
# Open input file and read parameters into dictionary
parameters = {}
with open(input_file, "r") as file:
print("Reading input file")
# Skip header
for i in range(2): file.readline()
# Simulation parameters
try:
for i in range(2): file.readline()
parameters["time_total"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
parameters["time_step"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
parameters["box_size"] = float(file.readline().split()[0]) / bohr
parameters["write_freq"] = float(file.readline().split()[0]) / (atomic_time * 1e12)
print(" - Simulation parameters read")
except:
print("Error in simulation parameters")
sys.exit()
# Atom data
try:
for i in range(2): file.readline()
num_atoms = parameters["num_atoms"] = int(file.readline().split()[0])
parameters["random_displacement"] = string_to_boolean(file.readline().split()[0])
parameters["random_displacement_limit"] = float(file.readline().split()[0]) / bohr
file.readline() # skip comment
name_to_index = {} # dictionary to convert atom name to array index
parameters["atom_names"] = [] # empty list for names
parameters["atom_masses"] = np.empty(num_atoms) # empty array for masses
parameters["atom_crds"] = np.empty([num_atoms, 3]) # empty array for coordinates
for i in range(num_atoms):
line = file.readline().split()
name_to_index[line[0]] = i
parameters["atom_names"].append(line[0])
parameters["atom_masses"][i] = float(line[1]) / (avo * emass)
parameters["atom_crds"][i] = np.array(line[2:5], dtype = float) / bohr
print(" - Atom data read")
except:
print("Error in atom data")
sys.exit()
# Bond Data
try:
for i in range(2): file.readline()
num_bonds = parameters["num_bonds"] = int(file.readline().split()[0])
file.readline() # skip comment
parameters["bond_pairs"] = np.empty([num_bonds, 2], dtype=int) # empty array for indices of bonded atom pairs
parameters["bond_params"] = np.empty([num_bonds, 2]) # empty array for harmonic bond r0 and k
for i in range(num_bonds):
line = file.readline().split()
parameters["bond_pairs"][i, 0] = name_to_index[line[0]]
parameters["bond_pairs"][i, 1] = name_to_index[line[1]]
parameters["bond_params"][i, 0] = float(line[2]) / bohr
parameters["bond_params"][i, 1] = float(line[3]) * (bohr * 1e-10)**2 / hartree
print(" - Bond data read")
except:
print("Error in bond data")
sys.exit()
print("Read successful")
print_dashed_line()
return parameters
def get_recursive_file_list(ext):
"""Get list of files with specifed extension in current directory and all subdirectories"""
# Search over all files in all subdirectories, add to list if have required extension
files = []
for dirpath, dirname, filenames in os.walk("./"):
for filename in filenames:
if filename.endswith(ext):
filepath = os.path.join(dirpath,filename)
files.append(filepath)
return files
def apply_periodic_boundary_condition(crds, box_size):
"""Apply periodicity to keep atoms within simulation box"""
crds[crds < 0] += box_size
crds[crds > box_size] -= box_size
return crds
def minimum_image_displacement(crd_0, crd_1, box_size):
"""Find displacement between nearest periodic images of atom pair"""
displacement = crd_0 - crd_1
displacement[displacement < -box_size / 2] += box_size
displacement[displacement > box_size / 2] -= box_size
return displacement
def initialise_coordinates(crds, box_size, displace, limit):
"""Recentre atoms in simulation box, apply periodic boundary, apply random displacement"""
crds += box_size / 2
crds = apply_periodic_boundary_condition(crds, box_size)
if displace:
displacements = np.random.uniform(low = -limit, high = limit, size = crds.shape)
crds += displacements
return crds
def calculate_energy(masses, crds, velocities, bond_pairs, bond_params, box_size):
"""Calculate kinetic, potential and total energy of system"""
kinetic_energy = 0.5 * (masses * np.sum(velocities ** 2, axis=1)).sum() # U=0.5*m*v^2
# Calculate harmonic potential energy using: U=0.5*k(r-r0)^2
for i, bond in enumerate(bond_pairs):
atom_0, atom_1 = bond[0], bond[1]
displacement = minimum_image_displacement(crds[atom_0, :], crds[atom_1, :], box_size)
distance = np.linalg.norm(displacement)
potential_energy = 0.5 * bond_params[i, 1] * (distance - bond_params[i, 0]) ** 2
total_energy = kinetic_energy + potential_energy # Total energy as sum of ke and pe
return np.array([kinetic_energy, potential_energy, total_energy])
def update_accelerations(masses, crds, bond_pairs, bond_params, box_size):
"""Calculate the acceleration on each atom using potential model and Newton's laws of motion"""
# Calculate forces using Hooke's law: F=-k(r-r0)
# Convert to acceleration using Newton's laws: F=ma, action has opposite reaction
accelerations = np.zeros_like(crds) # x,y,z accelerations for each atom
for i, bond in enumerate(bond_pairs):
atom_0, atom_1 = bond[0], bond[1]
displacement = minimum_image_displacement(crds[atom_0, :], crds[atom_1, :], box_size)
distance = np.linalg.norm(displacement)
force_direction = displacement / distance
force_magnitude = - bond_params[i, 1] * (distance - bond_params[i, 0])
force = force_magnitude * force_direction
accelerations[atom_0] += force / masses[atom_0]
accelerations[atom_1] -= force / masses[atom_1]
return accelerations
def update_coordinates(crds, accelerations, velocities, time_step, box_size):
"""Update coordinates using: x(t+dt)=x(t)+v(t)*dt+0.5*a(t)*dt**2"""
crds += velocities * time_step + 0.5 * accelerations * time_step ** 2
crds = apply_periodic_boundary_condition(crds, box_size)
return crds
def update_velocities(velocities, accelerations_start, accelerations_end, time_step):
"""Update velocities using: v(t+dt)=v(t)+0.5*dt*(a(t)+a(t+dt))"""
velocities += 0.5 * time_step * (accelerations_start + accelerations_end)
return velocities
def write_output_files(time_step, num_atoms, names, crds, energies):
"""Writes coordinates in XYZ file type to 'coordinates.xyz'
Write kinetic, potential and total energies to 'energies.dat'"""
# Write XYZ file
xyz_file.write("{0} \n\n".format(num_atoms))
for i, crd in enumerate(crds):
xyz = crd * bohr
xyz_file.write("{0} {1:.6f} {2:.6f} {3:.6f} \n".format(names[i], xyz[0], xyz[1], xyz[2]))
# Write energies
energy = energies * hartree * avo * 1e-3
energy_file.write("{0} {1} {2} {3} \n".format(time_step, energy[0], energy[1], energy[2]))
def main():
"""Handle input/output and molecular dynamics velocity-verlet algorithm"""
# Display opening message
display_header()
# Read user parameters from input file
input_parameters = get_input_parameters()
# Unpack parameters
time_total = input_parameters["time_total"]
time_step = input_parameters["time_step"]
box_size = input_parameters["box_size"]
write_freq = input_parameters["write_freq"]
num_atoms = input_parameters["num_atoms"]
displace_atoms = input_parameters["random_displacement"]
displacement_limit = input_parameters["random_displacement_limit"]
atom_names = input_parameters["atom_names"]
atom_masses = input_parameters["atom_masses"]
atom_crds = input_parameters["atom_crds"]
bond_pairs = input_parameters["bond_pairs"]
bond_params = input_parameters["bond_params"]
# Recentre coordinates and apply displacements
atom_crds = initialise_coordinates(atom_crds, box_size, displace_atoms, displacement_limit)
# Initialise Molecular Dynamics Variables
num_steps = int(time_total / time_step) # total number of steps of md
write_steps = int(write_freq / time_step) # number of steps to write out results
atom_vels = np.zeros_like(atom_crds) # velocities in x,y,z directions for all atoms
atom_acc_start = atom_acc_end = np.zeros_like(atom_crds) # accelerations at start and end of time step
atom_acc_start = update_accelerations(atom_masses, atom_crds, bond_pairs, bond_params, box_size) # calculate initial accelerations
system_energy = calculate_energy(atom_masses, atom_crds, atom_vels, bond_pairs, bond_params, box_size) # calculate initial energies
write_output_files(0, num_atoms, atom_names, atom_crds, system_energy)
# Molecular dynamics
print("Performing molecular dynamics simulation")
for step in range(1, num_steps+1):
# Velocity - Verlet algorithm
atom_crds = update_coordinates(atom_crds, atom_acc_start, atom_vels, time_step, box_size)
atom_acc_end = update_accelerations(atom_masses, atom_crds, bond_pairs, bond_params, box_size)
atom_vels = update_velocities(atom_vels, atom_acc_start, atom_acc_end, time_step)
atom_acc_start = atom_acc_end
# Write coordinates and energies
if step % write_steps == 0:
system_energy = calculate_energy(atom_masses, atom_crds, atom_vels, bond_pairs, bond_params, box_size)
write_output_files(step, num_atoms, atom_names, atom_crds, system_energy)
print("Completion: {:.3f}%".format(100 * float(step) / num_steps))
print_dashed_line()
print("Simulation complete \nCoordinates written to coordinates.xyz \nEnergies written to energies.dat")
print_dashed_line()
# Execute code if main file
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
28008,
25726,
23172,
38275,
33806,
6127,
220,
198,
41972,
2864,
416,
3271,
286,
383,
9997,
1146,
21393,
198,
5492,
3401,
1958,
0,
198,
37811,
198,
6738,
11593,... | 2.469327 | 5,037 |
if __name__ == '__main__':
coins = [1, 2, 10]
price = 28
print(minimal_number_of_coins(coins, price))
| [
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
10796,
796,
685,
16,
11,
362,
11,
838,
60,
198,
220,
220,
220,
2756,
796,
2579,
198,
220,
220,
220,
3601,
7,
1084,
4402,
62,
17618,
62,
1659,
... | 2.25 | 52 |
from __clrclasses__.System.Security.Principal import GenericIdentity
from __clrclasses__.System.Security.Principal import GenericPrincipal
from __clrclasses__.System.Security.Principal import IdentityNotMappedException
from __clrclasses__.System.Security.Principal import IdentityReference
from __clrclasses__.System.Security.Principal import IdentityReferenceCollection
from __clrclasses__.System.Security.Principal import IIdentity
from __clrclasses__.System.Security.Principal import IPrincipal
from __clrclasses__.System.Security.Principal import NTAccount
from __clrclasses__.System.Security.Principal import PrincipalPolicy
from __clrclasses__.System.Security.Principal import SecurityIdentifier
from __clrclasses__.System.Security.Principal import TokenAccessLevels
from __clrclasses__.System.Security.Principal import TokenImpersonationLevel
from __clrclasses__.System.Security.Principal import WellKnownSidType
from __clrclasses__.System.Security.Principal import WindowsAccountType
from __clrclasses__.System.Security.Principal import WindowsBuiltInRole
from __clrclasses__.System.Security.Principal import WindowsIdentity
from __clrclasses__.System.Security.Principal import WindowsImpersonationContext
from __clrclasses__.System.Security.Principal import WindowsPrincipal
| [
6738,
11593,
565,
81,
37724,
834,
13,
11964,
13,
24074,
13,
42904,
8521,
1330,
42044,
7390,
26858,
198,
6738,
11593,
565,
81,
37724,
834,
13,
11964,
13,
24074,
13,
42904,
8521,
1330,
42044,
42904,
8521,
198,
6738,
11593,
565,
81,
37724,... | 3.844311 | 334 |
import math
import numpy as np
from nltk.metrics.association import TOTAL
from sklearn import metrics
from matplotlib.mlab import entropy
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
2528,
74,
13,
4164,
10466,
13,
562,
41003,
1330,
36247,
198,
6738,
1341,
35720,
1330,
20731,
198,
6738,
2603,
29487,
8019,
13,
4029,
397,
1330,
40709,
198,
220,
220,
220,... | 2.333333 | 75 |
from django import forms
from ..models import User
from django.contrib.auth.forms import UserCreationForm
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
11485,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
11787,
12443,
341,
8479,
628
] | 3.821429 | 28 |
'''Bulbasaur, Ivysaur and Venusaur'''
from __init__ import Pokemon
Bulbasaur = Pokemon('generation_1/001.txt')
print(Bulbasaur)
| [
7061,
6,
33481,
12093,
2899,
11,
16975,
893,
2899,
290,
21094,
2899,
7061,
6,
198,
6738,
11593,
15003,
834,
1330,
14878,
198,
198,
33481,
12093,
2899,
796,
14878,
10786,
20158,
62,
16,
14,
8298,
13,
14116,
11537,
198,
4798,
7,
33481,
... | 2.866667 | 45 |
import django
from django.conf import settings
| [
11748,
42625,
14208,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
628
] | 4 | 12 |
#
# @lc app=leetcode id=207 lang=python3
#
# [207] Course Schedule
#
# @lc code=start
# @lc code=end
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
22745,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
22745,
60,
20537,
19281,
198,
2,
198,
198,
2,
2488,
44601,
2438,
28,
9688,
628,
198,
2,
2488,
44601,
2438,
28,
437,
198... | 2.311111 | 45 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for nipyapi security module."""
from __future__ import absolute_import
import pytest
from tests import conftest
import nipyapi
# Tells pytest to skip this module of security testing is not enabled.
pytestmark = pytest.mark.skipif(not conftest.test_security, reason='test_security disabled in Conftest')
# Useful for manual testing
# if conftest.test_security:
# test_host = nipyapi.config.default_host
# nipyapi.utils.set_endpoint('https://' + test_host + ':18443/nifi-registry-api', True, True)
# nipyapi.utils.set_endpoint('https://' + test_host + ':9443/nifi-api', True, True)
# TODO: Test adding users to existing set of users and ensuring no clobber
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
51,
3558,
329,
299,
541,
88,
15042,
2324,
8265,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
... | 2.815094 | 265 |
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm
from django.views.generic import CreateView
from django.views.generic import RedirectView
admin.autodiscover()
from django.conf import settings
from django.conf.urls.static import static
import django.contrib.auth.views
from mentoring.views import views
from mentoring.views import honors_admin
# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
urlpatterns = [
url(r'^$', views.home),
url(r'^admin/', admin.site.urls),
url(r'^(?i)honorsAdmin/$', honors_admin.home),
url(r'^(?i)honorsAdmin/mentors/$', honors_admin.mentors),
url(r'^(?i)honorsAdmin/mentor/([0-9]+)/view', honors_admin.mentor_detail),
url(r'^(?i)honorsAdmin/mentor/([0-9]+)/details', honors_admin.mentor_detail_page),
url(r'^(?i)honorsAdmin/mentor/([0-9]+)/approve', honors_admin.mentor_approve),
url(r'^(?i)honorsAdmin/mentor/([0-9]+)/deny', honors_admin.mentor_deny),
url(r'^(?i)honorsAdmin/mentees/$', honors_admin.mentees),
url(r'^(?i)honorsAdmin/mentee/([0-9]+)/view', honors_admin.mentee_detail),
url(r'^(?i)honorsAdmin/mentee/([0-9]+)/details', honors_admin.mentee_detail_page),
url(r'^(?i)honorsAdmin/mentee/([0-9]+)/approve', honors_admin.mentee_approve),
url(r'^(?i)honorsAdmin/mentee/([0-9]+)/deny', honors_admin.mentee_deny),
url(r'^(?i)honorsAdmin/mentee/([0-9]+)/getmatches', honors_admin.mentee_get_matches),
url(r'^(?i)honorsAdmin/mentee/([0-9]+)/getallmatches$', honors_admin.mentee_get_all_matches),
url(r'^(?i)honorsAdmin/mentee/([0-9]+)/getallmatcheslist', honors_admin.mentee_get_all_matches_list),
url(r'^(?i)honorsAdmin/createPairing', honors_admin.create_pairing),
url(r'^(?i)honorsAdmin/resendPairing', honors_admin.resend_pairing_email),
url(r'^(?i)honorsAdmin/endPairing', honors_admin.end_pairing),
url(r'^(?i)honorsAdmin/feedbacks/([0-9]+)/view/', honors_admin.pairing_feedback),
url(r'^(?i)honorsAdmin/pairs/$', honors_admin.pairings),
url(r'^(?i)honorsAdmin/export/$', honors_admin.export),
url(r'^(?i)honorsAdmin/invite/$', honors_admin.invitations),
url(r'^(?i)honorsAdmin/send_invite/$', honors_admin.send_invite),
url(r'^(?i)honorsAdmin/preview_invite/$', honors_admin.preview_invite),
# Default django stuff
url(r'^(?i)accounts/logout/$', django.contrib.auth.views.logout),
url(r'^(?i)accounts/login/$', django.contrib.auth.views.login, {'template_name': 'admin/login.html'}),
url(r'^(?i)accounts/$', RedirectView.as_view(url='/')),
url(r'^(?i)thankyoumentor/', views.thank_you_mentor),
url(r'^(?i)thankyoumentee/', views.thank_you_mentee),
url(r'^(?i)newmentor/', views.new_mentor),
url(r'^(?i)newmentee/', views.new_mentee),
url(r'^(?i)confirmation/', views.confirm_account),
url(r'^(?i)feedback/', views.pairing_feedback),
] # + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
11787,
12443,
341,
8479,
198,
6738,
42625,... | 2.308511 | 1,316 |
import pytest
import potemkin
import boto3
from potemkin.configservice import evaluate_config_rule_and_wait_for_resource, config_rule_wait_for_resource, config_rule_wait_for_absent_resources, config_rule_wait_for_compliance_results
@potemkin.CloudFormationStack('test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack')
@pytest.mark.xfail(reason="deliberate fail")
@potemkin.CloudFormationStack('test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack')
@potemkin.CloudFormationStack(
'test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack'
)
@potemkin.CloudFormationStack(
'test/integration/test_templates/eip.yml',
stack_name_stem='EipTestStack'
)
| [
11748,
12972,
9288,
198,
11748,
1787,
368,
5116,
198,
11748,
275,
2069,
18,
198,
6738,
1787,
368,
5116,
13,
11250,
15271,
1330,
13446,
62,
11250,
62,
25135,
62,
392,
62,
17077,
62,
1640,
62,
31092,
11,
4566,
62,
25135,
62,
17077,
62,
... | 2.40367 | 327 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
12131,
11,
9084,
859,
13859,
64,
372,
290,
20420,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
... | 3.38806 | 67 |
'''
Context Processors do some pretty great work, like default arguments supplied
to templates when they're rendered. kind of like Macros in Flask, but even more
powerful.
'''
import string
from django.utils.datastructures import MultiValueDictKeyError
from .forms import SearchForm
from .static_vars import COLORS, GROUPS
def search_form(request):
    '''Render the site-wide search form context for every template.

    Pulls the previously submitted query (if any) out of the POSTed form
    data and presents it as the initial field value so the form
    round-trips the user's input.  Templates still use a <form> wrapper
    to control the action.
    '''
    # QueryDict.get returns a default instead of raising
    # MultiValueDictKeyError, so no try/except is needed; '' matches the
    # fallback the old except branch produced.
    query = request.POST.get('search', '')
    return {
        'SearchForm': SearchForm(initial={'search': query}),
    }
def alphabet(request):
    '''Expose the uppercase A-Z alphabet to every rendered template.'''
    return {'alphabet': string.ascii_uppercase}
def groups(request):
    '''Expose the static mineral-group choices to every template.'''
    return {
        'groups': GROUPS,
    }
def colors(request):
    '''Expose the static list of available colors to every template.'''
    return {
        'colors': COLORS,
    }
| [
7061,
6,
198,
21947,
10854,
669,
466,
617,
2495,
1049,
670,
11,
588,
4277,
7159,
14275,
198,
1462,
24019,
618,
484,
821,
15111,
13,
1611,
286,
588,
4100,
4951,
287,
46947,
11,
475,
772,
517,
198,
44548,
13,
198,
7061,
6,
198,
11748,... | 2.948718 | 351 |
"""
pushover simple api
~~~~~~~~~~~~~~~~~~~
"""
__author__ = "toloy"
from .pushover import Pushover, PushoverException
| [
37811,
201,
198,
220,
220,
220,
220,
4574,
2502,
2829,
40391,
201,
198,
220,
220,
220,
220,
220,
27156,
4907,
93,
201,
198,
37811,
201,
198,
201,
198,
834,
9800,
834,
796,
366,
83,
349,
726,
1,
201,
198,
201,
198,
6738,
764,
14689... | 2.465517 | 58 |
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1beta1CertificateSigningRequestConditionDict generated type."""
import datetime
from typing import TypedDict
# Typed dictionary for a V1beta1 CertificateSigningRequestCondition object.
# The functional TypedDict form is used by the generator; total=False makes
# every key optional, so partially-populated condition dicts type-check.
V1beta1CertificateSigningRequestConditionDict = TypedDict(
    "V1beta1CertificateSigningRequestConditionDict",
    {
        # Time of the last update to this condition (a datetime object).
        "lastUpdateTime": datetime.datetime,
        # Human-readable message describing the condition.
        "message": str,
        # Machine-readable reason string for the condition.
        "reason": str,
        # Condition type string — presumably values like "Approved"/"Denied";
        # verify against the Kubernetes API reference.
        "type": str,
    },
    total=False,
)
| [
2,
6127,
7560,
416,
4600,
28004,
6048,
713,
5235,
44646,
8410,
5626,
48483,
13,
198,
37811,
53,
16,
31361,
16,
37608,
22460,
11712,
278,
18453,
48362,
35,
713,
7560,
2099,
526,
15931,
198,
11748,
4818,
8079,
198,
6738,
19720,
1330,
1713... | 2.608696 | 161 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
from azure_devtools.perfstress_tests import PerfStressTest, get_random_bytes
from azure.servicebus import ServiceBusClient, ServiceBusReceiveMode, ServiceBusMessage
from azure.servicebus.aio import ServiceBusClient as AsyncServiceBusClient
from azure.servicebus.aio.management import ServiceBusAdministrationClient
MAX_QUEUE_SIZE = 40960
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 4.75 | 148 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

nx, ny = (1000, 1000)
x = np.linspace(-2, 1, nx)
y = np.linspace(-1.5, 1.5, ny)
X, Y = np.meshgrid(x, y)
cgrid = X + 1j * Y

# For some numbers c, iterating z -> z^2 + c from z = 0 diverges; for others
# it stays bounded.  Recording the iteration at which each point blows past
# the escape threshold paints the Mandelbrot set.
Z = 0 * cgrid
ZC = np.zeros(cgrid.shape)  # escape-iteration count per grid point
for i in range(1, 50):
    Z = np.power(Z, 2) + cgrid
    # BUG FIX: Z is complex, and ordering comparisons on complex arrays
    # (Z > 1000) raise a TypeError in NumPy -- compare the magnitude instead.
    ZC[np.abs(Z) > 1000] = i
ZC = np.abs(ZC)

# fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
# surf = ax.plot_surface(X, Y, Z, linewidth=0, antialiased=False, cmap=cm.coolwarm)


def onclick(event):
    """Print the data coordinates of a mouse click on the plot.

    BUG FIX: this callback was registered below but never defined, so the
    script died with a NameError at mpl_connect before the window opened.
    """
    print(event.xdata, event.ydata)


mycount = [1]
# Get the mouse click
print(ZC)
fig, ax = plt.subplots()
plt.pcolormesh(X, Y, ZC)
fig.canvas.mpl_connect('button_press_event', onclick)
# fig.canvas.mpl_connect('button_press_event', lambda event: onclick(event, mycount))
'''
ax.set_xlim(-4.01, 4.01)
ax.set_ylim(-4.01, 4.01)
'''
plt.show()
'''
value = np.abs(grid)**(-1)
print(value)
value.flatten()
colour = np.stack((value,value,value))
print(colour)
fig = plt.figure()
ax = plt.axes(xlim=(-1,1),ylim=(-1,1))
ax.scatter(xv,yv,c=colour)
'''
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
198,
6738,
2603,
29487,
8019,
1330,
12067,
198,
198,
77,
87,
11,
299,
88,
796,
357,
12825,
11,
12825,
8,
198,
87,
796,
45941,
13,
2160... | 2.098077 | 520 |
'''initialize'''
from .moocdl import MOOCDL | [
7061,
6,
36733,
1096,
7061,
6,
198,
6738,
764,
5908,
420,
25404,
1330,
13070,
4503,
19260
] | 2.6875 | 16 |
from chainerchem.links import embed_atom_id # NOQA
from chainerchem.links import graph_linear # NOQA
from chainerchem.links.embed_atom_id import EmbedAtomID # NOQA
from chainerchem.links.graph_linear import GraphLinear # NOQA
| [
6738,
6333,
263,
15245,
13,
28751,
1330,
11525,
62,
37696,
62,
312,
220,
1303,
8005,
48,
32,
198,
6738,
6333,
263,
15245,
13,
28751,
1330,
4823,
62,
29127,
220,
1303,
8005,
48,
32,
198,
198,
6738,
6333,
263,
15245,
13,
28751,
13,
20... | 2.924051 | 79 |
from celery import Task
from kombu.serialization import (
dumps as kombu_dumps,
loads as kombu_loads,
)
from ichnaea.cache import redis_pipeline
from ichnaea.db import db_worker_session
| [
6738,
18725,
1924,
1330,
15941,
198,
6738,
479,
2381,
84,
13,
46911,
1634,
1330,
357,
198,
220,
220,
220,
45514,
355,
479,
2381,
84,
62,
67,
8142,
11,
198,
220,
220,
220,
15989,
355,
479,
2381,
84,
62,
46030,
11,
198,
8,
198,
198,... | 2.684932 | 73 |
from discord.ext import commands
from discord.ext.commands import Context
from diceBot import roller
class Utilities(commands.Cog):
    """
    General utility commands: status check, source/feedback links and
    dice rolling.
    """

    @commands.command()
    async def ping(self, ctx: Context):
        """
        Status check: reports websocket latency, then edits the reply to
        add the measured round-trip response time.
        """
        import time
        start_time = time.time()
        message = await ctx.send('pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms`')
        end_time = time.time()
        await message.edit(content='pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms` ' +
                                   '`Response time: ' + str(int((end_time - start_time) * 1000)) + 'ms`')

    @commands.command()
    async def source(self, ctx: Context):
        """
        Print a link to the source code
        """
        # BUG FIX: the original relied on implicit string concatenation with
        # no separator, sending "...Mottersheadhttps://..." -- add a newline.
        await ctx.send(content='Created by Philip Mottershead\n'
                               'https://github.com/PhilipMottershead/Dicebot')

    @commands.command()
    async def feedback(self, ctx: Context):
        """
        Report feedback or issues with the bot
        """
        await ctx.send('If the bot is broken or you have any feedback you\'d like to submit please create a issue on '
                       'GitHub: https://github.com/PhilipMottershead/Dicebot')

    @commands.command()
    async def r(self, ctx: Context):
        """
        Roll dice: parse the message content and reply with the result.
        """
        # DOC FIX: the previous docstring was a copy-paste of feedback's;
        # this command actually delegates to the dice roller.
        await ctx.send(roller.rollDices(ctx.message.content))
| [
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
1330,
30532,
198,
6738,
17963,
20630,
1330,
24471,
198,
198,
4871,
41086,
7,
9503,
1746,
13,
34,
519,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
... | 2.284404 | 654 |
import feedparser
import time
class RSSReader:
"""Class built upon feedparser to get new items from an rss feed"""
DATA_FILE = 'RSSData.txt'
DATA_FILE = 'RSSData.txt'
| [
11748,
3745,
48610,
198,
11748,
640,
198,
198,
4871,
25012,
33634,
25,
198,
220,
220,
220,
37227,
9487,
3170,
2402,
3745,
48610,
284,
651,
649,
3709,
422,
281,
374,
824,
3745,
37811,
198,
220,
220,
220,
42865,
62,
25664,
796,
705,
49,... | 2.859375 | 64 |
__author__ = 'Kalyan'
# this is a sample module for the understanding_modules assignment.
| [
834,
9800,
834,
796,
705,
42,
3400,
272,
6,
628,
198,
2,
428,
318,
257,
6291,
8265,
329,
262,
4547,
62,
18170,
16237,
13,
198
] | 3.68 | 25 |
from blendvis.primitives.primitives import FontPrimitive, LinePrimitive, CubePrimitive, \
CameraPrimitive, SpherePrimitive, CurvePrimitive, GreasePencilPrimitive, Primitive | [
6738,
13516,
4703,
13,
19795,
20288,
13,
19795,
20288,
1330,
24060,
23828,
1800,
11,
6910,
23828,
1800,
11,
23315,
23828,
1800,
11,
3467,
198,
220,
220,
220,
20432,
23828,
1800,
11,
31798,
23828,
1800,
11,
46300,
23828,
1800,
11,
11955,
... | 3.666667 | 48 |
import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
    """
    Return historical bar data for the given symbols.

    symbols may be a list of tickers or a single comma-separated string,
    e.g. "MSFT,AMZN,GOOG".  Returns a dict mapping each symbol to a pandas
    DataFrame indexed by bar time (converted to America/Indiana/Petersburg).
    """
    # BUG FIX: the docstring promised a comma-separated string, but
    # iterating a plain str walks it character by character -- split first.
    if isinstance(symbols, str):
        symbols = [s.strip() for s in symbols.split(",") if s.strip()]
    df_data_tickers = {}
    for symbol in symbols:
        bar_url = endpoint + "/stocks/{}/bars".format(symbol)
        params = {"start": start, "limit": limit, "timeframe": timeframe}
        data = {"bars": [], "next_page_token": '', "symbol": symbol}
        # Page through the bars endpoint until the API reports no next page.
        while True:
            r = requests.get(bar_url, headers=headers, params=params)
            r = r.json()
            data["bars"] += r["bars"]
            if r["next_page_token"] is None:
                break
            params["page_token"] = r["next_page_token"]
            data["next_page_token"] = r["next_page_token"]
        df_data = pd.DataFrame(data["bars"])
        df_data.rename({"t": "time", "o": "open", "h": "high", "l": "low", "c": "close", "v": "volume"},
                       axis=1, inplace=True)
        df_data["time"] = pd.to_datetime(df_data["time"])
        df_data.set_index("time", inplace=True)
        df_data.index = df_data.index.tz_convert("America/Indiana/Petersburg")
        df_data_tickers[symbol] = df_data
    return df_data_tickers
def get_historical_data(ticker_list, start_date, end_date=None, limit=10000, timeframe="1Day"):
    """
    Return historical bar data for every symbol in ticker_list.

    ticker_list is an iterable of ticker strings, e.g. ["MSFT", "AMZN"].
    Returns a dict mapping each symbol to a pandas DataFrame indexed by
    bar time (converted to America/New_York); symbols with no usable
    bars are silently left out, matching the original best-effort style.

    * timeframe - Timeframe for the aggregation. Available values are:
      `1Min`, `1Hour`, `1Day`
    https://alpaca.markets/docs/api-documentation/api-v2/market-data/alpaca-data-api-v2/historical/#bars
    """
    df_data_tickers = {}
    for symbol in ticker_list:
        bar_url = endpoint + "/stocks/{}/bars".format(symbol)
        params = {"start": start_date, "end": end_date, "limit": limit, "timeframe": timeframe}
        data = {"bars": [], "next_page_token": '', "symbol": symbol}
        # Page through the bars endpoint until no further pages remain.
        while True:
            r = requests.get(bar_url, headers=headers, params=params)
            r = r.json()
            # BUG FIX: the original bare `except: break` hid every error.
            # Only a response without the expected keys (e.g. an API error
            # payload) should end pagination for this symbol.
            try:
                bars = r["bars"]
                token = r["next_page_token"]
            except KeyError:
                break
            data["bars"] += bars
            if token is None:
                break
            params["page_token"] = token
            data["next_page_token"] = token
        df_data = pd.DataFrame(data["bars"])
        df_data.rename({"t": "time", "o": "open", "h": "high", "l": "low", "c": "close", "v": "volume"},
                       axis=1, inplace=True)
        try:
            df_data["time"] = pd.to_datetime(df_data["time"])
            df_data.set_index("time", inplace=True)
            df_data.index = df_data.index.tz_convert("America/New_York")
            df_data_tickers[symbol] = df_data
        except (KeyError, TypeError):
            # No usable bars for this symbol (an empty DataFrame has no
            # "time" column) or a naive index that cannot be tz-converted;
            # skip it instead of crashing the whole download.
            pass
        print("---- Created for [{}]".format(symbol))
    return df_data_tickers
| [
11748,
33918,
201,
198,
11748,
7007,
201,
198,
11748,
19798,
292,
355,
279,
67,
220,
201,
198,
11748,
2639,
5459,
201,
198,
201,
198,
201,
198,
2,
3497,
978,
79,
22260,
7824,
327,
445,
1843,
201,
198,
437,
4122,
796,
366,
5450,
1378... | 2.049555 | 1,796 |
import sys, os
from dotenv import dotenv_values

# Resolve the TRex installation from the .env file and make its interactive
# automation package importable.
config = dotenv_values(".env")
cur_dir = os.path.dirname(__file__)

trex_path = f"{config['TREX_LOCATION']}/{config['TREX_VERSION']}"
interactive = os.path.abspath(f"{trex_path}/automation/trex_control_plane/interactive")
sys.path.insert(0, os.path.abspath(interactive))

STL_PROFILES_PATH = os.path.join(f"{trex_path}/stl")
EXT_LIBS_PATH = os.path.abspath(f"{trex_path}/external_libs")

# BUG FIX: assert statements are stripped when Python runs with -O, so they
# must not guard required configuration -- raise an explicit error instead.
if not os.path.isdir(STL_PROFILES_PATH):
    raise RuntimeError("Could not determine STL profiles path")
if not os.path.isdir(EXT_LIBS_PATH):
    raise RuntimeError("Could not determine external_libs path")
| [
11748,
25064,
11,
28686,
198,
6738,
16605,
24330,
1330,
16605,
24330,
62,
27160,
198,
198,
11250,
796,
16605,
24330,
62,
27160,
7,
1911,
24330,
4943,
198,
198,
22019,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
... | 2.45679 | 243 |
#coding=utf-8
import sys
import os
from os.path import abspath, dirname
sys.path.append(abspath(dirname(__file__)))
import tkinter
import tkinter.filedialog
from tkinter import *
import Fun
ElementBGArray={}
ElementBGArray_Resize={}
ElementBGArray_IM={}
from PyPDF2 import PdfFileReader, PdfFileWriter
| [
2,
66,
7656,
28,
40477,
12,
23,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
220,
220,
28686,
13,
6978,
1330,
2352,
6978,
11,
26672,
3672,
198,
17597,
13,
6978,
13,
33295,
7,
397,
2777,
776,
7,
15908,
3672,
7,
834,
7753,
834,
... | 2.72807 | 114 |
from django.test import Client
from django.test import RequestFactory, TestCase
from django.contrib.auth import get_user_model
from cart import views
| [
6738,
42625,
14208,
13,
9288,
1330,
20985,
198,
6738,
42625,
14208,
13,
9288,
1330,
19390,
22810,
11,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
6383,
1330,
5009,
198,
220,... | 3.444444 | 45 |
import enum
import typing as ta
from omnibus import collections as col
from omnibus import dataclasses as dc
from .base import Expr
from .base import Identifier
from .base import Node
from .base import QualifiedNameNode
from .base import SetQuantifier
from .base import SortItem
| [
11748,
33829,
198,
11748,
19720,
355,
20486,
198,
198,
6738,
22284,
26333,
1330,
17268,
355,
951,
198,
6738,
22284,
26333,
1330,
4818,
330,
28958,
355,
30736,
198,
198,
6738,
764,
8692,
1330,
1475,
1050,
198,
6738,
764,
8692,
1330,
11440,... | 3.734177 | 79 |
# -*- coding: utf8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
628,
628,
628,
628,
198
] | 1.823529 | 17 |
balance = 999999
annualInterestRate = 0.18

monthlyInterestRate = annualInterestRate / 12.0

# Bisection bounds for the fixed monthly payment: balance/12 ignores
# interest entirely (certainly too low), while the upper bound assumes the
# whole balance compounds for a full year before being split 12 ways.
monthlyLower = balance / 12
monthlyUpper = (balance * (1 + monthlyInterestRate) ** 12) / 12.0

while True:
    # Midpoint candidate payment; simulate one year of payments with it.
    payment = (monthlyUpper + monthlyLower) / 2.0
    remaining = balance
    for _month in range(12):
        unpaid = remaining - payment
        remaining = unpaid + monthlyInterestRate * unpaid
    if remaining < -0.01:
        monthlyUpper = payment      # overpaid -- try a smaller payment
    elif remaining > 0.01:
        monthlyLower = payment      # debt left -- try a larger payment
    else:
        break                       # balance within a cent of zero

print("Lowest payment: {:0.2f}".format(payment))
20427,
796,
36006,
17032,
198,
1236,
723,
19302,
32184,
796,
657,
13,
1507,
198,
198,
8424,
306,
19302,
32184,
796,
5079,
19302,
32184,
14,
1065,
13,
15,
198,
8424,
306,
31426,
796,
5236,
14,
1065,
198,
8424,
306,
52,
2848,
796,
357,
... | 2.690083 | 242 |
# -*- coding: utf-8 -*-
import time
from pytest import mark
@mark.parametrize('with_message', [True, False])
@mark.parametrize('hard_deployment', [True, False])
@mark.parametrize('final_release_state', [
'DEPLOYED', 'FAILED', 'UNKNOWN', 'TEMP_DEPLOYMENT_FAILURE'
])
@mark.parametrize('maintenance', [True, False])
@mark.parametrize('payload', [
None, {'stories': {'foo'}, 'services': ['bar', 'baz']}
])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
640,
198,
198,
6738,
12972,
9288,
1330,
1317,
628,
198,
31,
4102,
13,
17143,
316,
380,
2736,
10786,
4480,
62,
20500,
3256,
685,
17821,
11,
10352,
12962,
198,
31,
... | 2.426901 | 171 |