| code (string, length 22 to 1.05M) | apis (list, length 1 to 3.31k) | extract_api (string, length 75 to 3.25M) |
|---|---|---|
import yfinance as yf
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from arch import arch_model
from volatility.utils import get_percent_chg
start = datetime(2000, 1, 1)
end = datetime(2020, 9, 11)
symbol = 'SPY'
tickerData = yf.Ticker(symbol)
df = tickerData.hist... | [
"datetime.datetime",
"pandas.Series",
"numpy.sqrt",
"arch.arch_model",
"volatility.utils.get_percent_chg",
"yfinance.Ticker",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((205, 225), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (213, 225), False, 'from datetime import datetime\n'), ((232, 253), 'datetime.datetime', 'datetime', (['(2020)', '(9)', '(11)'], {}), '(2020, 9, 11)\n', (240, 253), False, 'from datetime import datetime\n'), ((282, 299), 'y... |
# Copyright 2003, 2007 by <NAME>. <EMAIL>
# All rights reserved. This code is part of the Biopython
# distribution and governed by its license.
# Please see the LICENSE file that should have been included as part
# of this package.
import math
def lcc_mult(seq,wsize):
"""Local Composition Complexity (LCC) value... | [
"math.log"
] | [((750, 761), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (758, 761), False, 'import math\n'), ((4891, 4902), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (4899, 4902), False, 'import math\n')] |
import logging
import random
from collections import namedtuple
from typing import NamedTuple
from queue import PriorityQueue
from objects import BaseObject
from constants import NORTH, SOUTH, EAST, WEST
Space = namedtuple("Space", ["x", "y"])
# TODO: Big TODO - Re-implement space with z/t value for terrain???
# Sp... | [
"queue.PriorityQueue",
"collections.namedtuple",
"logging.info"
] | [((216, 247), 'collections.namedtuple', 'namedtuple', (['"""Space"""', "['x', 'y']"], {}), "('Space', ['x', 'y'])\n", (226, 247), False, 'from collections import namedtuple\n'), ((2226, 2241), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (2239, 2241), False, 'from queue import PriorityQueue\n'), ((3040, 30... |
"""
Copyright European Organization for Nuclear Research (CERN)
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Authors:
- <NAME>, <<EMAIL>>, 2014-2... | [
"alembic.op.drop_index",
"alembic.op.create_index"
] | [((716, 813), 'alembic.op.create_index', 'create_index', (['"""REQUESTS_TYP_STA_UPD_IDX"""', '"""requests"""', "['request_type', 'state', 'updated_at']"], {}), "('REQUESTS_TYP_STA_UPD_IDX', 'requests', ['request_type',\n 'state', 'updated_at'])\n", (728, 813), False, 'from alembic.op import create_index, drop_index\... |
from pywizard.userSettings import settings
import scipy as sp
class PreEmphasizer(object):
@classmethod
def processBuffer(cls, buf):
preEnergy = buf.energy()
alpha = cls.alpha()
unmodifiedPreviousSample = buf.samples[0]
tempSample = None
first_sample = buf.samples[0]
... | [
"scipy.sqrt",
"scipy.insert"
] | [((408, 447), 'scipy.insert', 'sp.insert', (['buf.samples', '(0)', 'first_sample'], {}), '(buf.samples, 0, first_sample)\n', (417, 447), True, 'import scipy as sp\n'), ((670, 701), 'scipy.sqrt', 'sp.sqrt', (['(preEnergy / postEnergy)'], {}), '(preEnergy / postEnergy)\n', (677, 701), True, 'import scipy as sp\n')] |
import numpy as np
import random
import numexpr as ne
def gen_layer(rin, rout, nsize):
R = 1.0
phi = np.random.uniform(0, 2*np.pi, size=(nsize))
costheta = np.random.uniform(-1, 1, size=(nsize))
u = np.random.uniform(rin**3, rout**3, size=(nsize))
theta = np.arccos( costheta )... | [
"numpy.multiply",
"numpy.arccos",
"numexpr.evaluate",
"numpy.power",
"numpy.linalg.norm",
"numpy.max",
"numpy.square",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.true_divide",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.cbrt",
"numpy.shape",
"py3Dmol.view"
] | [((119, 162), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {'size': 'nsize'}), '(0, 2 * np.pi, size=nsize)\n', (136, 162), True, 'import numpy as np\n'), ((182, 218), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'nsize'}), '(-1, 1, size=nsize)\n', (199, 218), True, 'i... |
"""Strategies for selecting actions for value-based policies."""
from abc import ABC, abstractmethod
from typing import List, Optional
from numpy.typing import ArrayLike
import numpy as np
from rl.action_selectors import (
ActionSelector,
DeterministicActionSelector,
UniformDiscreteActionSelector,
Nois... | [
"numpy.sqrt",
"numpy.random.default_rng",
"rl.action_selectors.DeterministicActionSelector",
"numpy.argmax",
"numpy.sum",
"rl.action_selectors.NoisyActionSelector",
"numpy.maximum"
] | [((996, 1031), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (1017, 1031), True, 'import numpy as np\n'), ((1317, 1359), 'rl.action_selectors.DeterministicActionSelector', 'DeterministicActionSelector', (['greedy_action'], {}), '(greedy_action)\n', (1344, 1359), False,... |
from unittest import TestSuite, TextTestRunner
import hashlib
def run(test):
suite = TestSuite()
suite.addTest(test)
TextTestRunner().run(suite)
def hash256(s):
'''two rounds of sha256'''
return hashlib.sha256(hashlib.sha256(s).digest()).digest()
| [
"unittest.TestSuite",
"hashlib.sha256",
"unittest.TextTestRunner"
] | [((92, 103), 'unittest.TestSuite', 'TestSuite', ([], {}), '()\n', (101, 103), False, 'from unittest import TestSuite, TextTestRunner\n'), ((132, 148), 'unittest.TextTestRunner', 'TextTestRunner', ([], {}), '()\n', (146, 148), False, 'from unittest import TestSuite, TextTestRunner\n'), ((235, 252), 'hashlib.sha256', 'ha... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program to denoise a short speech sample using a pre-trained autoencoder.
PATH_TO_TRAINED_MODEL : path to the pre-trained model (.h5)
PATH_TO_AUDIO : path to the noisy audio file (.wav)
PATH_TO_SAVE : path to save the denoised audio output (.wav)
@author: nk
"""
#%% ... | [
"numpy.reshape",
"librosa.griffinlim",
"soundfile.write",
"numpy.random.randint",
"tensorflow.keras.models.load_model",
"librosa.stft",
"librosa.load"
] | [((3027, 3084), 'soundfile.write', 'soundfile.write', (['PATH_TO_SAVE', 'denoised', 'dnae.SAMPLE_RATE'], {}), '(PATH_TO_SAVE, denoised, dnae.SAMPLE_RATE)\n', (3042, 3084), False, 'import soundfile\n'), ((1162, 1210), 'librosa.load', 'librosa.load', (['path_to_audio'], {'sr': 'self.SAMPLE_RATE'}), '(path_to_audio, sr=se... |
#!/usr/bin/python
import spidev
class mcp2515:
SPI_RESET = 0xC0
SPI_READ = 0x03
SPI_READ_RX = 0x90
SPI_WRITE = 0x02
SPI_WRITE_TX = 0x40
SPI_RTS = 0x80
SPI_READ_STATUS = 0xA0
SPI_RX_STATUS = 0xB0
SPI_BIT_MODIFY = 0x05
#/* Configuration Registers */
CANSTAT = 0x0E
CANCTRL = 0x0F
BFPCTRL ... | [
"spidev.SpiDev"
] | [((9431, 9446), 'spidev.SpiDev', 'spidev.SpiDev', ([], {}), '()\n', (9444, 9446), False, 'import spidev\n')] |
import sqlalchemy
from .base import Base
from .track import Track
import model.tracktime
class GroupTrack(Base):
'''
a link between group and track (association pattern)
backrefs group and track (not listed here)
To use this first create the group, then group tracks,
then tracks and add them to ... | [
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] | [((445, 500), 'sqlalchemy.Column', 'sqlalchemy.Column', (['sqlalchemy.Integer'], {'primary_key': '(True)'}), '(sqlalchemy.Integer, primary_key=True)\n', (462, 500), False, 'import sqlalchemy\n'), ((734, 771), 'sqlalchemy.Column', 'sqlalchemy.Column', (['sqlalchemy.Integer'], {}), '(sqlalchemy.Integer)\n', (751, 771), F... |
import unittest
from reflexy.base import reflex
class TestReflexModule(unittest.TestCase):
sof = 'datasetname|file1.fits;PRO_CATG1;PURPOSE1:PURPOSE2,file2;' \
'PRO_CAT2;PURPOSE1'
sopexp = [('long_param1', '3'), ('param2', '3'), ('param3', 'ser'),
('param_not_shown', 'none')]
sop = ... | [
"unittest.main",
"reflexy.base.reflex.parseSop",
"reflexy.base.reflex.parseSofJson",
"reflexy.base.reflex.parseSof"
] | [((1568, 1583), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1581, 1583), False, 'import unittest\n'), ((482, 507), 'reflexy.base.reflex.parseSof', 'reflex.parseSof', (['self.sof'], {}), '(self.sof)\n', (497, 507), False, 'from reflexy.base import reflex\n'), ((1108, 1133), 'reflexy.base.reflex.parseSof', 'refl... |
import sys
import argparse
import os
import re
import yaml
from . import workflow
class Runner(object):
tasks = [
]
out_and_cache_subfolder_with_sumatra_label = True
def run(self):
parser = argparse.ArgumentParser(description='Run workflow')
parser.add_argument('config_path', type... | [
"os.path.join",
"re.match",
"yaml.load",
"argparse.ArgumentParser"
] | [((221, 272), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run workflow"""'}), "(description='Run workflow')\n", (244, 272), False, 'import argparse\n'), ((1099, 1131), 're.match', 're.match', (['"""^(\\\\d*)$"""', 'args.range'], {}), "('^(\\\\d*)$', args.range)\n", (1107, 1131), False... |
# Copyright (C) 2016 <NAME>.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
import pathmagic
from pymongo import MongoClient
import ssdeep
from env import envget
def searchFuzzy(fuzz, limit, thresh):
client = MongoClient(envget('metadata.host... | [
"env.envget",
"ssdeep.compare"
] | [((299, 322), 'env.envget', 'envget', (['"""metadata.host"""'], {}), "('metadata.host')\n", (305, 322), False, 'from env import envget\n'), ((324, 347), 'env.envget', 'envget', (['"""metadata.port"""'], {}), "('metadata.port')\n", (330, 347), False, 'from env import envget\n'), ((365, 391), 'env.envget', 'envget', (['"... |
#!/usr/bin/env python3
"""
Summary:
buildrpm (python3): branchdiff binary operating system package (.rpm, Redhat, Redhat-based systems)
- Automatic determination of version to be built
- Build version can optionally be forced to a specific version
- Resulting rpm ackage produced in packagi... | [
"subprocess.getoutput",
"tarfile.open",
"pyaws.utils.export_json_object",
"loggers.getLogger",
"pyaws.utils.stdout_message",
"sys.exit",
"os.walk",
"os.remove",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"shutil.copy2",
"json.dumps",
"os.path.split",
"subprocess.call",
... | [((1443, 1469), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1459, 1469), False, 'import os\n'), ((1614, 1631), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (1629, 1631), False, 'import docker\n'), ((2223, 2247), 'loggers.getLogger', 'loggers.getLogger', (['"""1.0"""'], {}), "... |
import torch
import torch.nn.functional as F
from agent.td3 import TD3
class TD3MT(TD3):
def __init__(self,
state_dim,
action_dim,
max_action,
num_env,
discount=0.99,
tau=0.005,
policy_noise=0.2... | [
"torch.nn.functional.mse_loss",
"torch.rand_like",
"torch.load",
"torch.min",
"torch.no_grad",
"torch.zeros"
] | [((1351, 1402), 'torch.load', 'torch.load', (["(filename + '_actor_optimizer_online.pt')"], {}), "(filename + '_actor_optimizer_online.pt')\n", (1361, 1402), False, 'import torch\n'), ((1457, 1509), 'torch.load', 'torch.load', (["(filename + '_critic_optimizer_online.pt')"], {}), "(filename + '_critic_optimizer_online.... |
# Standard libraries
import os
import json
import logging
from typing import Text
# Azure functions
import azure.functions as func
# Inference runtime
import onnxruntime as ort
from tokenizers import BertWordPieceTokenizer
# Helper scripts
from .PreprocessData import normalize_text, truncate_text
from .Predict impor... | [
"json.loads",
"tokenizers.BertWordPieceTokenizer",
"azure.functions.HttpResponse",
"json.dumps",
"onnxruntime.InferenceSession",
"os.path.dirname",
"logging.info"
] | [((599, 638), 'tokenizers.BertWordPieceTokenizer', 'BertWordPieceTokenizer', (['vocab_file_path'], {}), '(vocab_file_path)\n', (621, 638), False, 'from tokenizers import BertWordPieceTokenizer\n'), ((764, 800), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['onnx_file_path'], {}), '(onnx_file_path)\n', (784,... |
import tensorflow as tf
def get_record_parser_qqp(config, is_test=False):
def parse(example):
ques_limit = config.test_ques_limit if is_test else config.ques_limit
features = tf.parse_single_example(example,
features={
... | [
"tensorflow.decode_raw",
"tensorflow.FixedLenFeature"
] | [((733, 780), 'tensorflow.decode_raw', 'tf.decode_raw', (["features['ques1_idxs']", 'tf.int32'], {}), "(features['ques1_idxs'], tf.int32)\n", (746, 780), True, 'import tensorflow as tf\n'), ((849, 896), 'tensorflow.decode_raw', 'tf.decode_raw', (["features['ques2_idxs']", 'tf.int32'], {}), "(features['ques2_idxs'], tf.... |
import numpy
import requests
import Quandl
import datetime
from pyrnkr.application import App
from pyrnkr.widgets import Line
from pyrnkr.formula import Trace
def extract_date_index(ts, format='%Y-%m-%d'):
return [x.strftime(format) for x in ts.index.tolist()]
class oil(App):
# This must be consistent with c... | [
"pyrnkr.widgets.Line",
"Quandl.get",
"pyrnkr.formula.Trace"
] | [((1239, 1279), 'Quandl.get', 'Quandl.get', (['symbol'], {'authtoken': 'self.TOKEN'}), '(symbol, authtoken=self.TOKEN)\n', (1249, 1279), False, 'import Quandl\n'), ((1751, 1794), 'pyrnkr.formula.Trace', 'Trace', ([], {'x': 'x', 'y': 'datay', 'extra': "{'name': symbol}"}), "(x=x, y=datay, extra={'name': symbol})\n", (17... |
from django.contrib.auth.password_validation import validate_password
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import force_str
from django.utils.http import urlsafe_base64_decode
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.serializ... | [
"rest_framework.serializers.EmailField",
"accounts.models.Donner.objects.create",
"accounts.models.NGO.objects.create",
"accounts.models.CustomUser.objects.get",
"accounts.models.Donner.objects.exclude",
"rest_framework.serializers.ValidationError",
"rest_framework.exceptions.AuthenticationFailed",
"d... | [((1638, 1711), 'rest_framework.serializers.CharField', 'CharField', ([], {'write_only': '(True)', 'required': '(True)', 'validators': '[validate_password]'}), '(write_only=True, required=True, validators=[validate_password])\n', (1647, 1711), False, 'from rest_framework.serializers import CharField, EmailField, ModelS... |
#!/usr/bin/env python
import argparse
import os
from PIL import Image
densities = {
'mdpi': 48,
'hdpi': 72,
'xhdpi': 96,
'xxhdpi': 144,
'xxxhdpi': 192
}
class PathAction(argparse.Action):
def __call__(self, parser, namespace, value, options_string=None):
if not os.path.exists(value):
raise argparse.Argume... | [
"os.path.exists",
"argparse.FileType",
"PIL.Image.open",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.isdir"
] | [((622, 645), 'PIL.Image.open', 'Image.open', (['args.source'], {}), '(args.source)\n', (632, 645), False, 'from PIL import Image\n'), ((1007, 1032), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1030, 1032), False, 'import argparse\n'), ((1315, 1335), 'os.path.exists', 'os.path.exists', (['p... |
import re
try:
long
except NameError:
long = int
# list of prod source label for pilot tests
list_ptest_prod_sources = ['ptest', 'rc_test', 'rc_test2', 'rc_alrb']
# mapping with prodsourcelabels that belong to analysis and production
analy_sources = ['user', 'panda']
prod_sources = ['managed', 'prod_test']
n... | [
"re.search"
] | [((2824, 2860), 're.search', 're.search', (['"""^nStandby=(.+)"""', 'tmpItem'], {}), "('^nStandby=(.+)', tmpItem)\n", (2833, 2860), False, 'import re\n'), ((1758, 1799), 're.search', 're.search', (['"""coreCount=(\\\\d+)"""', 'jobMetrics'], {}), "('coreCount=(\\\\d+)', jobMetrics)\n", (1767, 1799), False, 'import re\n'... |
try:
from IPython import get_ipython
if get_ipython().__class__.__name__ not in ['NoneType']:
from IPython import display
i_am_in_interatcive = True
import pylab as pl
pl.rcParams['figure.figsize'] = [13, 13]
# print("INTERACTIVE")
else:
import matplotlib.pyp... | [
"IPython.get_ipython"
] | [((49, 62), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (60, 62), False, 'from IPython import get_ipython\n')] |
#! /usr/bin/python3
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import copy
import os
import subprocess
import commonl
import ttbl
import ttbl.images
import ttbl.power
class pgm_c(ttbl.images.flash_shell_cmd_c):
"""Flash using Intel's Quartus PGM tool
This allows to fl... | [
"ttbl.usb_serial_to_path",
"subprocess.check_output",
"ttbl.power.daemon_c.on",
"commonl.assert_dict_of_ints",
"ttbl.power.daemon_c.__init__",
"ttbl.images.flash_shell_cmd_c.__init__",
"os.path.join",
"commonl.assert_none_or_dict_of_strings",
"ttbl.images.flash_shell_cmd_c.flash_start",
"commonl.p... | [((9249, 9300), 'commonl.assert_dict_of_ints', 'commonl.assert_dict_of_ints', (['image_map', '"""image_map"""'], {}), "(image_map, 'image_map')\n", (9276, 9300), False, 'import commonl\n'), ((9309, 9373), 'commonl.assert_none_or_dict_of_strings', 'commonl.assert_none_or_dict_of_strings', (['jtagconfig', '"""jtagconfig"... |
import pandas as pd
from config import WEBSCRAPE_DATA_PATH, OUTPUT_DATA_PATH
import os
def get_understat_filepaths(file_path):
filepaths = []
team = []
for root, dirs, files in os.walk(file_path):
for filename in files:
if ('understat' in filename) and ('team' not in filename) and ('pl... | [
"os.listdir",
"pandas.read_csv",
"pandas.to_datetime",
"os.path.join",
"os.path.normpath",
"pandas.DataFrame",
"os.walk"
] | [((191, 209), 'os.walk', 'os.walk', (['file_path'], {}), '(file_path)\n', (198, 209), False, 'import os\n'), ((497, 546), 'pandas.DataFrame', 'pd.DataFrame', (["{'Filepath': filepaths}"], {'index': 'team'}), "({'Filepath': filepaths}, index=team)\n", (509, 546), True, 'import pandas as pd\n'), ((701, 721), 'os.listdir'... |
import numpy as np
import pytest
from respy import RespyCls
from respy.python.shared.shared_constants import IS_PARALLELISM_MPI
from respy.python.shared.shared_constants import IS_PARALLELISM_OMP
from respy.tests.codes.auxiliary import compare_est_log
from respy.tests.codes.auxiliary import simulate_observed
from resp... | [
"numpy.testing.assert_equal",
"respy.tests.codes.random_model.generate_random_model",
"respy.tests.codes.auxiliary.compare_est_log",
"numpy.random.randint",
"respy.RespyCls",
"pytest.mark.skipif",
"respy.tests.codes.auxiliary.simulate_observed"
] | [((379, 487), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not IS_PARALLELISM_MPI and not IS_PARALLELISM_OMP)'], {'reason': '"""No PARALLELISM available"""'}), "(not IS_PARALLELISM_MPI and not IS_PARALLELISM_OMP,\n reason='No PARALLELISM available')\n", (397, 487), False, 'import pytest\n'), ((940, 982), 'respy.t... |
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
from json import dumps
from flask import request
logger = logging.getLogger('plutil.http.s')
def define_common_routes(plugin, app, server):
""" Some routes are always defined. """
@... | [
"logging.getLogger",
"flask.request.environ.get",
"json.dumps"
] | [((186, 220), 'logging.getLogger', 'logging.getLogger', (['"""plutil.http.s"""'], {}), "('plutil.http.s')\n", (203, 220), False, 'import logging\n'), ((583, 626), 'json.dumps', 'dumps', (["{'status': 'OK', 'result': the_args}"], {}), "({'status': 'OK', 'result': the_args})\n", (588, 626), False, 'from json import dumps... |
'''Train Siamese NIMA model networks.'''
from model.siamese_nima import SiameseNIMA
if __name__ == '__main__':
# dirs and paths to load data
train_image_dir = './assets/demo/train_images'
train_data_path = './assets/demo/train_data.csv'
# load data and train model
siamese = SiameseNIMA(output_di... | [
"model.siamese_nima.SiameseNIMA"
] | [((299, 333), 'model.siamese_nima.SiameseNIMA', 'SiameseNIMA', ([], {'output_dir': '"""./assets"""'}), "(output_dir='./assets')\n", (310, 333), False, 'from model.siamese_nima import SiameseNIMA\n')] |
# -*- coding: utf-8 -*-
import datetime
from django.db.models import Count, Q
from django.utils import timezone
from trojsten.events.models import EventParticipant
from trojsten.people.constants import SCHOOL_YEAR_END_MONTH
from trojsten.results.constants import COEFFICIENT_COLUMN_KEY
from trojsten.results.generator... | [
"datetime.datetime",
"trojsten.submit.models.Submit.objects.filter",
"trojsten.results.representation.ResultsTag",
"django.db.models.Count",
"trojsten.results.representation.ResultsCol",
"trojsten.events.models.EventParticipant.objects.filter",
"django.db.models.Q"
] | [((6526, 6563), 'trojsten.results.representation.ResultsTag', 'ResultsTag', ([], {'key': 'KMS_ALFA', 'name': '"""Alfa"""'}), "(key=KMS_ALFA, name='Alfa')\n", (6536, 6563), False, 'from trojsten.results.representation import ResultsCell, ResultsCol, ResultsTag\n'), ((6583, 6620), 'trojsten.results.representation.Results... |
import obj as obj_lib
import road_artifact
import drive as drive_lib
import utilities as u
class Sensor(obj_lib.Obj):
"""
parent object class for car sensors
returns instruction
driving instruction - (heading, speed)
no driving instruction (no new process or process has completed) - None
... | [
"utilities.heading",
"drive.DriveArcChangeLane"
] | [((6048, 6086), 'utilities.heading', 'u.heading', (['car.center', 'artifact.center'], {}), '(car.center, artifact.center)\n', (6057, 6086), True, 'import utilities as u\n'), ((12729, 12828), 'drive.DriveArcChangeLane', 'drive_lib.DriveArcChangeLane', (['self.pygame', 'self.screen', 'car', 'road', 'lane_id_current', 'la... |
#!/usr/bin/env python3
import os
import sys
if os.getuid() != 0:
print ("Must be run as root, sorry.")
sys.exit(-1)
from solcx import install_solc_pragma
install_solc_pragma('>0.5.0 <0.6.0')
print ("Done.")
| [
"os.getuid",
"solcx.install_solc_pragma",
"sys.exit"
] | [((158, 194), 'solcx.install_solc_pragma', 'install_solc_pragma', (['""">0.5.0 <0.6.0"""'], {}), "('>0.5.0 <0.6.0')\n", (177, 194), False, 'from solcx import install_solc_pragma\n'), ((48, 59), 'os.getuid', 'os.getuid', ([], {}), '()\n', (57, 59), False, 'import os\n'), ((106, 118), 'sys.exit', 'sys.exit', (['(-1)'], {... |
"""
An implementation of a greedy transition-based dependency parser (unlabeled parsing only).
Released under BSD license.
Code is an adapted version of <NAME>'s parser:
https://explosion.ai/blog/parsing-english-in-python
-- change: move core logic to separate myparserutils file
modified by bplank, 03/2017
"""
###... | [
"collections.defaultdict"
] | [((1973, 1989), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1984, 1989), False, 'from collections import defaultdict\n'), ((2170, 2186), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2181, 2186), False, 'from collections import defaultdict\n')] |
# -*- coding: utf-8 -*-
import json
import threading
import time
from abc import abstractmethod
from typing import Optional
from dmtp.mtp import tlv
from dmtp import mtp
import dmtp
import stun
from .manager import ContactManager, FieldValueEncoder, Session
def time_string(timestamp: int) -> str:
time_array = ... | [
"dmtp.HelloCommand.new",
"time.strftime",
"json.dumps",
"time.sleep",
"dmtp.CallCommand.new",
"time.localtime",
"time.time"
] | [((320, 345), 'time.localtime', 'time.localtime', (['timestamp'], {}), '(timestamp)\n', (334, 345), False, 'import time\n'), ((357, 403), 'time.strftime', 'time.strftime', (['"""%y-%m-%d %H:%M:%S"""', 'time_array'], {}), "('%y-%m-%d %H:%M:%S', time_array)\n", (370, 403), False, 'import time\n'), ((2349, 2398), 'dmtp.He... |
from rest_framework import serializers
from projects.models import (
Project,
ProjectVolunteers,
ProjectVolunteersRegistration,
ProjectAttendees,
ProjectAttendeesRegistration,
ProjectDiscussion,
ProjectAnswerDiscussion,
ProjectHub,
)
class ProjectVolunteersRegistrationSerializer(seria... | [
"projects.models.ProjectDiscussion.objects.create",
"projects.models.ProjectVolunteersRegistration.objects.filter",
"projects.models.ProjectAnswerDiscussion.objects.create",
"projects.models.ProjectAttendees.objects.create",
"projects.models.ProjectAttendeesRegistration.objects.create",
"projects.models.P... | [((570, 644), 'projects.models.ProjectVolunteers.objects.get', 'ProjectVolunteers.objects.get', ([], {'pk': "validated_data['project_volunteers_ref']"}), "(pk=validated_data['project_volunteers_ref'])\n", (599, 644), False, 'from projects.models import Project, ProjectVolunteers, ProjectVolunteersRegistration, ProjectA... |
"""
@Author: <EMAIL>
@Created: 2021/3/10
@Application: deduplication in mongodb
"""
import pymongo
from NewsCrawler.settings import MONGO_URL
client = pymongo.MongoClient(MONGO_URL, maxPoolSize=1024)
def find_duplicate(collection):
collection.aggregate([
{'$group': {
'_id': {'title': "$title", 'publishe... | [
"pymongo.MongoClient"
] | [((141, 189), 'pymongo.MongoClient', 'pymongo.MongoClient', (['MONGO_URL'], {'maxPoolSize': '(1024)'}), '(MONGO_URL, maxPoolSize=1024)\n', (160, 189), False, 'import pymongo\n')] |
import codecs
__memory_storage = {}
def file_read(path):
with codecs.open(path, 'r', 'utf-8') as f:
return f.read()
def file_write(path, text):
with codecs.open(path, 'w', 'utf-8') as f:
return f.write(text)
def memory_write(key, data):
__memory_storage[key] = data
STORAGES = {
'fil... | [
"codecs.open"
] | [((68, 99), 'codecs.open', 'codecs.open', (['path', '"""r"""', '"""utf-8"""'], {}), "(path, 'r', 'utf-8')\n", (79, 99), False, 'import codecs\n'), ((168, 199), 'codecs.open', 'codecs.open', (['path', '"""w"""', '"""utf-8"""'], {}), "(path, 'w', 'utf-8')\n", (179, 199), False, 'import codecs\n')] |
import torch
import torchaudio
import numpy as np
from torch.nn.functional import normalize
from audio_utils.common.utilities import _check_transform_input
class BaseAudioParser(object):
def __init__(self, mode="after_batch"):
super().__init__()
assert mode in ['after_batch', "per_instance"]
... | [
"torch.log",
"torchaudio.transforms.MelScale",
"torch.nn.functional.normalize",
"torchaudio.functional.spectrogram",
"audio_utils.common.utilities._check_transform_input",
"torch.clamp"
] | [((489, 557), 'audio_utils.common.utilities._check_transform_input', '_check_transform_input', (['audio_sample'], {'desired_dims': 'self.desired_dims'}), '(audio_sample, desired_dims=self.desired_dims)\n', (511, 557), False, 'from audio_utils.common.utilities import _check_transform_input\n'), ((1799, 2033), 'torchaudi... |
import itertools
from typing import Any, Callable, Sequence, Tuple
import dill as pickle
import jax.numpy as np
import numpy as onp
import pandas as pd
from jax import grad, jit, ops, random
from jax.experimental.optimizers import Optimizer, adam
from pzflow import distributions
from pzflow.bijectors import Bijector_... | [
"jax.random.split",
"pzflow.utils.build_bijector_from_info",
"dill.load",
"jax.numpy.repeat",
"jax.random.PRNGKey",
"jax.numpy.hstack",
"jax.numpy.delete",
"jax.experimental.optimizers.adam",
"pandas.DataFrame",
"jax.numpy.nan_to_num",
"jax.numpy.where",
"dill.dump",
"numpy.isnan",
"jax.nu... | [((12923, 12959), 'jax.numpy.nan_to_num', 'np.nan_to_num', (['log_prob'], {'nan': 'np.NINF'}), '(log_prob, nan=np.NINF)\n', (12936, 12959), True, 'import jax.numpy as np\n'), ((34142, 34162), 'jax.random.PRNGKey', 'random.PRNGKey', (['seed'], {}), '(seed)\n', (34156, 34162), False, 'from jax import grad, jit, ops, rand... |
import yaml
import pprint
def read_yaml():
""" A function to read YAML file"""
with open('configs.yml') as f:
config = list(yaml.safe_load_all(f))
return config
def write_yaml(data):
""" A function to write YAML file"""
with open('toyaml.yml', 'a') as f:
yaml.dump_all(data, f, d... | [
"pprint.pprint",
"yaml.safe_load_all",
"yaml.dump_all"
] | [((464, 488), 'pprint.pprint', 'pprint.pprint', (['my_config'], {}), '(my_config)\n', (477, 488), False, 'import pprint\n'), ((296, 344), 'yaml.dump_all', 'yaml.dump_all', (['data', 'f'], {'default_flow_style': '(False)'}), '(data, f, default_flow_style=False)\n', (309, 344), False, 'import yaml\n'), ((142, 163), 'yaml... |
"""pypyr step saves the current utc datetime to context."""
from datetime import datetime, timezone
import logging
# logger means the log level will be set correctly
logger = logging.getLogger(__name__)
def run_step(context):
"""Save current utc datetime to context.
Args:
context: pypyr.context.Cont... | [
"logging.getLogger",
"datetime.datetime.now"
] | [((176, 203), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (193, 203), False, 'import logging\n'), ((1210, 1236), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (1222, 1236), False, 'from datetime import datetime, timezone\n'), ((1319, 1345), 'datet... |
import os
import sys
from time import sleep as sleep
import glob
import cv2
from PIL import Image
ESC = b'\033'
CSI = ESC + b'['
use_ansi_escape_sequences = True
if not use_ansi_escape_sequences:
import ctypes
from ctypes import c_long
console_handle = ctypes.windll.kernel32.GetStdHandle(c_long(-11))
... | [
"ctypes.c_long",
"PIL.Image.fromarray",
"time.sleep",
"ctypes.windll.kernel32.SetConsoleCursorPosition",
"cv2.VideoCapture",
"os.system",
"sys.stdout.fileno",
"glob.glob"
] | [((484, 500), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (493, 500), False, 'import os\n'), ((562, 580), 'glob.glob', 'glob.glob', (['"""*.mp4"""'], {}), "('*.mp4')\n", (571, 580), False, 'import glob\n'), ((923, 955), 'cv2.VideoCapture', 'cv2.VideoCapture', (['selected_video'], {}), '(selected_video)\... |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\careers\acting\performance_object_data.py
# Compiled at: 2018-09-18 00:30:33
# Size of source mod 2*... | [
"services.active_sim_info"
] | [((854, 880), 'services.active_sim_info', 'services.active_sim_info', ([], {}), '()\n', (878, 880), False, 'import services\n')] |
import numpy as np
import torch
import os
import sys
import re
import math
from torch.utils.data import Dataset, DataLoader
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from lamb import Lamb
#tensorboard for accuracy graphs
import tensorflow as tf
def getCombinations(inputTensor, N, c... | [
"apex.amp.scale_loss",
"re.escape",
"torch.nn.CrossEntropyLoss",
"torch.distributed.destroy_process_group",
"math.sqrt",
"torch.utils.data.distributed.DistributedSampler",
"apex.amp.initialize",
"os.listdir",
"torch.nn.ModuleList",
"torch.eye",
"tensorflow.Session",
"tensorflow.placeholder",
... | [((707, 764), 'os.path.join', 'os.path.join', (['sys.argv[1]', "(dataset_name + '_preprocessed')"], {}), "(sys.argv[1], dataset_name + '_preprocessed')\n", (719, 764), False, 'import os\n'), ((524, 556), 'torch.cat', 'torch.cat', (['(tensorB, tensorA)', '(3)'], {}), '((tensorB, tensorA), 3)\n', (533, 556), False, 'impo... |
# Copyright 2022 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import os
from typing import Any, Callable, List, Tuple, Union
from assembly.common import FileEntry
__all__ = [
"create_fast_copy_mock... | [
"assembly.common.FileEntry",
"functools.partial"
] | [((675, 713), 'assembly.common.FileEntry', 'FileEntry', ([], {'source': 'src', 'destination': 'dst'}), '(source=src, destination=dst)\n', (684, 713), False, 'from assembly.common import FileEntry\n'), ((975, 1031), 'functools.partial', 'functools.partial', (['fast_copy_mock'], {'tracked_copies': 'copies'}), '(fast_copy... |
#!/usr/bin/env python3
import os
import sys
import subprocess
import traceback
from datetime import datetime
try:
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
import config.preprocessing.trimgalore as trimgalore
from Bio import SeqIO
except Exception as e:
... | [
"traceback.format_exc",
"os.listdir",
"scripts.mccutils.remove",
"scripts.mccutils.check_file_exists",
"os.chdir",
"scripts.mccutils.run_command",
"scripts.mccutils.mkdir",
"Bio.SeqIO.parse",
"sys.exit",
"scripts.mccutils.log",
"sys.path.append",
"scripts.mccutils.run_command_stdout"
] | [((119, 172), 'sys.path.append', 'sys.path.append', (["snakemake.config['args']['mcc_path']"], {}), "(snakemake.config['args']['mcc_path'])\n", (134, 172), False, 'import sys\n'), ((1060, 1119), 'scripts.mccutils.log', 'mccutils.log', (['"""processing"""', '"""prepping reads for McClintock"""'], {}), "('processing', 'p... |
from django import forms
class EmptyForm(forms.Form):
pass
class LoginForm(forms.Form):
username = forms.CharField(
max_length=50,
label='Username'
)
password = forms.CharField(
max_length=32,
label='Password',
widget=forms.PasswordInput(),
required=True... | [
"django.forms.HiddenInput",
"django.forms.BooleanField",
"django.forms.CharField",
"django.forms.PasswordInput",
"django.forms.RegexField",
"django.forms.ChoiceField",
"django.forms.IntegerField",
"django.forms.Textarea",
"django.forms.TextInput",
"django.forms.MultipleChoiceField",
"django.form... | [((109, 157), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(50)', 'label': '"""Username"""'}), "(max_length=50, label='Username')\n", (124, 157), False, 'from django import forms\n'), ((623, 667), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(50)', 'label': '"""Name"""'}), "(ma... |
"""
Author(s):
<NAME> (<EMAIL>)
Date: 02/21/2020
Description:
This action will return several lines, with each line being a JSON
representation of the user
"""
from azure_utility_tool.utils import paginate
from azure_utility_tool.graph_endpoints import USER_GET_ENDPOINT
from azure_utility_tool.test_cases im... | [
"azure_utility_tool.test_cases.TestCases"
] | [((758, 769), 'azure_utility_tool.test_cases.TestCases', 'TestCases', ([], {}), '()\n', (767, 769), False, 'from azure_utility_tool.test_cases import TestCases\n')] |
import torch
import torch.nn as nn
from oncopolicy.models.factory import RegisterModel
import pdb
class AbstractDeterministicGuideline(nn.Module):
def __init__(self, args):
super(AbstractDeterministicGuideline, self).__init__()
self.args = args
self.max_steps = args.max_steps
def get_... | [
"torch.nn.MaxPool1d",
"oncopolicy.models.factory.RegisterModel",
"torch.randn_like",
"torch.arange",
"torch.zeros",
"torch.gather",
"torch.cat"
] | [((467, 502), 'oncopolicy.models.factory.RegisterModel', 'RegisterModel', (['"""last_observed_risk"""'], {}), "('last_observed_risk')\n", (480, 502), False, 'from oncopolicy.models.factory import RegisterModel\n'), ((1994, 2022), 'oncopolicy.models.factory.RegisterModel', 'RegisterModel', (['"""static_risk"""'], {}), "... |
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (histonets/config/settings/base.py - 3 = histonets/)
APPS_DIR = ROOT_DIR.path('histonets')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FIL... | [
"environ.Path",
"environ.Env"
] | [((214, 227), 'environ.Env', 'environ.Env', ([], {}), '()\n', (225, 227), False, 'import environ\n'), ((86, 108), 'environ.Path', 'environ.Path', (['__file__'], {}), '(__file__)\n', (98, 108), False, 'import environ\n')] |
#
# voice-skill-sdk
#
# (C) 2020, Deutsche Telekom AG
#
# Deutsche Telekom AG and all other contributors /
# copyright owners license this file to you under the MIT
# License (the "License"); you may not use this file
# except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensou... | [
"bottle.redirect",
"bottle.static_file",
"bottle.get",
"pathlib.Path"
] | [((835, 843), 'bottle.get', 'get', (['"""/"""'], {}), "('/')\n", (838, 843), False, 'from bottle import get, static_file, redirect\n'), ((895, 914), 'bottle.get', 'get', (['"""/swagger-ui/"""'], {}), "('/swagger-ui/')\n", (898, 914), False, 'from bottle import get, static_file, redirect\n'), ((916, 950), 'bottle.get', ... |
# Generated by Django 2.1.7 on 2019-05-24 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("courses", "0008_enrollment_company")]
operations = [
migrations.AddField(
model_name="courserunenrollment",
name="active",
... | [
"django.db.models.BooleanField"
] | [((332, 452), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Indicates whether or not this enrollment should be considered active"""'}), "(default=True, help_text=\n 'Indicates whether or not this enrollment should be considered active')\n", (351, 452), False, 'fr... |
from flask import Blueprint, current_app, jsonify
from seventweets.exceptions import error_handler
from seventweets import tweet
base = Blueprint('base', __name__)
@base.route('/')
@error_handler
def index():
original = tweet.count('original')
retweets = tweet.count('retweet')
return jsonify({
'n... | [
"flask.jsonify",
"flask.Blueprint",
"seventweets.tweet.count"
] | [((137, 164), 'flask.Blueprint', 'Blueprint', (['"""base"""', '__name__'], {}), "('base', __name__)\n", (146, 164), False, 'from flask import Blueprint, current_app, jsonify\n'), ((227, 250), 'seventweets.tweet.count', 'tweet.count', (['"""original"""'], {}), "('original')\n", (238, 250), False, 'from seventweets impor... |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
# import rasahub_google_calendar
from time import gmtime, time, strftime
import json
import locale
import logging
import math
import mysql.c... | [
"logging.getLogger",
"re.split",
"datetime.datetime.replace",
"locale.setlocale",
"datetime.datetime.strptime",
"os.urandom",
"nltk.stem.snowball.SnowballStemmer",
"datetime.datetime.now",
"random.randint",
"re.search"
] | [((501, 526), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""german"""'], {}), "('german')\n", (516, 526), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((536, 563), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (553, 563), False, 'import logging\n'), ((584,... |
from pycocotools.coco import COCO
import json
with open('person_keypoints_val2017.json') as f:
data = json.load(f)
coco = COCO('person_keypoints_val2017.json')
def search(id):
for annotation in data['annotations']:
if annotation['image_id']==id:
print(annotation)
#print(data... | [
"json.load",
"pycocotools.coco.COCO"
] | [((123, 160), 'pycocotools.coco.COCO', 'COCO', (['"""person_keypoints_val2017.json"""'], {}), "('person_keypoints_val2017.json')\n", (127, 160), False, 'from pycocotools.coco import COCO\n'), ((103, 115), 'json.load', 'json.load', (['f'], {}), '(f)\n', (112, 115), False, 'import json\n')] |
import discord
from discord.ext import commands
class System(commands.Cog):
def __init__(self, perceus):
self.perceus = perceus
@commands.Cog.listener()
async def on_ready(self):
print('Logged in as: ')
print(self.perceus.user.name)
print(self.perceus.user.id)
prin... | [
"discord.ext.commands.Cog.listener",
"discord.ext.commands.command"
] | [((148, 171), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (169, 171), False, 'from discord.ext import commands\n'), ((346, 364), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (362, 364), False, 'from discord.ext import commands\n'), ((510, 528), 'discord.ext.c... |
# -*- coding: utf-8 -*-
import argparse
import os
import sys
from base64 import b64encode
from datetime import datetime, timedelta
from cryptography import x509
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives... | [
"os.path.exists",
"cryptography.x509.NameAttribute",
"cryptography.x509.random_serial_number",
"argparse.ArgumentParser",
"cryptography.x509.SubjectAlternativeName",
"datetime.datetime.utcnow",
"cryptography.x509.CertificateBuilder",
"cryptography.x509.DNSName",
"cryptography.hazmat.primitives.seria... | [((472, 534), 'cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key', 'rsa.generate_private_key', ([], {'public_exponent': '(65537)', 'key_size': '(4096)'}), '(public_exponent=65537, key_size=4096)\n', (496, 534), False, 'from cryptography.hazmat.primitives.asymmetric import rsa\n'), ((552, 580), 'cryptog... |
from math import pi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize_scalar
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__license__ = "MIT"
# gravitational acceleration
g = 9.81 # m/s²
#... | [
"numpy.sqrt",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.close",
"numpy.interp",
"pandas.DataFrame",
"numpy.logspace",
"matplotlib.pyplot.subplots",
"pandas.notna"
] | [((1168, 1180), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1176, 1180), True, 'import numpy as np\n'), ((1620, 1801), 'numpy.array', 'np.array', (['[0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.1, 0.125, 0.15, 0.2, 0.25, 0.3,\n 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, \n 1.8, ... |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from bbbs.afisha.factories import EventFactory
from bbbs.afisha.models import EventParticipant
from bbbs.common.factories import CityFactory
from bbbs.users.factories import UserFactory
from... | [
"bbbs.common.factories.CityFactory",
"bbbs.users.factories.UserFactory",
"rest_framework.test.APIClient",
"bbbs.afisha.models.EventParticipant.objects.create",
"django.urls.reverse",
"bbbs.afisha.factories.EventFactory"
] | [((488, 515), 'bbbs.common.factories.CityFactory', 'CityFactory', ([], {'name': '"""Воркута"""'}), "(name='Воркута')\n", (499, 515), False, 'from bbbs.common.factories import CityFactory\n'), ((538, 608), 'bbbs.users.factories.UserFactory', 'UserFactory', ([], {'profile__role': 'Profile.Role.MENTOR', 'profile__city': '... |
#!/usr/bin/env python2.7
import pdb_structure
import sys
import os.path
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: "+sys.argv[0]+" <pdb-file>")
sys.exit(1)
pdbFile = sys.argv[1]
struc = pdb_structure.PDBFile(pdbFile)
name = os.path.basename(pdbFile).replace("... | [
"pdb_structure.PDBFile",
"collections.defaultdict",
"sys.exit"
] | [((241, 271), 'pdb_structure.PDBFile', 'pdb_structure.PDBFile', (['pdbFile'], {}), '(pdbFile)\n', (262, 271), False, 'import pdb_structure\n'), ((388, 405), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (399, 405), False, 'from collections import defaultdict\n'), ((186, 197), 'sys.exit', 'sys.ex... |
# Create an event model for an event-listings site.
# The model should have the following fields:
# Event name (name), no more than 200 characters
# Date and time of the event (start_at)
# Event description (description)
# Email address of the event organizer (contact)
# The user who created the event... | [
"django.db.models.DateTimeField",
"django.db.models.EmailField",
"django.contrib.auth.get_user_model",
"django.db.models.CharField"
] | [((571, 603), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (587, 603), False, 'from django.db import models\n'), ((619, 677), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""event published"""'], {'auto_now_add': '(True)'}), "('event published', ... |
from __future__ import division, print_function
import os
import os.path as fs
import numpy as np
import pandas as pd
import re
### PURPOSE: Takes a directory containing N files of the form mXXXXXX.ovf ###
### and imports them to an N x X x Y x Z x 3 numpy array ###
### where X,Y,Z are the number of cells in x,y... | [
"numpy.mean",
"os.listdir",
"pandas.read_csv",
"os.path.join",
"re.match",
"numpy.floor",
"numpy.append",
"numpy.empty"
] | [((1449, 1518), 'numpy.empty', 'np.empty', (['(num_files_to_import, data_dimensions[2])'], {'dtype': '(float, 3)'}), '((num_files_to_import, data_dimensions[2]), dtype=(float, 3))\n', (1457, 1518), True, 'import numpy as np\n'), ((3116, 3204), 'pandas.read_csv', 'pd.read_csv', (['this_filename'], {'header': 'None', 'sk... |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: proxyFetcher
Description :
Author : JHao
date: 2016/11/25
-------------------------------------------------
Change Activity:
2016/11/25: proxyFetcher
---------------------------... | [
"requests.Session",
"os.path.join",
"datetime.date.today",
"time.sleep",
"requests.get",
"datetime.timedelta",
"base64.b64decode",
"util.webRequest.WebRequest",
"lxml.etree.HTML",
"os.path.abspath",
"re.findall"
] | [((1856, 1933), 're.findall', 're.findall', (['"""(\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}:\\\\d{1,5})"""', 'resp.text'], {}), "('(\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}:\\\\d{1,5})', resp.text)\n", (1866, 1933), False, 'import re\n'), ((2166, 2175), 'requests.Session', 'Session', ([], ... |
# Code generated by lark_sdk_gen. DO NOT EDIT.
import unittest
import pylark
import pytest
from tests.test_conf import app_all_permission, app_no_permission
from tests.test_helper import mock_get_tenant_access_token_failed
def mock(*args, **kwargs):
raise pylark.PyLarkError(scope="scope", func="func", code=1, ms... | [
"pylark.UpdateSheetDimensionRangeReq",
"pylark.UpdateDriveCommentReq",
"pylark.BatchUpdateSheetReq",
"pylark.GetSheetDataValidationDropdownReq",
"pylark.GetWikiSpaceListReq",
"pylark.UploadDriveMediaReq",
"pylark.TransferDriveMemberPermissionReq",
"pylark.GetSheetMetaReq",
"pylark.FindSheetReq",
"... | [((263, 336), 'pylark.PyLarkError', 'pylark.PyLarkError', ([], {'scope': '"""scope"""', 'func': '"""func"""', 'code': '(1)', 'msg': '"""mock-failed"""'}), "(scope='scope', func='func', code=1, msg='mock-failed')\n", (281, 336), False, 'import pylark\n'), ((388, 478), 'pylark.PyLarkError', 'pylark.PyLarkError', ([], {'s... |
# -*- coding: utf-8 -*-
#
# Implementation of Granger-Geweke causality
#
#
# Builtin/3rd party package imports
import numpy as np
def granger(CSD, Hfunc, Sigma):
"""
Computes the pairwise Granger-Geweke causalities
for all (non-symmetric!) channel combinations
according to Equation 8 in [1]_.
The... | [
"numpy.abs",
"numpy.log",
"numpy.ones"
] | [((1711, 1731), 'numpy.abs', 'np.abs', (['auto_spectra'], {}), '(auto_spectra)\n', (1717, 1731), True, 'import numpy as np\n'), ((2119, 2134), 'numpy.abs', 'np.abs', (['Sigma.T'], {}), '(Sigma.T)\n', (2125, 2134), True, 'import numpy as np\n'), ((2475, 2495), 'numpy.log', 'np.log', (['(Smat / denom)'], {}), '(Smat / de... |
import os
import sys
import maya.standalone
import mayaLib
print("=" * 30)
print("This is mayaLib package test")
print("=" * 30)
print("Initializing maya standalone ...")
maya.standalone.initialize(name="python")
# Create engine
maya_engine = mayaLib.MayaEngine()
print("Engine : " + str(maya_engine))
# Get engine p... | [
"mayaLib.MayaEngine",
"os.path.join",
"sys.exit"
] | [((247, 267), 'mayaLib.MayaEngine', 'mayaLib.MayaEngine', ([], {}), '()\n', (265, 267), False, 'import mayaLib\n'), ((955, 966), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (963, 966), False, 'import sys\n'), ((433, 472), 'os.path.join', 'os.path.join', (["os.environ['USERPROFILE']"], {}), "(os.environ['USERPROFILE... |
"""Initial Migration
Revision ID: b0c12eb8ae59
Revises: <PASSWORD>
Create Date: 2020-07-15 11:44:46.190193
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### com... | [
"sqlalchemy.String",
"alembic.op.drop_column",
"sqlalchemy.VARCHAR"
] | [((470, 508), 'alembic.op.drop_column', 'op.drop_column', (['"""users"""', '"""pass_secure"""'], {}), "('users', 'pass_secure')\n", (484, 508), False, 'from alembic import op\n'), ((746, 786), 'alembic.op.drop_column', 'op.drop_column', (['"""users"""', '"""password_hash"""'], {}), "('users', 'password_hash')\n", (760,... |
# Python Exercise #045 - GAME: <NAME> and Scissors
#
# Write a program that makes the computer play JOKENPÔ (rock-paper-scissors) with you.
# Learn how to set the colors in the answers!
from random import choice
from random import randint # Approach used in the solution to this exercise
from time import sleep
print('\033[1;31mATENÇÃO! ESTE ... | [
"random.choice",
"time.sleep"
] | [((670, 680), 'random.choice', 'choice', (['PC'], {}), '(PC)\n', (676, 680), False, 'from random import choice\n'), ((682, 692), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (687, 692), False, 'from time import sleep\n'), ((706, 714), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (711, 714), False, 'from time im... |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ... | [
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.reduce_sum",
"tf_cider.CiderScorer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.range",
"tensorflow.stop_gradient",
"tensorflow.summary.histogram",
"tensorflow.nn.softmax",
"tensorflow.nn.sigmoid_cross_entropy_with... | [((1878, 1952), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'predictions'}), '(labels=labels, logits=predictions)\n', (1917, 1952), True, 'import tensorflow as tf\n'), ((2372, 2458), 'tensorflow.nn.sparse_softmax_cross_entropy_with_lo... |
# Generated by Django 2.0.5 on 2019-06-23 19:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('provarme_dashboard', '0005_devolution_traffic'),
]
operations = [
migrations.RemoveField(
model_name='devolution',
name='add... | [
"django.db.migrations.RemoveField"
] | [((238, 301), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""devolution"""', 'name': '"""address"""'}), "(model_name='devolution', name='address')\n", (260, 301), False, 'from django.db import migrations\n'), ((346, 406), 'django.db.migrations.RemoveField', 'migrations.RemoveField... |
#!/usr/bin/env python3
import json
import os
from pybytom.script import (
get_public_key_hash, get_p2pkh_program, get_p2wpkh_program, get_p2wpkh_address
)
# Test Values
base_path = os.path.dirname(__file__)
file_path = os.path.abspath(os.path.join(base_path, "..", "values.json"))
values = open(file_path, "r")
_ ... | [
"pybytom.script.get_p2wpkh_program",
"os.path.join",
"os.path.dirname",
"pybytom.script.get_p2wpkh_address",
"pybytom.script.get_public_key_hash",
"pybytom.script.get_p2pkh_program"
] | [((188, 213), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (203, 213), False, 'import os\n'), ((242, 286), 'os.path.join', 'os.path.join', (['base_path', '""".."""', '"""values.json"""'], {}), "(base_path, '..', 'values.json')\n", (254, 286), False, 'import os\n'), ((394, 459), 'pybytom.scr... |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2008-2017 Sigasi
:license: BSD, see LICENSE for more details.
"""
import os
from SigasiProjectCreator.ArgsAndFileParser import ArgsAndFileParser
from SigasiProjectCreator.Creator import SigasiProjectCreator
from SigasiProjectCreator import VhdlVersi... | [
"SigasiProjectCreator.Creator.SigasiProjectCreator",
"os.getcwd",
"SigasiProjectCreator.ArgsAndFileParser.ArgsAndFileParser",
"os.path.normpath",
"os.path.commonprefix",
"os.path.abspath",
"os.path.relpath"
] | [((782, 806), 'SigasiProjectCreator.ArgsAndFileParser.ArgsAndFileParser', 'ArgsAndFileParser', (['usage'], {}), '(usage)\n', (799, 806), False, 'from SigasiProjectCreator.ArgsAndFileParser import ArgsAndFileParser\n'), ((909, 920), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (918, 920), False, 'import os\n'), ((1147, 1... |
#!/usr/bin/env python3
"""Benchmark icontract against deal when used together with hypothesis."""
import os
import sys
import timeit
from typing import List
import deal
import dpcontracts
import hypothesis
import hypothesis.extra.dpcontracts
import hypothesis.strategies
import icontract
import tabulate
import icontr... | [
"os.linesep.encode",
"tabulate.tabulate",
"icontract_hypothesis.make_assume_preconditions",
"icontract_hypothesis.test_with_inferred_strategy",
"hypothesis.strategies.integers",
"dpcontracts.require",
"deal.cases",
"deal.pre",
"hypothesis.settings",
"icontract.require",
"hypothesis.extra.dpcontr... | [((3648, 3707), 'icontract_hypothesis.test_with_inferred_strategy', 'icontract_hypothesis.test_with_inferred_strategy', (['some_func'], {}), '(some_func)\n', (3696, 3707), False, 'import icontract_hypothesis\n'), ((541, 575), 'icontract.require', 'icontract.require', (['(lambda a: a > 0)'], {}), '(lambda a: a > 0)\n', ... |
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
results_dir = Path('results')
results_dir.mkdir(exist_ok=True)
# Performance plot
for scale in [3, 4]:
for test_set in ['Set5', 'Set14']:
time = []
psnr = []
model = []
for save_dir in sorted(Path('.').g... | [
"matplotlib.pyplot.text",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"pathlib.Path",
"matplotlib.pyplot.semilogx",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matpl... | [((94, 109), 'pathlib.Path', 'Path', (['"""results"""'], {}), "('results')\n", (98, 109), False, 'from pathlib import Path\n'), ((1193, 1205), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1203, 1205), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1575), 'matplotlib.pyplot.legend', 'plt.legend', ([... |
# Matplotlib
# Visualization package for Python data science
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#%matplotlib inline # lets plots render in Jupyter notebooks
# without calling show()
# data = np.arange(10)
# plt.plot(data)
# plt.show()
# Scatter plot - generate 100 standard normal random numbers
list = []
for i in range(100): # 0 ... | [
"numpy.random.normal",
"scipy.stats.pearsonr",
"matplotlib.pyplot.plot",
"scipy.stats.ttest_ind",
"matplotlib.rc",
"pandas.read_excel",
"matplotlib.pyplot.show"
] | [((550, 580), 'matplotlib.pyplot.plot', 'plt.plot', (['x_data', 'y_data', '"""ro"""'], {}), "(x_data, y_data, 'ro')\n", (558, 580), True, 'import matplotlib.pyplot as plt\n'), ((582, 592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (590, 592), True, 'import matplotlib.pyplot as plt\n'), ((617, 654), 'panda... |
from django.contrib.auth.models import User
from rollservice.models import DiceSequence
import rest_framework.test as rf_test
import rest_framework.status as status
import rest_framework.reverse as reverse
import hypothesis.extra.django
import hypothesis.strategies as strategies
import unittest
class DiceSeq... | [
"hypothesis.strategies.text",
"hypothesis.strategies.sampled_from",
"hypothesis.strategies.integers",
"rollservice.models.DiceSequence.objects.create",
"rollservice.models.DiceSequence.objects.all",
"hypothesis.strategies.uuids",
"django.contrib.auth.models.User.objects.create",
"rest_framework.revers... | [((1358, 1376), 'hypothesis.strategies.uuids', 'strategies.uuids', ([], {}), '()\n', (1374, 1376), True, 'import hypothesis.strategies as strategies\n'), ((1396, 1425), 'hypothesis.strategies.text', 'strategies.text', ([], {'max_size': '(100)'}), '(max_size=100)\n', (1411, 1425), True, 'import hypothesis.strategies as ... |
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import torch
from comet.metrics import RegressionReport, WMTKendall
class TestMetrics(unittest.TestCase):
def test_regression_report(self):
report = RegressionReport()
a = np.array([0, 0, 0, 1, 1, 1, 1])
b = np.arange(7)
... | [
"comet.metrics.RegressionReport",
"numpy.array",
"torch.tensor",
"comet.metrics.WMTKendall",
"numpy.arange"
] | [((224, 242), 'comet.metrics.RegressionReport', 'RegressionReport', ([], {}), '()\n', (240, 242), False, 'from comet.metrics import RegressionReport, WMTKendall\n'), ((255, 286), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1, 1])\n', (263, 286), True, 'import numpy as np\n'), ((299, 31... |
import tensorflow as tf
from nalp.corpus import TextCorpus
from nalp.datasets import LanguageModelingDataset
from nalp.encoders import IntegerEncoder
from nalp.models import RelGAN
# Creating a character TextCorpus from file
corpus = TextCorpus(from_file='data/text/chapter1_harry.txt', corpus_type='char')
# Creating... | [
"nalp.encoders.IntegerEncoder",
"nalp.datasets.LanguageModelingDataset",
"nalp.models.RelGAN",
"tensorflow.optimizers.Adam",
"nalp.corpus.TextCorpus"
] | [((236, 308), 'nalp.corpus.TextCorpus', 'TextCorpus', ([], {'from_file': '"""data/text/chapter1_harry.txt"""', 'corpus_type': '"""char"""'}), "(from_file='data/text/chapter1_harry.txt', corpus_type='char')\n", (246, 308), False, 'from nalp.corpus import TextCorpus\n'), ((388, 404), 'nalp.encoders.IntegerEncoder', 'Inte... |
from setuptools import setup
setup(name='filecompare',
version='0.1',
description='A package for comparing text and JSON files.',
url='https://github.com/thomasms/filecompare',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=[
'filecompare',
... | [
"setuptools.setup"
] | [((31, 550), 'setuptools.setup', 'setup', ([], {'name': '"""filecompare"""', 'version': '"""0.1"""', 'description': '"""A package for comparing text and JSON files."""', 'url': '"""https://github.com/thomasms/filecompare"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages':... |
from text import longest_common_substring
from text._utils import suffix_array
import itertools
class HelperTestMixin:
"""
author: Anonta (https://stackoverflow.com/users/5798361/anonta)
source: https://stackoverflow.com/questions/51456472/python-fastest-algorithm-to-get-the-most-common-prefix-out-of-a-li... | [
"text._utils.suffix_array",
"itertools.product",
"text.longest_common_substring"
] | [((435, 468), 'text._utils.suffix_array', 'suffix_array', ([], {'text': 'tx', '_step': 'step'}), '(text=tx, _step=step)\n', (447, 468), False, 'from text._utils import suffix_array\n'), ((1282, 1319), 'text._utils.suffix_array', 'suffix_array', ([], {'text': '"""banana"""', '_step': '(16)'}), "(text='banana', _step=16)... |
import os
import re
import subprocess
from collections import Counter
from django.conf import settings
from django.core.management.base import BaseCommand
import datadog
from dimagi.ext.couchdbkit import Document
from corehq.feature_previews import all_previews
from corehq.toggles import all_toggles
class Datadog... | [
"corehq.feature_previews.all_previews",
"datadog.initialize",
"subprocess.Popen",
"subprocess.run",
"os.environ.get",
"re.match",
"datadog.api.Metric.send",
"django.conf.settings.DOMAIN_MODULE_MAP.values",
"re.findall",
"corehq.toggles.all_toggles"
] | [((2957, 3058), 'subprocess.Popen', 'subprocess.Popen', (["['./scripts/codechecks/hqDefine.sh', 'static-analysis']"], {'stdout': 'subprocess.PIPE'}), "(['./scripts/codechecks/hqDefine.sh', 'static-analysis'],\n stdout=subprocess.PIPE)\n", (2973, 3058), False, 'import subprocess\n'), ((412, 447), 'os.environ.get', 'o... |
import datetime
import datetime as dt
import pytz
def current_time():
return dt.datetime.now().strftime("%H:%M:%S")
def time_string_to_js_timestamp(time: datetime) -> int:
# JS needs * 1000 because its timestamps are in milliseconds, not seconds
timezone = pytz.timezone("UTC")
return round(timezone.localize(time).timestamp()... | [
"pytz.timezone",
"datetime.datetime.now"
] | [((247, 267), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (260, 267), False, 'import pytz\n'), ((83, 100), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (98, 100), True, 'import datetime as dt\n')] |
"""
Tests for snakelize
module: django_auto_model.utils
"""
import datetime
from django_auto_model.utils import get_now
def test_is_datetime():
"""Should be a datetime instance"""
now = get_now()
assert isinstance(now, datetime.datetime)
def test_value_is_close_to_now():
"""Should be close enough to t... | [
"datetime.datetime.now",
"django_auto_model.utils.get_now"
] | [((195, 204), 'django_auto_model.utils.get_now', 'get_now', ([], {}), '()\n', (202, 204), False, 'from django_auto_model.utils import get_now\n'), ((359, 382), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (380, 382), False, 'import datetime\n'), ((393, 402), 'django_auto_model.utils.get_now', 'ge... |
from setuptools import setup, find_packages
from setuptools.command.test import test
from distutils.util import convert_path
# We can't import the submodule normally as that would "run" the main module
# code while the setup script is meant to *build* the module.
# Besides preventing a whole possible mess of issues w... | [
"distutils.util.convert_path",
"setuptools.find_packages",
"setuptools.command.test.test.finalize_options",
"os.path.dirname",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((805, 832), 'setuptools.command.test.test.finalize_options', 'test.finalize_options', (['self'], {}), '(self)\n', (826, 832), False, 'from setuptools.command.test import test\n'), ((1041, 1077), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1064, 1077), False, '... |
from main import dp
from aiogram import types
from aiogram.dispatcher.filters.builtin import Text
@dp.message_handler(Text(equals="Все задания 🤩"))
async def vse_zadaniya(msg: types.Message):
await msg.answer(text="<b>Ваши задания:</b>\n\nскоро наполню")
@dp.message_handler(Text(equals="Добавить 📝"))
async d... | [
"aiogram.dispatcher.filters.builtin.Text",
"aiogram.types.ReplyKeyboardRemove"
] | [((121, 149), 'aiogram.dispatcher.filters.builtin.Text', 'Text', ([], {'equals': '"""Все задания 🤩"""'}), "(equals='Все задания 🤩')\n", (125, 149), False, 'from aiogram.dispatcher.filters.builtin import Text\n'), ((284, 309), 'aiogram.dispatcher.filters.builtin.Text', 'Text', ([], {'equals': '"""Добавить 📝"""'}), "(... |
#!/usr/bin/env python
import Bio
from Bio import SeqIO
import sys
filt = []
seqs = list(SeqIO.parse(sys.argv[1],'fasta'))
minlen = int(sys.argv[2])
maxlen = int(sys.argv[3])
output = sys.argv[4]
for rec in seqs:
s = str(rec.seq)
l = len(s)
if ((l >= minlen) and (l <= maxlen)):
filt.append(rec... | [
"Bio.SeqIO.parse",
"Bio.SeqIO.write"
] | [((323, 357), 'Bio.SeqIO.write', 'SeqIO.write', (['filt', 'output', '"""fasta"""'], {}), "(filt, output, 'fasta')\n", (334, 357), False, 'from Bio import SeqIO\n'), ((91, 124), 'Bio.SeqIO.parse', 'SeqIO.parse', (['sys.argv[1]', '"""fasta"""'], {}), "(sys.argv[1], 'fasta')\n", (102, 124), False, 'from Bio import SeqIO\n... |
import json
import datetime
import requests
import urlobject
from .utils import format_faf_date
API_BASE = urlobject.URLObject('https://api.faforever.com')
ENTITY_TYPE_TO_DEFAULT_DATE_FIELD = {
'game': 'startTime',
'player': 'createTime',
'map': 'createTime',
'mapVersion': 'createTime',
}
def constr... | [
"urlobject.URLObject",
"json.dump",
"requests.get"
] | [((109, 157), 'urlobject.URLObject', 'urlobject.URLObject', (['"""https://api.faforever.com"""'], {}), "('https://api.faforever.com')\n", (128, 157), False, 'import urlobject\n'), ((1163, 1180), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1175, 1180), False, 'import requests\n'), ((1700, 1752), 'json.dum... |
from nornir import InitNornir
from nornir.core.filter import F
def main():
nr = InitNornir()
print("\nExercise 3a (role AGG)")
print("-" * 20)
agg_devs = nr.filter(F(role__contains="AGG"))
print(agg_devs.inventory.hosts)
print("-" * 20)
print("\nExercise 3b (sea or sfo group)")
print... | [
"nornir.core.filter.F",
"nornir.InitNornir"
] | [((86, 98), 'nornir.InitNornir', 'InitNornir', ([], {}), '()\n', (96, 98), False, 'from nornir import InitNornir\n'), ((183, 206), 'nornir.core.filter.F', 'F', ([], {'role__contains': '"""AGG"""'}), "(role__contains='AGG')\n", (184, 206), False, 'from nornir.core.filter import F\n'), ((353, 378), 'nornir.core.filter.F'... |
# -*- coding: utf-8 -*-
from collections import Iterable
from aserializer.utils import py2to3, registry
from aserializer.fields.fields import BaseSerializerField, SerializerFieldValueError
class SerializerObjectField(BaseSerializerField):
def __init__(self, fields=None, exclude=None, *args, **kwargs):
... | [
"aserializer.utils.registry.get_serializer",
"aserializer.fields.fields.SerializerFieldValueError"
] | [((705, 744), 'aserializer.utils.registry.get_serializer', 'registry.get_serializer', (['serializer_cls'], {}), '(serializer_cls)\n', (728, 744), False, 'from aserializer.utils import py2to3, registry\n'), ((1998, 2072), 'aserializer.fields.fields.SerializerFieldValueError', 'SerializerFieldValueError', (['self._serial... |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class DirectoryAccessGroup(models.Model):
"""
Grants expiring group access to the personnel directory.
"""
organization = models.ForeignKey('core.Organization', on_delete=models.CASCADE)
group = models.ForeignKey... | [
"django.db.models.DateTimeField",
"django.utils.translation.ugettext_lazy",
"django.db.models.ForeignKey"
] | [((226, 290), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""core.Organization"""'], {'on_delete': 'models.CASCADE'}), "('core.Organization', on_delete=models.CASCADE)\n", (243, 290), False, 'from django.db import models\n'), ((303, 360), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""auth.Group""... |
""" MACD Indicator
"""
import math
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
class SAR(IndicatorUtils):
def analyze(self, historical_data, signal=['sar'], hot_thresh=None, cold_thresh=None):
"""Performs a macd analysis on the historical data
Args:
... | [
"talib.abstract.SAR"
] | [((967, 990), 'talib.abstract.SAR', 'abstract.SAR', (['dataframe'], {}), '(dataframe)\n', (979, 990), False, 'from talib import abstract\n')] |
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, open_sqlite_db_readonly
def get_installedappsGass(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if file_found.endswith('.d... | [
"scripts.artifact_report.ArtifactHtmlReport",
"scripts.ilapfuncs.tsv",
"scripts.ilapfuncs.logfunc",
"scripts.ilapfuncs.open_sqlite_db_readonly"
] | [((355, 390), 'scripts.ilapfuncs.open_sqlite_db_readonly', 'open_sqlite_db_readonly', (['file_found'], {}), '(file_found)\n', (378, 390), False, 'from scripts.ilapfuncs import logfunc, tsv, open_sqlite_db_readonly\n'), ((913, 949), 'scripts.artifact_report.ArtifactHtmlReport', 'ArtifactHtmlReport', (['"""Installed Apps... |
import click
import collections
@click.group()
def cli():
pass
@cli.group()
def day1():
pass
@day1.command()
@click.argument('input_file', type=click.File())
def part1(input_file):
from day1 import part1_stanta_floor_positioning_system
for directions in input_file:
floor = part1_stanta_flo... | [
"day1.part2_santa_fps_halt",
"click.argument",
"click.group",
"click.option",
"day2.part1_wrapping_paper_estimate",
"click.File",
"collections.Counter",
"day2.part2_ribbon_estimate",
"day1.part1_stanta_floor_positioning_system"
] | [((35, 48), 'click.group', 'click.group', ([], {}), '()\n', (46, 48), False, 'import click\n'), ((470, 502), 'click.option', 'click.option', (['"""--halt"""'], {'type': 'int'}), "('--halt', type=int)\n", (482, 502), False, 'import click\n'), ((1998, 2026), 'click.argument', 'click.argument', (['"""secret_key"""'], {}),... |
# Utilization Checks
# https://aonecode.com/amazon-online-assessment-utilization-checks
import math
class UtilizationChecks:
def solve(self, instances, averageUtil):
i = 0
while i < len(averageUtil):
if 25 <= averageUtil[i] <= 60:
i += 1
continue
... | [
"math.ceil"
] | [((706, 730), 'math.ceil', 'math.ceil', (['(instances / 2)'], {}), '(instances / 2)\n', (715, 730), False, 'import math\n')] |
#!/usr/bin/env python
import dynamic_reconfigure.server
from jsk_topic_tools import ConnectionBasedTransport
import json
import os.path as osp
import rospy
from std_msgs.msg import String
from jsk_arc2017_common.cfg import CandidatesPublisherConfig
from jsk_recognition_msgs.msg import Label
from jsk_recognition_msgs.... | [
"os.path.exists",
"rospy.logfatal_throttle",
"rospy.init_node",
"rospy.get_param",
"jsk_recognition_msgs.msg.Label",
"os.path.join",
"jsk_recognition_msgs.msg.LabelArray",
"rospy.Time.now",
"json.load",
"rospy.logwarn_throttle",
"os.path.isdir",
"rospy.spin",
"rospy.Duration",
"rospy.Subsc... | [((2897, 2936), 'rospy.init_node', 'rospy.init_node', (['"""candidates_publisher"""'], {}), "('candidates_publisher')\n", (2912, 2936), False, 'import rospy\n'), ((2990, 3002), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3000, 3002), False, 'import rospy\n'), ((706, 737), 'rospy.get_param', 'rospy.get_param', (['"""... |
#!/usr/bin/env python
from redis import Redis
import uuid
import sys
import os
import subprocess
import shutil
import numpy as np
import itertools as it
import json
from rdkit import Chem
from rdkit.Chem import AllChem, ChemicalForceFields
redis = Redis.from_url("redis://" + os.environ.get("EXECUTOR_C... | [
"rdkit.Chem.MolFromMolBlock",
"itertools.product",
"json.dumps",
"os.environ.get",
"rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField",
"rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeProperties",
"numpy.arange"
] | [((760, 804), 'rdkit.Chem.MolFromMolBlock', 'Chem.MolFromMolBlock', (['sdfstr'], {'removeHs': '(False)'}), '(sdfstr, removeHs=False)\n', (780, 804), False, 'from rdkit import Chem\n'), ((818, 868), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeProperties', 'ChemicalForceFields.MMFFGetMoleculeProperties', (['mol'], {})... |
# -*- coding: utf-8 -*-
"""
Javelin Web2Py Admin Controller
"""
# metadata
__author__ = "<NAME>"
__copyright__ = "(c) 2013, Jacobson and Varni, LLC"
__date__ = "7/12/2013"
__email__ = "<EMAIL>"
__data__ = {'name' : 'jadmin', 'label' : 'Admin', 'description' : 'Only accessible to admins',
'icon' : 'briefcase', 'u-ic... | [
"StringIO.StringIO",
"reportlab.platypus.TableStyle",
"reportlab.platypus.flowables.PageBreak",
"gluon.contrib.simplejson.loads",
"reportlab.lib.styles.ParagraphStyle",
"reportlab.platypus.Spacer",
"datetime.datetime.now",
"csv.reader",
"time.time",
"reportlab.platypus.SimpleDocTemplate",
"appli... | [((1045, 1059), 'applications.javelin.ctr_data.get_ctr_data', 'get_ctr_data', ([], {}), '()\n', (1057, 1059), False, 'from applications.javelin.ctr_data import ctr_enabled, get_ctr_data\n'), ((1815, 1834), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (1832, 1834), False, 'import StringIO\n'), ((1843, 197... |
import unittest
import sys
#sys.path.append('../')
from models.Individual import Individual
from models.Family import Family
from models.Gedcom import Gedcom
class TestSprint1(unittest.TestCase):
def setUp(self):
SUPPORT_TAGS = {"INDI", "NAME", "SEX", "BIRT", "DEAT", "FAMC", "FAMS", "FAM", "MARR", "HUSB... | [
"unittest.main",
"models.Family.Family",
"models.Gedcom.Gedcom",
"models.Individual.Individual"
] | [((35999, 36014), 'unittest.main', 'unittest.main', ([], {}), '()\n', (36012, 36014), False, 'import unittest\n'), ((420, 470), 'models.Gedcom.Gedcom', 'Gedcom', (['"""../testing_files/right.ged"""', 'SUPPORT_TAGS'], {}), "('../testing_files/right.ged', SUPPORT_TAGS)\n", (426, 470), False, 'from models.Gedcom import Ge... |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 09:36:07 2015
@author: Ben
"""
from shared_classes import Stock, StockItem, SpecifiedStock
from datamapfunctions import DataMapFunctions, Abstract
import util
import numpy as np
import config as cfg
class SupplyStock(Stock, StockItem):
def __init__(se... | [
"datamapfunctions.DataMapFunctions.__init__",
"numpy.repeat",
"shared_classes.SpecifiedStock.__init__",
"numpy.any",
"util.expand_multi",
"numpy.sum",
"shared_classes.StockItem.__init__",
"util.remove_df_levels",
"datamapfunctions.Abstract.__init__",
"util.unit_convert",
"numpy.nonzero",
"util... | [((458, 590), 'shared_classes.Stock.__init__', 'Stock.__init__', (['self', 'id', 'drivers'], {'sql_id_table': '"""SupplyStock"""', 'sql_data_table': '"""SupplyStockData"""', 'primary_key': '"""node_id"""'}), "(self, id, drivers, sql_id_table='SupplyStock',\n sql_data_table='SupplyStockData', primary_key='node_id', *... |
"""
2.Question 2
This problem also asks you to solve a knapsack instance, but a much bigger one.
This file (knapsack_big.txt) describes a knapsack instance, and it has the following format:
[knapsack_size][number_of_items]
[value_1] [weight_1]
[value_2] [weight_2]
...
For example, the third line of ... | [
"sys.setrecursionlimit"
] | [((1220, 1250), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 6)'], {}), '(10 ** 6)\n', (1241, 1250), False, 'import sys\n')] |
import json
import urllib.request
from linebot import (LineBotApi, WebhookHandler)
from linebot.models import (MessageEvent, TextMessage, PostbackEvent, FollowEvent, UnfollowEvent)
from linebot.exceptions import (LineBotApiError, InvalidSignatureError)
import os
import sys
import logging
import boto3
from boto3.dynamod... | [
"linebot.WebhookHandler",
"linebot.LineBotApi",
"textmessage.textmessage",
"postbackevent.postbackevent",
"dynamodbfunctions.user_regist",
"boto3.resource",
"sys.exit",
"dynamodbfunctions.get_new_user_id",
"dynamodbfunctions.user_delete"
] | [((472, 498), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (486, 498), False, 'import boto3\n'), ((722, 754), 'linebot.LineBotApi', 'LineBotApi', (['channel_access_token'], {}), '(channel_access_token)\n', (732, 754), False, 'from linebot import LineBotApi, WebhookHandler\n'), ((765, ... |