| code (string, lengths 22–1.05M) | apis (list, lengths 1–3.31k) | extract_api (string, lengths 75–3.25M) |
|---|---|---|
import os
import time
import subprocess
import pyblish.api
class MyAction(pyblish.api.Action):
label = "My Action"
on = "processed"
def process(self, context, plugin):
self.log.info("Running!")
class MyOtherAction(pyblish.api.Action):
label = "My Other Action"
def process(self, contex... | [
"subprocess.call",
"time.sleep",
"os.getcwd"
] | [((11574, 11587), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11584, 11587), False, 'import time\n'), ((12143, 12154), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12152, 12154), False, 'import os\n'), ((12226, 12273), 'subprocess.call', 'subprocess.call', (['"""start ."""'], {'cwd': 'cwd', 'shell': '(True)'})... |
import cv2
fs = cv2.FileStorage("back.yaml", cv2.FILE_STORAGE_READ)
camera_matrix = fs.getNode("camera_matrix").mat()
dist_coeffs = fs.getNode("dist_coeffs").mat()
resolution = fs.getNode("resolution").mat().flatten()
#load_camera_params()
print(camera_matrix)
| [
"cv2.FileStorage"
] | [((18, 69), 'cv2.FileStorage', 'cv2.FileStorage', (['"""back.yaml"""', 'cv2.FILE_STORAGE_READ'], {}), "('back.yaml', cv2.FILE_STORAGE_READ)\n", (33, 69), False, 'import cv2\n')] |
import requests
from bs4 import BeautifulSoup
from csv import writer
html_doc = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
... | [
"bs4.BeautifulSoup",
"csv.writer",
"requests.get"
] | [((1419, 1462), 'requests.get', 'requests.get', (['"""https://webscraper.io/blog/"""'], {}), "('https://webscraper.io/blog/')\n", (1431, 1462), False, 'import requests\n'), ((1473, 1516), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (1486, 1516)... |
# -*- coding: utf-8 -*-
"""
File for obtaining an account's access token and access token secret via xAuth
"""
import base64
import os
import oauth2
import urllib.parse
from TwitterAPI import TwitterAPI
class TwitterXAuth:
def __init__(self):
        # Hack for Windows environments
        # Reference: https://stackoverflow.com/questions/31469707/changing-th... | [
"oauth2.Consumer",
"oauth2.SignatureMethod_HMAC_SHA1",
"TwitterAPI.TwitterAPI.generate_header",
"oauth2.Client",
"base64.standard_b64decode",
"_locale._getdefaultlocale_backup"
] | [((1528, 1573), 'TwitterAPI.TwitterAPI.generate_header', 'TwitterAPI.generate_header', (['self.consumer_key'], {}), '(self.consumer_key)\n', (1554, 1573), False, 'from TwitterAPI import TwitterAPI\n'), ((1637, 1693), 'oauth2.Consumer', 'oauth2.Consumer', (['self.consumer_key', 'self.consumer_secret'], {}), '(self.consu... |
import keras.backend as K
from params import Params
from keras.losses import binary_crossentropy
def negative_avg_log_error(y_true, y_pred):
def sum_of_log_probabilities(true_and_pred):
y_true, y_pred = true_and_pred
losses = []
def get_loss_per_passage(true_and_pred):
y_true... | [
"keras.backend.cast",
"keras.backend.mean",
"keras.backend.map_fn",
"keras.backend.squeeze",
"keras.backend.log",
"keras.backend.binary_crossentropy"
] | [((720, 746), 'keras.backend.squeeze', 'K.squeeze', (['y_true'], {'axis': '(-1)'}), '(y_true, axis=-1)\n', (729, 746), True, 'import keras.backend as K\n'), ((776, 845), 'keras.backend.map_fn', 'K.map_fn', (['sum_of_log_probabilities', '(y_true, y_pred)'], {'dtype': '"""float32"""'}), "(sum_of_log_probabilities, (y_tru... |
import setuptools
import os
import configparser
from pathlib import Path
PROJECT_ROOT_DIR = Path(__file__).parent
if __name__ == "__main__":
setup_cfg_path = os.path.join(os.path.dirname(__file__), "setup.cfg")
config = configparser.ConfigParser()
config.read(setup_cfg_path)
version = config["metada... | [
"os.path.dirname",
"setuptools.setup",
"configparser.ConfigParser",
"pathlib.Path"
] | [((94, 108), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (98, 108), False, 'from pathlib import Path\n'), ((232, 259), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (257, 259), False, 'import configparser\n'), ((453, 471), 'setuptools.setup', 'setuptools.setup', ([], {}), '... |
# Generated by Django 2.2.5 on 2019-10-28 18:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classified', '0004_auto_20191027_1129'),
]
operations = [
migrations.AlterField(
model_name='classifiedad',
name='bo... | [
"django.db.models.TextField"
] | [((343, 397), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(1000)', 'verbose_name': '"""body"""'}), "(max_length=1000, verbose_name='body')\n", (359, 397), False, 'from django.db import migrations, models\n')] |
from flask import Flask, Markup, render_template
import pandas as pd
import datetime as dt
import re
source = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
app = Flask(__name__)
class GraphCorona:
def __init__(self, timeframe, country, dates=[... | [
"flask.render_template",
"pandas.read_csv",
"flask.Flask",
"datetime.datetime.today",
"pandas.DataFrame",
"datetime.timedelta",
"pandas.concat"
] | [((233, 248), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (238, 248), False, 'from flask import Flask, Markup, render_template\n'), ((1864, 1984), 'flask.render_template', 'render_template', (['"""bar_chart.html"""'], {'title': '"""Confirmed cases in Romania"""', 'max': 'maximum', 'labels': 'bar_dates',... |
# AUTOGENERATED FILE! PLEASE DON'T EDIT
"""
This is for all short utilities that has the boilerplate feeling. Conversion clis
might feel they have different styles, as :class:`toFloat` converts object iterator to
float iterator, while :class:`toPIL` converts single image url to single PIL image,
whereas :class:`toSum` ... | [
"k1lib.cli.insertIdColumn",
"k1lib.cli.init.patchDefaultDelim",
"k1lib.cli.transpose",
"k1lib.cli.item",
"k1lib.cli.wrapList"
] | [((5150, 5174), 'k1lib.cli.init.patchDefaultDelim', 'patchDefaultDelim', (['delim'], {}), '(delim)\n', (5167, 5174), False, 'from k1lib.cli.init import patchDefaultDelim, BaseCli, Table, T\n'), ((11723, 11747), 'k1lib.cli.insertIdColumn', 'cli.insertIdColumn', (['(True)'], {}), '(True)\n', (11741, 11747), True, 'import... |
import pytest
from faker import Faker
from django.db.utils import IntegrityError
from django.urls import reverse
from .factory import AccountSubTypeFactory
from ..models import AccountSubType, Account
from ..choices import AccountType
fake = Faker()
class TestAccountSubType:
def test_name_field(self, db):
... | [
"faker.Faker",
"pytest.raises",
"django.urls.reverse"
] | [((244, 251), 'faker.Faker', 'Faker', ([], {}), '()\n', (249, 251), False, 'from faker import Faker\n'), ((1578, 1607), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (1591, 1607), False, 'import pytest\n'), ((1781, 1810), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(... |
import pandas as pd
from polo2 import PoloDb
class PoloRetro:
def __init__(self, config):
self.config = config
self.corpus = None
self.model = None
self.retro = None
# todo: Rewrite as PoloCombiner or something and make this the init
def retro_combine(self, corpus_dbfile, ... | [
"pandas.DataFrame",
"polo2.PoloDb",
"pandas.read_sql_query"
] | [((376, 397), 'polo2.PoloDb', 'PoloDb', (['corpus_dbfile'], {}), '(corpus_dbfile)\n', (382, 397), False, 'from polo2 import PoloDb\n'), ((419, 439), 'polo2.PoloDb', 'PoloDb', (['model_dbfile'], {}), '(model_dbfile)\n', (425, 439), False, 'from polo2 import PoloDb\n'), ((584, 604), 'polo2.PoloDb', 'PoloDb', (['retro_dbf... |
import os
import torch
import argparse
import datetime
import numpy as np
import torch.optim as optim
from models.resnet import resnet50, Model, resnet50_1d
from losses.loss import ContrastiveLoss_
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from dataset.industry_dataset_identify import... | [
"os.path.exists",
"models.resnet.resnet50_1d",
"os.listdir",
"os.makedirs",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"argparse.ArgumentParser",
"losses.loss.ContrastiveLoss_",
"torch.load",
"sklearn.model_selection.train_test_split",
"datetime.datetime.now",
"torch.no_grad",
"os.path.dirn... | [((654, 680), 'os.path.exists', 'os.path.exists', (['result_dir'], {}), '(result_dir)\n', (668, 680), False, 'import os\n'), ((686, 709), 'os.makedirs', 'os.makedirs', (['result_dir'], {}), '(result_dir)\n', (697, 709), False, 'import os\n'), ((1160, 1176), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1170,... |
import os
import glob
import string
import re
import pathlib
import numpy as np
import pandas as pd
import joblib
from tqdm import tqdm
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
from unidecode import unidecode
nlp = spacy.load("en_core_web_md")
# tokenizer = Bert... | [
"os.path.exists",
"re.escape",
"pandas.read_parquet",
"os.makedirs",
"pathlib.Path",
"spacy.load",
"spacy.lang.en.English",
"tqdm.tqdm",
"joblib.delayed",
"joblib.Parallel",
"os.path.isdir",
"html.parser.HTMLParser",
"os.path.basename",
"unidecode.unidecode",
"re.sub",
"tqdm.tqdm.panda... | [((273, 301), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (283, 301), False, 'import spacy\n'), ((439, 448), 'spacy.lang.en.English', 'English', ([], {}), '()\n', (446, 448), False, 'from spacy.lang.en import English\n'), ((1024, 1039), 'unidecode.unidecode', 'unidecode', (['text... |
from django.shortcuts import render
from django.http import HttpResponse
# Include the `fusioncharts.py` file which has required functions to embed the charts in html page
from ..fusioncharts import FusionCharts
from ..fusioncharts import FusionTable
from ..fusioncharts import TimeSeries
import requests
# Loading Dat... | [
"requests.get"
] | [((468, 583), 'requests.get', 'requests.get', (['"""https://s3.eu-central-1.amazonaws.com/fusion.store/ft/data/single-event-overlay-data.json"""'], {}), "(\n 'https://s3.eu-central-1.amazonaws.com/fusion.store/ft/data/single-event-overlay-data.json'\n )\n", (480, 583), False, 'import requests\n'), ((592, 711), 'r... |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='pymixconsole',
version='0.0.1',
description='Headless multitrack mixing console in Python',
long_description=long_description,
long_description_content_type="text/markdow... | [
"setuptools.find_packages"
] | [((449, 464), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (462, 464), False, 'from setuptools import setup, find_packages\n')] |
"""
This script was modified from https://github.com/ZhaoJ9014/face.evoLVe.PyTorch
"""
import os
import cv2
import bcolz
import numpy as np
import tqdm
from sklearn.model_selection import KFold
from scipy import interpolate
import math
from .utils import l2_norm
def get_val_pair(path, name):
carray = bcolz.carray... | [
"numpy.arccos",
"numpy.logical_not",
"scipy.interpolate.interp1d",
"numpy.linalg.norm",
"sklearn.model_selection.KFold",
"numpy.arange",
"numpy.mean",
"numpy.less",
"numpy.multiply",
"numpy.asarray",
"numpy.subtract",
"numpy.max",
"numpy.argmax",
"numpy.square",
"numpy.std",
"cv2.resiz... | [((1952, 1976), 'numpy.less', 'np.less', (['dist', 'threshold'], {}), '(dist, threshold)\n', (1959, 1976), True, 'import numpy as np\n'), ((2867, 2908), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nrof_folds', 'shuffle': '(False)'}), '(n_splits=nrof_folds, shuffle=False)\n', (2872, 2908), False, 'from ... |
import logging
import random
import gzip
import filetype
from moonstone.normalization.reads.base import BaseDownsizing
logger = logging.getLogger(__name__)
class DownsizePair(BaseDownsizing):
"""Normalization for the purposes of assessing diversity. Reads are downsized by random selection of raw reads
gener... | [
"logging.getLogger",
"filetype.guess",
"gzip.open",
"random.seed",
"logging.info",
"random.randint"
] | [((130, 157), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (147, 157), False, 'import logging\n'), ((3892, 3914), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (3903, 3914), False, 'import random\n'), ((6067, 6089), 'random.seed', 'random.seed', (['self.seed'], {})... |
#!/usr/bin/env python
import time
from pyplanter.lib.light import Light
from pyplanter.logger import logger
def main():
"""
Get the sunrise and sunset data for the day
Usage: python pyplanter/scripts/light.py
"""
while True:
logger.debug("Updating light data")
light = Light()
... | [
"pyplanter.logger.logger.debug",
"time.sleep",
"pyplanter.lib.light.Light"
] | [((257, 292), 'pyplanter.logger.logger.debug', 'logger.debug', (['"""Updating light data"""'], {}), "('Updating light data')\n", (269, 292), False, 'from pyplanter.logger import logger\n'), ((309, 316), 'pyplanter.lib.light.Light', 'Light', ([], {}), '()\n', (314, 316), False, 'from pyplanter.lib.light import Light\n')... |
from flask import current_app
from app.server import app_logger
from app.server import ContextEnvironment
from app.server import sms
from app.server.models.user import User
def send_sms(message: str, phone_number: str):
context_env = ContextEnvironment(current_app)
if context_env.is_development or context_en... | [
"app.server.ContextEnvironment",
"app.server.app_logger.info",
"app.server.sms.send"
] | [((241, 272), 'app.server.ContextEnvironment', 'ContextEnvironment', (['current_app'], {}), '(current_app)\n', (259, 272), False, 'from app.server import ContextEnvironment\n'), ((342, 460), 'app.server.app_logger.info', 'app_logger.info', (['f"""IS NOT PRODUCTION NOT ACTUALLY SENDING:\nRecipient: {phone_number}\nMessa... |
from html.entities import name2codepoint
from numpy import isin
from MyGrammerParser import MyGrammerParser
from MyGrammerVisitor import MyGrammerVisitor
from MusicNodes import *
from music21 import *
import string
import copy
def appendInstrument(part, instru):
switcher = {
"clarinet": instrument.Clarine... | [
"MyGrammerVisitor.MyGrammerVisitor",
"copy.deepcopy"
] | [((8668, 8686), 'MyGrammerVisitor.MyGrammerVisitor', 'MyGrammerVisitor', ([], {}), '()\n', (8684, 8686), False, 'from MyGrammerVisitor import MyGrammerVisitor\n'), ((9648, 9666), 'MyGrammerVisitor.MyGrammerVisitor', 'MyGrammerVisitor', ([], {}), '()\n', (9664, 9666), False, 'from MyGrammerVisitor import MyGrammerVisito... |
import time
import pytest
from brownie import ERC20Basic, network
from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account
@pytest.fixture
def deploy_erc20_exercise():
# Arrange
# Arrange / Act
erc20 = ERC20Basic.deploy(
10000000,
{"from": get_account()}
)
# A... | [
"brownie.network.show_active",
"pytest.skip",
"scripts.helpful_scripts.get_account"
] | [((445, 466), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (464, 466), False, 'from brownie import ERC20Basic, network\n'), ((513, 550), 'pytest.skip', 'pytest.skip', (['"""Only for local testing"""'], {}), "('Only for local testing')\n", (524, 550), False, 'import pytest\n'), ((705, 726), 'b... |
import cv2
img1 = cv2.imread('img1.jpg')
height , width , layers = img1.shape
fourcc = cv2.VideoWriter_fourcc(*'X264')
video = cv2.VideoWriter('stitched_video.mp4',fourcc,7.0,(width,height))
frame_num = 1
while (frame_num <= 6552):
frame_name = "img" + str(frame_num) + ".jpg"
img = cv2.imread(frame_name)
video.w... | [
"cv2.imread",
"cv2.destroyAllWindows",
"cv2.VideoWriter_fourcc",
"cv2.VideoWriter"
] | [((19, 41), 'cv2.imread', 'cv2.imread', (['"""img1.jpg"""'], {}), "('img1.jpg')\n", (29, 41), False, 'import cv2\n'), ((90, 121), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'X264'"], {}), "(*'X264')\n", (112, 121), False, 'import cv2\n'), ((130, 197), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""stitched_vi... |
# Generated by Django 2.2.5 on 2019-09-21 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('machines', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='device',
name='serial',
fi... | [
"django.db.models.CharField"
] | [((324, 414), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(150)', 'null': '(True)', 'verbose_name': '"""serial number"""'}), "(blank=True, max_length=150, null=True, verbose_name=\n 'serial number')\n", (340, 414), False, 'from django.db import migrations, models\n'), ((... |
from django.db import models
class Porteiro(models.Model):
usuario = models.OneToOneField('usuarios.Usuario', verbose_name='Usuário', on_delete=models.PROTECT)
nome_completo = models.CharField(verbose_name='Nome completo', max_length=194)
cpf = models.CharField(verbose_name='CPF', max_length=11)
telef... | [
"django.db.models.OneToOneField",
"django.db.models.DateField",
"django.db.models.CharField"
] | [((75, 170), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""usuarios.Usuario"""'], {'verbose_name': '"""Usuário"""', 'on_delete': 'models.PROTECT'}), "('usuarios.Usuario', verbose_name='Usuário', on_delete=\n models.PROTECT)\n", (95, 170), False, 'from django.db import models\n'), ((186, 248), 'djan... |
import logging
import torch
log = logging.getLogger(__name__)
def get_training_device(try_gpu=True):
"""
Retrieves the device used for training. It favours a CUDA-enabled device,
but reverts to CPU if it doesn't find one.
Keyword arguments:
try_gpu -- If true, this function will prefer a CUDA-e... | [
"logging.getLogger",
"torch.cuda.is_available",
"torch.device"
] | [((36, 63), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (53, 63), False, 'import logging\n'), ((448, 473), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (471, 473), False, 'import torch\n'), ((541, 563), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), ... |
import pandas as pd
import scipy.io
import os
filenames = []
for filename in os.listdir('.'):
if '.mat' in filename:
filenames.append(filename)
for filename in filenames:
print(f'Processing file: {filename}')
mat = scipy.io.loadmat(filename)
headings = [
'Timestamp', '... | [
"pandas.DataFrame",
"os.listdir"
] | [((85, 100), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (95, 100), False, 'import os\n'), ((631, 649), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (643, 649), True, 'import pandas as pd\n')] |
# Generated by Django 3.0.4 on 2020-04-17 23:13
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Champion',
fields=[
... | [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((333, 426), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (349, 426), False, 'from django.db import migrations, models\... |
"""
This module contains helper functions
"""
import logging
import signal
from typing import Any
def handle_sigterm(*_args: Any) -> None:
"""
Sigterm handler: raise KeyboardInterrupt.
:param _args: unused arguments, accept any arguments
:raise KeyboardInterrupt: raises a keyboard inte... | [
"logging.getLogger",
"signal.signal",
"logging.basicConfig"
] | [((1124, 1169), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'handle_sigterm'], {}), '(signal.SIGTERM, handle_sigterm)\n', (1137, 1169), False, 'import signal\n'), ((1186, 1209), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1203, 1209), False, 'import logging\n'), ((1215, 1302), 'logg... |
import os, random, numpy as np, copy
from torch.utils.data import Dataset
import torch
def seq_collate(data):
(past_traj, future_traj) = zip(*data)
past_traj = torch.stack(past_traj,dim=0)
future_traj = torch.stack(future_traj,dim=0)
data = {
'past_traj': past_traj,
'future_traj': fu... | [
"torch.stack",
"torch.from_numpy",
"numpy.load"
] | [((172, 201), 'torch.stack', 'torch.stack', (['past_traj'], {'dim': '(0)'}), '(past_traj, dim=0)\n', (183, 201), False, 'import torch\n'), ((219, 250), 'torch.stack', 'torch.stack', (['future_traj'], {'dim': '(0)'}), '(future_traj, dim=0)\n', (230, 250), False, 'import torch\n'), ((843, 861), 'numpy.load', 'np.load', (... |
from django import template
from core.models import *
register = template.Library()
import re
@register.filter(name="cimg")
def cimg(value):
if value.img.name != '':
return value.img.name[4:]
else:
return "" | [
"django.template.Library"
] | [((65, 83), 'django.template.Library', 'template.Library', ([], {}), '()\n', (81, 83), False, 'from django import template\n')] |
#! /usr/bin/python
# -*- coding: utf8 -*-
import os, time, random
import numpy as np
import scipy
import tensorflow as tf
import tensorlayer as tl
from model import *
from utils import *
from config import *
###====================== HYPER-PARAMETERS ===========================###
batch_size = config.train.batch_siz... | [
"tensorflow.image.resize_images",
"numpy.sqrt",
"tensorlayer.layers.initialize_global_variables",
"tensorlayer.files.load_and_assign_npz",
"os.listdir",
"argparse.ArgumentParser",
"tensorflow.placeholder",
"tensorlayer.layers.get_variables_with_name",
"tensorflow.assign",
"tensorlayer.vis.save_ima... | [((371, 403), 'numpy.sqrt', 'np.sqrt', (['config.train.batch_size'], {}), '(config.train.batch_size)\n', (378, 403), True, 'import numpy as np\n'), ((2259, 2308), 'numpy.empty', 'np.empty', (['[batch_size, patch_size, patch_size, 3]'], {}), '([batch_size, patch_size, patch_size, 3])\n', (2267, 2308), True, 'import nump... |
import lxml.html
from billy.scrape.committees import CommitteeScraper, Committee
from apiclient import ApiClient
from .utils import get_with_increasing_timeout
from scrapelib import HTTPError
class INCommitteeScraper(CommitteeScraper):
jurisdiction = 'in'
def process_special_members(self,comm,comm_json,role... | [
"apiclient.ApiClient",
"billy.scrape.committees.Committee"
] | [((2230, 2245), 'apiclient.ApiClient', 'ApiClient', (['self'], {}), '(self)\n', (2239, 2245), False, 'from apiclient import ApiClient\n'), ((3771, 3821), 'billy.scrape.committees.Committee', 'Committee', (['chamber', 'owning_comm'], {'subcommittee': 'name'}), '(chamber, owning_comm, subcommittee=name)\n', (3780, 3821),... |
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
... | [
"datetime.datetime.strptime",
"lxml.html.fromstring",
"datetime.date.today",
"datetime.timedelta",
"datetime.datetime.combine",
"re.search"
] | [((1821, 1842), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1840, 1842), False, 'import datetime\n'), ((2129, 2194), 're.search', 're.search', (['"""(?:absid|ProeveholdId|outboundCensorID)=(\\\\d+)"""', 'link'], {}), "('(?:absid|ProeveholdId|outboundCensorID)=(\\\\d+)', link)\n", (2138, 2194), Fals... |
import pandas as pd
import numpy as np
from typing import Tuple
from itertools import product
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_tes... | [
"sklearn.model_selection.train_test_split",
"itertools.product",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"numpy.linspace",
"sklearn.multiclass.OneVsRestClassifier"
] | [((924, 944), 'numpy.array', 'np.array', (['f1_mac_lst'], {}), '(f1_mac_lst)\n', (932, 944), True, 'import numpy as np\n'), ((1045, 1065), 'numpy.array', 'np.array', (['f1_mic_lst'], {}), '(f1_mic_lst)\n', (1053, 1065), True, 'import numpy as np\n'), ((1544, 1605), 'sklearn.ensemble.RandomForestClassifier', 'RandomFore... |
import numpy as np
import pandas as pd
import datetime as dt
import operator
import time
import os
def most_common_day(dataframe):
start = time.time()
most_common_day_dict = dataframe['Day of the Week'].value_counts().to_dict()
popular_day_of_the_week = max(most_common_day_dict.items() , key=operator.item... | [
"os.path.exists",
"time.time",
"operator.itemgetter",
"pandas.read_csv"
] | [((145, 156), 'time.time', 'time.time', ([], {}), '()\n', (154, 156), False, 'import time\n'), ((519, 530), 'time.time', 'time.time', ([], {}), '()\n', (528, 530), False, 'import time\n'), ((902, 913), 'time.time', 'time.time', ([], {}), '()\n', (911, 913), False, 'import time\n'), ((1765, 1776), 'time.time', 'time.tim... |
#!/usr/bin/env python3
"""
"""
import liblo
from pythonosc import udp_client
from ptpython.python_input import PythonInput
PORT = 7000
client = udp_client.SimpleUDPClient('127.0.0.1', PORT)
def send(data):
liblo.send(liblo.Address(PORT), data)
def send_prefix(prefix, data):
client.send_message(prefix, data... | [
"pythonosc.udp_client.SimpleUDPClient",
"ptpython.python_input.PythonInput",
"liblo.Address"
] | [((147, 192), 'pythonosc.udp_client.SimpleUDPClient', 'udp_client.SimpleUDPClient', (['"""127.0.0.1"""', 'PORT'], {}), "('127.0.0.1', PORT)\n", (173, 192), False, 'from pythonosc import udp_client\n'), ((349, 374), 'ptpython.python_input.PythonInput', 'PythonInput', ([], {'vi_mode': '(True)'}), '(vi_mode=True)\n', (360... |
from functools import partial
import torch
import dphsir.solvers.fns.inpaint as task
from dphsir.degrades.inpaint import FastHyStripe
from dphsir.denoisers.wrapper import GRUNetDenoiser
from dphsir.solvers import callbacks
from dphsir.solvers.base import ADMMSolver
from dphsir.solvers.params import admm_log_descent
f... | [
"dphsir.solvers.callbacks.ProgressBar",
"dphsir.utils.io.loadmat",
"dphsir.degrades.inpaint.FastHyStripe",
"functools.partial",
"dphsir.solvers.fns.inpaint.Prox",
"dphsir.metrics.mpsnr",
"dphsir.denoisers.wrapper.GRUNetDenoiser",
"dphsir.solvers.base.ADMMSolver",
"torch.device"
] | [((427, 440), 'dphsir.utils.io.loadmat', 'loadmat', (['path'], {}), '(path)\n', (434, 440), False, 'from dphsir.utils.io import loadmat\n'), ((468, 482), 'dphsir.degrades.inpaint.FastHyStripe', 'FastHyStripe', ([], {}), '()\n', (480, 482), False, 'from dphsir.degrades.inpaint import FastHyStripe\n'), ((545, 567), 'torc... |
import re
import sys
import json
import supybot.world as world
import supybot.utils as utils
from supybot import httpserver
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n impor... | [
"supybot.callbacks.Plugin.__init__",
"supybot.httpserver.hook",
"supybot.ircmsgs.privmsg",
"supybot.i18n.PluginInternationalization",
"supybot.httpserver.unhook"
] | [((488, 526), 'supybot.i18n.PluginInternationalization', 'PluginInternationalization', (['"""Realhook"""'], {}), "('Realhook')\n", (514, 526), False, 'from supybot.i18n import PluginInternationalization, internationalizeDocstring\n'), ((1353, 1389), 'supybot.callbacks.Plugin.__init__', 'callbacks.Plugin.__init__', (['s... |
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law o... | [
"compare_gan.gans.modular_gan.ModularGAN",
"absl.testing.parameterized.parameters",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.test.main",
"compare_gan.datasets.get_dataset"
] | [((1372, 1407), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[1, 2, 5]'], {}), '([1, 2, 5])\n', (1396, 1407), False, 'from absl.testing import parameterized\n'), ((2356, 2391), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[1, 2, 5]'], {}), '([1, 2, 5])\n', (2380, 23... |
import os
import requests
import us
from django.core.management.base import BaseCommand
from geography.models import Division, DivisionLevel
from government.models import Body, Jurisdiction, Office
from tqdm import tqdm
BASE_URL = 'https://api.propublica.org/congress/v1/'
class Command(BaseCommand):
help = (
... | [
"government.models.Jurisdiction.objects.filter",
"tqdm.tqdm",
"os.environ.get",
"government.models.Jurisdiction.objects.get",
"geography.models.DivisionLevel.objects.get",
"government.models.Office.objects.get_or_create",
"government.models.Body.objects.get",
"us.states.lookup",
"geography.models.Di... | [((410, 466), 'government.models.Jurisdiction.objects.get', 'Jurisdiction.objects.get', ([], {'name': '"""U.S. Federal Government"""'}), "(name='U.S. Federal Government')\n", (434, 466), False, 'from government.models import Body, Jurisdiction, Office\n'), ((856, 894), 'tqdm.tqdm', 'tqdm', (["members['results'][0]['mem... |
"""
Supporting file for Sala 2019 trawl model
Contains a function that accepts a fishery model as input and returns the appropriate "cluster" from the
Sala study. Unfortunately, the mediterranean trawlers are considerably smaller than the largest-scale models, so
large-scale trawl models will be inaccurately represen... | [
"collections.namedtuple"
] | [((417, 473), 'collections.namedtuple', 'namedtuple', (['"""ClusterModel"""', "('name', 'LOA', 'hp', 'GRT')"], {}), "('ClusterModel', ('name', 'LOA', 'hp', 'GRT'))\n", (427, 473), False, 'from collections import namedtuple\n')] |
'''
Copyright (c) 2017-2018, wezu (<EMAIL>)
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WA... | [
"os.path.exists",
"random.choice",
"datetime.datetime.now",
"copy.deepcopy"
] | [((48802, 48830), 'os.path.exists', 'os.path.exists', (['name_of_file'], {}), '(name_of_file)\n', (48816, 48830), False, 'import os\n'), ((67149, 67200), 'copy.deepcopy', 'copy.deepcopy', (['self.critical_hit_dmg[self.aim_mode]'], {}), '(self.critical_hit_dmg[self.aim_mode])\n', (67162, 67200), False, 'import copy\n'),... |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import tkinter
from tkinter import ttk
def go(*args): # event handler; *args means variable-length arguments
    print(comboxlist.get()) # print the selected value
win=tkinter.Tk() # create the main window
comvalue=tkinter.StringVar() # text variable owned by the window; create a new value
comboxlist=ttk.Combobox(win,textvariable=comvalue) # initialize
comboxlist["values"]=("1","2","3","4")
co... | [
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.ttk.Combobox"
] | [((161, 173), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (171, 173), False, 'import tkinter\n'), ((189, 208), 'tkinter.StringVar', 'tkinter.StringVar', ([], {}), '()\n', (206, 208), False, 'import tkinter\n'), ((234, 274), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['win'], {'textvariable': 'comvalue'}), '(win, textva... |
# Generated by Django 2.2.5 on 2019-09-20 23:14
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adjudication', '0005_auto_20190918_0711'),
]
operations = [
migrations.AlterField(
model_name='... | [
"django.db.models.ManyToManyField"
] | [((376, 470), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""panelists"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='panelists', to=settings.\n AUTH_USER_MODEL)\n", (398, 470), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
#
# Read configfile and return it.
'''
Wolfspyre Configurator. v 0.01
Who doesn't like Config Files.
'''
import yaml
from pprint import pprint
from PyBTSteward.dict_utils import smerge_dicts, merge_dict
import logging
logger = logging.getLogger(__name__)
def wpl_cfg(base_cfg='config.yml',custom... | [
"logging.getLogger",
"PyBTSteward.dict_utils.smerge_dicts",
"PyBTSteward.dict_utils.merge_dict",
"yaml.load",
"pprint.pprint"
] | [((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((509, 521), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (518, 521), False, 'import yaml\n'), ((581, 593), 'yaml.load', 'yaml.load', (['c'], {}), '(c)\n', (590, 593), False, 'import ya... |
from dataclasses import dataclass
from uff.origin import Origin
from uff.rotation import Rotation
@dataclass
class PlaneWaveOrigin(Origin):
rotation: Rotation = Rotation()
| [
"uff.rotation.Rotation"
] | [((168, 178), 'uff.rotation.Rotation', 'Rotation', ([], {}), '()\n', (176, 178), False, 'from uff.rotation import Rotation\n')] |
import os
import shutil
import sys
import glob
import time
import datetime
import logging
from media_grouper.image import Image
from media_grouper.video import Video
from media_grouper.detect import Detector
from typing import List
class MediaGrouper:
def __init__(self,
src: str,
... | [
"os.path.exists",
"media_grouper.video.Video",
"sys.exit",
"os.makedirs",
"glob.iglob",
"os.path.splitext",
"os.path.join",
"media_grouper.detect.Detector",
"logging.warning",
"datetime.datetime.now",
"os.path.isdir",
"os.path.basename",
"shutil.copy",
"shutil.rmtree",
"os.path.abspath",... | [((831, 851), 'os.path.abspath', 'os.path.abspath', (['dst'], {}), '(dst)\n', (846, 851), False, 'import os\n'), ((1192, 1223), 'os.path.exists', 'os.path.exists', (['self.dst_folder'], {}), '(self.dst_folder)\n', (1206, 1223), False, 'import os\n'), ((1562, 1590), 'os.makedirs', 'os.makedirs', (['self.dst_folder'], {}... |
import collections
from varappx.common.genotypes import decode_int
from varappx.constants.filters import ALL_VARIANT_FILTER_NAMES
from varappx.main.filters.sort import Sort
from varappx.models.gemini import Variants, GeneDetailed
# For export to frontend
_variant_genotype_expose = {0: [0,0], 1: [0,1], 2: [None,None], ... | [
"collections.namedtuple",
"varappx.models.gemini.GeneDetailed.transcript.in_",
"varappx.handle_init.db.create_all",
"varappx.common.genotypes.decode_int",
"varappx.models.gemini.Variants.__table__.columns.keys",
"varappx.main.filters.sort.Sort"
] | [((662, 716), 'collections.namedtuple', 'collections.namedtuple', (['"""VariantTuple"""', 'VARIANT_FIELDS'], {}), "('VariantTuple', VARIANT_FIELDS)\n", (684, 716), False, 'import collections\n'), ((734, 819), 'collections.namedtuple', 'collections.namedtuple', (['"""VariantTriplet"""', "['variant_id', 'gene_symbol', 's... |
from stack_and_queue import __version__
import pytest
from stack_and_queue.stack_and_queue import Stack , Queue
def test_version():
assert __version__ == '0.1.0'
def test_push_onto_a_stack():
node = Stack()
node.push(1)
excepted =1
actual = node.top.data
assert excepted == actual
def test_p... | [
"stack_and_queue.stack_and_queue.Queue",
"stack_and_queue.stack_and_queue.Stack"
] | [((211, 218), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (216, 218), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((367, 374), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (372, 374), False, 'from stack_and_queue.stack_and_queue import Stack, Queu... |
# Time: O(n * l^2), it also takes O(l) to make the substring w[:i]+w[i+1:]
# Space: O(n * l)
# 1048 weekly contest 137 5/18/2019
# Given a list of words, each word consists of English lowercase letters.
#
# Let's say word1 is a predecessor of word2 if and only if we can add exactly one letter anywhere in word1
# to m... | [
"collections.defaultdict"
] | [((965, 993), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (988, 993), False, 'import collections\n')] |
#!/usr/bin/python
# Script to commit and push to a remote repository using gitPython package
# Usage: python commitAndPushToRemote.py
# Author: <NAME>
# Date: 18 July 2019
# Version: v0.0.2 (23 August 2019)
# NOTE: Repository should already exist and have a remote named 'origin' configured
from git import Repo # Impor... | [
"git.Repo",
"pathlib.Path"
] | [((580, 624), 'pathlib.Path', 'Path', (['"""C:\\\\Users\\\\USERNAME\\\\test_repository"""'], {}), "('C:\\\\Users\\\\USERNAME\\\\test_repository')\n", (584, 624), False, 'from pathlib import Path\n'), ((761, 781), 'git.Repo', 'Repo', (['source_git_dir'], {}), '(source_git_dir)\n', (765, 781), False, 'from git import Rep... |
import os
class Configuration:
DEBUG = True
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE = os.path.join(PROJECT_ROOT, 'tmp', 'flask_test.db')
SQLALCHEMY_DATABASE_URI = f'sqlite:///{DATABASE}'
SQLALCHEMY_TRACK_MODIFICATIONS = True
| [
"os.path.realpath",
"os.path.join"
] | [((128, 178), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""tmp"""', '"""flask_test.db"""'], {}), "(PROJECT_ROOT, 'tmp', 'flask_test.db')\n", (140, 178), False, 'import os\n'), ((85, 111), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os\n')] |
# Temporary version of simplejson in a single file to get around older packaging
# restrictions in previous releases of Access Grid 2.
# ---------------------------------------------------------------------------------------
# simplejson/scanner.py
"""
Iterator based sre token scanner
"""
import sre_parse, sre_compi... | [
"sre_parse.SubPattern",
"re.compile",
"struct.unpack",
"sre_compile.compile",
"sre_parse.parse",
"warnings.warn",
"sre_parse.Pattern"
] | [((2538, 2585), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x19\\\\\\\\"\\\\b\\\\f\\\\n\\\\r\\\\t]"""'], {}), '(\'[\\\\x00-\\\\x19\\\\\\\\"\\\\b\\\\f\\\\n\\\\r\\\\t]\')\n', (2548, 2585), False, 'import re\n'), ((2593, 2626), 're.compile', 're.compile', (['"""([\\\\\\\\"/]|[^\\\\ -~])"""'], {}), '(\'([\\\\\\\\"/]|[^\... |
#!/usr/bin/python3
# wucheng August 8, 2019
# sockmetrics.py --- collect sock communication info among hosts
# version 0.9
# two methods to output results:
# 1. to screen
# 2. to TDengine, a kind of structured time series db
# please run ./sockmetrics -h to get help
# This program bases on https://github.com/ioviso... | [
"collections.namedtuple",
"argparse.ArgumentParser",
"socket.inet_ntop",
"argparse.ArgumentTypeError",
"taos.connect",
"time.sleep",
"struct.pack",
"datetime.datetime.now",
"collections.defaultdict",
"json.load",
"os.stat",
"bcc.BPF"
] | [((1354, 1517), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Summarize Sock send/recv throughput by host"""', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'epilog': 'examples'}), "(description=\n 'Summarize Sock send/recv throughput by host', formatter_class=argparse\n... |
#!/usr/bin/env python3
import argparse
import csv
from logging import error, warning
import requests
import urllib3
import act
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parseargs():
""" Parse arguments """
parser = argparse.ArgumentParser(
description='Get Threat Acto... | [
"argparse.ArgumentParser",
"logging.warning",
"requests.get",
"urllib3.disable_warnings",
"logging.error",
"act.Act"
] | [((131, 198), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (155, 198), False, 'import urllib3\n'), ((259, 329), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get Threat Actor... |
"""
`icclim.models.frequency` wraps the concept of pandas frequency in order to resample
time series. `slice_mode` parameter of `icclim.index` is always converted to a
`Frequency`.
"""
import datetime
from enum import Enum
from typing import Any, Callable, List, Optional, Tuple, Union
import cftime
impor... | [
"xarray.coding.cftime_offsets.to_offset",
"xarray.core.dataarray.DataArray",
"numpy.unique",
"pandas.Timedelta",
"xarray.concat",
"pandas.tseries.frequencies.to_offset",
"cftime.datetime",
"icclim.icclim_exceptions.InvalidIcclimArgumentError",
"datetime.timedelta",
"pandas.to_datetime"
] | [((7101, 7163), 'icclim.icclim_exceptions.InvalidIcclimArgumentError', 'InvalidIcclimArgumentError', (['f"""Unknown frequency {slice_mode}."""'], {}), "(f'Unknown frequency {slice_mode}.')\n", (7127, 7163), False, 'from icclim.icclim_exceptions import InvalidIcclimArgumentError\n'), ((1198, 1224), 'numpy.unique', 'np.u... |
# -*- coding: utf-8 -*-
# this file is generated by gen_kdata_schema function, dont't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.api.data_type import Region, Provider, EntityType
from zvt.domain.quotes import StockKdataCommon
from zvt.contract.register import register_schema
KdataBase ... | [
"zvt.contract.register.register_schema",
"sqlalchemy.ext.declarative.declarative_base"
] | [((322, 340), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (338, 340), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((633, 870), 'zvt.contract.register.register_schema', 'register_schema', ([], {'regions': '[Region.CHN, Region.US]', 'providers': '{Region.C... |
"""
Test grades.py
"""
# Standard library imports
from unittest.mock import patch
# Third-party library imports
import pytest
class TestDataIsRetrievedCorrectly:
@staticmethod
def test_count_finished_modules(local_grades):
with patch.dict(
local_grades.data,
{
... | [
"pytest.mark.parametrize",
"unittest.mock.patch.dict"
] | [((22455, 22661), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_credits,exp_percentage"""', '[(-100, -1), (-2, -1), (-1, -1), (0, 0), (15, 4.17), (30, 8.33), (60, 16.67\n ), (135, 37.5), (240, 66.67), (300, 83.33), (360, 100), (375, 100)]'], {}), "('num_credits,exp_percentage', [(-100, -1), (-2, -1... |
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/l... | [
"starthinker.util.configuration.Configuration",
"textwrap.dedent",
"starthinker.task.fred.run.fred"
] | [((1909, 2178), 'starthinker.task.fred.run.fred', 'fred', (['config', "{'auth': auth, 'api_key': fred_api_key, 'frequency': fred_frequency,\n 'series': [{'series_id': fred_series_id, 'units': fred_units,\n 'aggregation_method': fred_aggregation_method}], 'out': {'bigquery': {\n 'project': project, 'dataset': d... |
"""Day 10: Monitoring Station"""
from collections import defaultdict, deque
from functools import partial
from math import atan2, gcd, sqrt
from typing import DefaultDict, Deque, Iterable, Iterator, List, NamedTuple, Set, Tuple
import pytest
import aoc
DAY = 10
class Location(NamedTuple):
across: int
down:... | [
"aoc.format_solution",
"math.sqrt",
"functools.partial",
"collections.defaultdict",
"aoc.load_puzzle_input",
"math.atan2"
] | [((3366, 3423), 'math.sqrt', 'sqrt', (['((b.across - a.across) ** 2 + (b.down - a.down) ** 2)'], {}), '((b.across - a.across) ** 2 + (b.down - a.down) ** 2)\n', (3370, 3423), False, 'from math import atan2, gcd, sqrt\n'), ((3745, 3784), 'functools.partial', 'partial', (['direction_and_distance', 'centre'], {}), '(direc... |
from phue import Bridge
import time
from secrets import hueIP
bridge = Bridge(hueIP)
try:
# If the app is not registered and the button is not pressed, press the button and call connect() (this only needs to be run a single time)
bridge.connect()
except Exception:
print("Press the Hue Bridge button to co... | [
"phue.Bridge",
"time.sleep"
] | [((72, 85), 'phue.Bridge', 'Bridge', (['hueIP'], {}), '(hueIP)\n', (78, 85), False, 'from phue import Bridge\n'), ((1254, 1272), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (1264, 1272), False, 'import time\n'), ((1669, 1686), 'time.sleep', 'time.sleep', (['(0.125)'], {}), '(0.125)\n', (1679, 1686), F... |
# import time
from flask import Flask,redirect
from .TeamAssigner import assignTeam
from .CandidateAssigner import assignCandidate
app = Flask(__name__)
@app.route("/executeAlgo")
def execute_algo():
assignTeam()
return redirect("http://localhost:3000/team_match_success")
# return {
# "msg": "succ... | [
"flask.redirect",
"flask.Flask"
] | [((137, 152), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (142, 152), False, 'from flask import Flask, redirect\n'), ((230, 282), 'flask.redirect', 'redirect', (['"""http://localhost:3000/team_match_success"""'], {}), "('http://localhost:3000/team_match_success')\n", (238, 282), False, 'from flask impor... |
from typing import Optional
import pandas as pd
from episuite import data
class GoogleMobility:
"""This is a class implementing a client for the Google
Community Mobility Reports.
.. seealso::
`Google Community Mobility Report <https://www.google.com/covid19/mobility/>`_
Community M... | [
"episuite.data.load_from_cache",
"pandas.read_csv",
"pandas.concat"
] | [((1384, 1530), 'episuite.data.load_from_cache', 'data.load_from_cache', (['self.report_url', '"""google_mobility.csv"""', '"""Google Mobility Report"""'], {'show_progress': 'show_progress', 'invalidate': '(not cache)'}), "(self.report_url, 'google_mobility.csv',\n 'Google Mobility Report', show_progress=show_progre... |
from datetime import date, datetime
from decimal import Decimal
from enum import Enum
from typing import Union
from dacite import from_dict, Config
from konduto import KONDUTO_DOMAIN
from konduto.api.clients import KondutoHttpClient
from konduto.api.resources.konduto_order_status import KondutoOrderStatus
from kondut... | [
"konduto.api.resources.konduto_order_status.KondutoOrderStatus.from_string",
"konduto.api.resources.response.konduto_order_response.KondutoRecommendation.from_string",
"dacite.Config",
"konduto.infrastructure.either.Right"
] | [((1172, 1239), 'konduto.api.resources.response.konduto_order_response.KondutoRecommendation.from_string', 'KondutoRecommendation.from_string', (["response_order['recommendation']"], {}), "(response_order['recommendation'])\n", (1205, 1239), False, 'from konduto.api.resources.response.konduto_order_response import Kond... |
# coding=utf8
"""
@author: <NAME>
@date: 09/26/2019
@code description: It is a Python3 file to implement cosine similarity with TF-IDF and Word Embedding methods.
"""
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise im... | [
"numpy.mean",
"nltk.corpus.stopwords.words",
"sklearn.metrics.pairwise.cosine_similarity",
"nltk.stem.WordNetLemmatizer",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer"
] | [((1062, 1090), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['vec', 'args'], {}), '(vec, args)\n', (1079, 1090), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((4364, 4381), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (4379, 4381)... |
# -*- coding: utf-8 -*-
"""
This file contains implementations of the functions used to train a CNN model:
train_cnn - Function used to facilitate the training of the Convolutional Neural Network model.
test_cnn - Function used to facilitate the testing of the Convolutional Neural Network model.
"""
# Built-i... | [
"model.Classifier",
"torch.nn.functional.softmax",
"numpy.mean",
"torch.cuda.amp.GradScaler",
"torch.optim.lr_scheduler.CyclicLR",
"torch.cuda.amp.autocast",
"utils.log",
"torch.nn.functional.log_softmax",
"time.time",
"torch.cat",
"torch.device",
"os.makedirs",
"torch.stack",
"os.path.joi... | [((1630, 1653), 'dataset.get_datasets', 'get_datasets', (['arguments'], {}), '(arguments)\n', (1642, 1653), False, 'from dataset import get_datasets\n'), ((1748, 1892), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'arguments.batch_size', 'shuffle': '(True)', 'num_workers': 'arguments.dat... |
import json
import logging
import binascii
from hashlib import sha256
from string import hexdigits
from torba.client.baseaccount import BaseAccount
from torba.client.basetransaction import TXORef
log = logging.getLogger(__name__)
def validate_claim_id(claim_id):
if not len(claim_id) == 40:
raise Except... | [
"logging.getLogger",
"json.dumps",
"binascii.unhexlify"
] | [((205, 232), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (222, 232), False, 'import logging\n'), ((6684, 6713), 'json.dumps', 'json.dumps', (['results'], {'indent': '(2)'}), '(results, indent=2)\n', (6694, 6713), False, 'import json\n'), ((2247, 2287), 'binascii.unhexlify', 'binascii.... |
"""Unit Tests for Region model"""
import pytest
from altaudit.models import Region
def test_create_region_table(db):
assert db.has_table('regions')
def test_add_region(db_session):
us = Region(name='US')
db_session.add(us)
assert us == db_session.query(Region).filter(Region.name=='US').first()
... | [
"altaudit.models.Region"
] | [((197, 214), 'altaudit.models.Region', 'Region', ([], {'name': '"""US"""'}), "(name='US')\n", (203, 214), False, 'from altaudit.models import Region\n')] |
from bs4 import BeautifulSoup as bs
from .forecast import Forecast
from .day import Day
def parsetemp(t):
return int(t.find(class_="wob_t").text)
def parseday(d):
s=bs(str(d),'html.parser')
dayname=s.find(class_="QrNVmd Z1VzSb")['aria-label']
desc=s.find(class_="DxhUm").img['alt']
tmps=bs(str(s.find... | [
"bs4.BeautifulSoup"
] | [((544, 564), 'bs4.BeautifulSoup', 'bs', (['d', '"""html.parser"""'], {}), "(d, 'html.parser')\n", (546, 564), True, 'from bs4 import BeautifulSoup as bs\n')] |
import os
import re
import time
import shutil
from tempfile import mkdtemp
import operator
from collections.abc import Mapping
from pathlib import Path
import datetime
from .log import Handle
logger = Handle(__name__)
_FLAG_FIRST = object()
class Timewith:
def __init__(self, name=""):
"""Timewith contex... | [
"shutil.copyfileobj",
"pathlib.Path",
"datetime.datetime.now",
"tempfile.mkdtemp",
"time.time"
] | [((1248, 1270), 'tempfile.mkdtemp', 'mkdtemp', ([], {'suffix': 'suffix'}), '(suffix=suffix)\n', (1255, 1270), False, 'from tempfile import mkdtemp\n'), ((1282, 1297), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (1286, 1297), False, 'from pathlib import Path\n'), ((3782, 3791), 'pathlib.Path', 'Path', ... |
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import unittest.mock as mock
from zsl.service.service import SessionFactory
from zsl.testing.db import TestSessionFactory as DbTestTestSessionFactory
from zsl.utils.injection_helper import bind
def mock_db_sess... | [
"unittest.mock.MagicMock",
"zsl.utils.injection_helper.bind"
] | [((343, 359), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (357, 359), True, 'import unittest.mock as mock\n'), ((607, 650), 'zsl.utils.injection_helper.bind', 'bind', (['SessionFactory'], {'to': 'TestSessionFactory'}), '(SessionFactory, to=TestSessionFactory)\n', (611, 650), False, 'from zsl.utils.in... |
"""
Scheduled-task submodule
# Initializes a scheduled-task handle
Provides basic operations on scheduled tasks
-- An extra thread is started here to control the whole scheduled-task module
-- Considering multi-core CPUs and some CPU-intensive programs,
-- scheduled tasks are executed here using multiple threads
"""
from apscheduler.util import obj_to_ref, utc_timestamp_to_datetime
from pymysql import IntegrityError
from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSu... | [
"bspider.master.dao.CronDao",
"bspider.core.api.DeleteSuccess",
"bspider.master.log.info",
"bspider.core.api.ParameterException",
"bspider.master.log.error",
"bspider.core.api.NotFound",
"apscheduler.util.utc_timestamp_to_datetime",
"bspider.core.api.PostSuccess",
"apscheduler.util.obj_to_ref",
"b... | [((569, 578), 'bspider.master.dao.CronDao', 'CronDao', ([], {}), '()\n', (576, 578), False, 'from bspider.master.dao import CronDao\n'), ((693, 736), 'bspider.utils.tools.get_crontab_next_run_time', 'get_crontab_next_run_time', (['trigger', 'self.tz'], {}), '(trigger, self.tz)\n', (718, 736), False, 'from bspider.utils... |
import torch.nn as nn
import torch.nn.functional as F
from im2mesh.layers import (
ResnetBlockFC, CResnetBlockConv1d,
CBatchNorm1d, CBatchNorm1d_legacy,
ResnetBlockConv1d
)
import torch
class FusionModule(nn.Module):
''' FusionModule class.
Args:
dim (int): input dimension
c_dim (i... | [
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.functional.leaky_relu",
"torch.nn.InstanceNorm1d",
"im2mesh.layers.ResnetBlockFC",
"torch.nn.Linear",
"torch.nn.functional.softmax",
"torch.cat"
] | [((781, 808), 'torch.nn.Linear', 'nn.Linear', (['dim', 'hidden_size'], {}), '(dim, hidden_size)\n', (790, 808), True, 'import torch.nn as nn\n'), ((831, 857), 'im2mesh.layers.ResnetBlockFC', 'ResnetBlockFC', (['hidden_size'], {}), '(hidden_size)\n', (844, 857), False, 'from im2mesh.layers import ResnetBlockFC, CResnetB... |
import unittest
from unittest.mock import *
from sample.checker import Checker
class TestChecker(unittest.TestCase):
def setUp(self):
self.temp = Checker()
def test_checker_before(self):
file = 'file.wav'
#prepare mock
self.temp.temp.getTime = Mock(name = 'getTime')
s... | [
"sample.checker.Checker"
] | [((161, 170), 'sample.checker.Checker', 'Checker', ([], {}), '()\n', (168, 170), False, 'from sample.checker import Checker\n')] |
# Generated by Django 2.0.7 on 2018-09-21 11:24
import django.db.models.deletion
import enumfields.fields
from django.conf import settings
from django.db import migrations, models
import campaigns.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dep... | [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.ImageField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
... | [((296, 353), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (327, 353), False, 'from django.db import migrations, models\n'), ((617, 710), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)... |
'''
Module that is the entry point into the game
'''
from SnakesVsLadder.src import game
game.main() | [
"SnakesVsLadder.src.game.main"
] | [((90, 101), 'SnakesVsLadder.src.game.main', 'game.main', ([], {}), '()\n', (99, 101), False, 'from SnakesVsLadder.src import game\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 12:47:00 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import f1_score
from sklearn.metrics import normalized_mutual_info_score
from sklearn.preprocessing import LabelE... | [
"numpy.random.normal",
"sklearn.preprocessing.LabelEncoder",
"sklearn.cluster.AffinityPropagation",
"numpy.vstack",
"numpy.random.uniform",
"sklearn.metrics.normalized_mutual_info_score",
"numpy.load",
"matplotlib.pyplot.show"
] | [((4137, 4159), 'numpy.load', 'np.load', (['"""y_train.npy"""'], {}), "('y_train.npy')\n", (4144, 4159), True, 'import numpy as np\n'), ((4218, 4239), 'numpy.load', 'np.load', (['"""y_test.npy"""'], {}), "('y_test.npy')\n", (4225, 4239), True, 'import numpy as np\n'), ((4757, 4767), 'matplotlib.pyplot.show', 'plt.show'... |
import sys
sys.path.append('../../')
import keras2caffe
DATA_DIR='../../data/'
import caffe
import cv2
import numpy as np
import sys
sys.path.append('/media/toshiba_ml/models/keras-models/keras-squeezenet')
from keras_squeezenet import SqueezeNet
#TensorFlow backend uses all GPU memory by default, so we need limit... | [
"keras_squeezenet.SqueezeNet",
"tensorflow.Session",
"caffe.set_mode_gpu",
"keras2caffe.convert",
"numpy.argmax",
"numpy.array",
"caffe.Net",
"tensorflow.ConfigProto",
"cv2.resize",
"sys.path.append",
"cv2.imread"
] | [((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((136, 209), 'sys.path.append', 'sys.path.append', (['"""/media/toshiba_ml/models/keras-models/keras-squeezenet"""'], {}), "('/media/toshiba_ml/models/keras-models/keras-squeezenet')\n", (151, 209... |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from florence.models import User, Lib, Intlib
from django.core import serializers
from rest_framework.renderers import JSONRenderer
from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVecto... | [
"django.shortcuts.render",
"django.http.JsonResponse",
"django.contrib.postgres.search.SearchQuery",
"django.contrib.postgres.search.SearchRank",
"florence.serializers.IntlibSearchSerializer",
"florence.models.Lib.objects.get",
"django.contrib.postgres.search.SearchVector"
] | [((449, 487), 'django.shortcuts.render', 'render', (['request', '"""florence/intro.html"""'], {}), "(request, 'florence/intro.html')\n", (455, 487), False, 'from django.shortcuts import render\n'), ((524, 566), 'django.shortcuts.render', 'render', (['request', '"""florence/dashboard.html"""'], {}), "(request, 'florence... |
import random
from flask_testing import TestCase
from url_shortener.app import app
from url_shortener.app import db
class URLShortenerTests(TestCase):
def setUp(self):
db.create_all()
db.session.commit()
def create_app(self):
app.config.from_object('url_shortener.config.TestingConfig... | [
"url_shortener.app.db.create_all",
"url_shortener.app.db.session.commit",
"url_shortener.app.db.session.remove",
"url_shortener.app.db.drop_all",
"random.seed",
"url_shortener.app.app.config.from_object"
] | [((183, 198), 'url_shortener.app.db.create_all', 'db.create_all', ([], {}), '()\n', (196, 198), False, 'from url_shortener.app import db\n'), ((207, 226), 'url_shortener.app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (224, 226), False, 'from url_shortener.app import db\n'), ((262, 322), 'url_shortener.a... |
import sys
from dataclasses import dataclass
from unittest import TestCase
from custom_imports.importer import Importer, SimpleFinder, SimpleLoader
@dataclass
class SimpleLocator:
fullname: str
@dataclass
class SimpleModule:
value: str = ""
def set_value(self, locator):
self.value = locator.fu... | [
"custom_imports.importer.Importer",
"custom_imports.importer.SimpleLoader"
] | [((520, 594), 'custom_imports.importer.SimpleLoader', 'SimpleLoader', ([], {'module_type': 'SimpleModule', 'load_module': 'SimpleModule.set_value'}), '(module_type=SimpleModule, load_module=SimpleModule.set_value)\n', (532, 594), False, 'from custom_imports.importer import Importer, SimpleFinder, SimpleLoader\n'), ((64... |
import unittest
from parameterized import parameterized as p
from solns.removeDuplicatesFromSortedArr.removeDuplicatesFromSortedArr import *
class Test_RemoveDuplicatesFromSortedArr(unittest.TestCase):
@p.expand([
[[1,1,2],2],[[],0],[[1],1],[[1,2],2],
[[0,0,1,1,1,2,2,3,3,4],5],
[[0,0,0,2,2,... | [
"parameterized.parameterized.expand"
] | [((208, 342), 'parameterized.parameterized.expand', 'p.expand', (['[[[1, 1, 2], 2], [[], 0], [[1], 1], [[1, 2], 2], [[0, 0, 1, 1, 1, 2, 2, 3, \n 3, 4], 5], [[0, 0, 0, 2, 2, 2, 4, 4, 4], 3]]'], {}), '([[[1, 1, 2], 2], [[], 0], [[1], 1], [[1, 2], 2], [[0, 0, 1, 1, 1, \n 2, 2, 3, 3, 4], 5], [[0, 0, 0, 2, 2, 2, 4, 4,... |
"""
Test cases for the regi0.geographic.utils.get_nearest_year function.
"""
import numpy as np
import pandas as pd
import pytest
from regi0.geographic.utils import get_nearest_year
@pytest.fixture()
def dates():
return pd.Series(["17/08/1945", np.nan, "21/09/2011", "01/01/1984", "17/04/2009"])
@pytest.fixture... | [
"pytest.fixture",
"pandas.Series",
"pandas.testing.assert_series_equal",
"regi0.geographic.utils.get_nearest_year"
] | [((186, 202), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (200, 202), False, 'import pytest\n'), ((306, 322), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (320, 322), False, 'import pytest\n'), ((227, 302), 'pandas.Series', 'pd.Series', (["['17/08/1945', np.nan, '21/09/2011', '01/01/1984', '17/04/200... |
# importing necessary packages
from keras.models import load_model
import argparse
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from preprocessing.preprocessing import AspectAwarePreprocessor, ImageToArrayPreprocessor, SimplePreprocessor
from preprocessing.datas... | [
"keras.models.load_model",
"preprocessing.datasets.SimpleDatasetLoader",
"argparse.ArgumentParser",
"keras.callbacks.ModelCheckpoint",
"sklearn.model_selection.train_test_split",
"preprocessing.preprocessing.ImageToArrayPreprocessor",
"preprocessing.preprocessing.AspectAwarePreprocessor",
"glob.glob"
... | [((684, 709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (707, 709), False, 'import argparse\n'), ((882, 906), 'keras.models.load_model', 'load_model', (["args['path']"], {}), "(args['path'])\n", (892, 906), False, 'from keras.models import load_model\n'), ((997, 1061), 'glob.glob', 'glob.g... |
from collections import OrderedDict
from typing import Any, List, Optional
from pydantic import BaseModel, Field, HttpUrl
class ProductMetadata(BaseModel):
name: str
short_code: str
brand: str
sku: Optional[str]
url: HttpUrl
class ProductImage(BaseModel):
name: str
alt: str
suffix: ... | [
"pydantic.Field"
] | [((647, 674), 'pydantic.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (652, 674), False, 'from pydantic import BaseModel, Field, HttpUrl\n')] |
from past.builtins import basestring
from acc_utils.errors import _assert
from .operator_base import OperatorBase
class ConvPoolOpBase(OperatorBase):
def __init__(
self,
kernel=1,
stride=1,
pad=0,
**kwargs
):
super(ConvPoolOpBase, self).__i... | [
"acc_utils.errors._assert"
] | [((1140, 1188), 'acc_utils.errors._assert', '_assert', (['(False)', '"""not supported type for padding"""'], {}), "(False, 'not supported type for padding')\n", (1147, 1188), False, 'from acc_utils.errors import _assert\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab... | [
"cubert.unified_tokenizer.check_mappings",
"cubert.unified_tokenizer.subtokenize_agnostic_tokens_in_place",
"re.compile",
"six.add_metaclass",
"six.ensure_text",
"cubert.unified_tokenizer.flatten_subtoken_lists",
"absl.logging.warning",
"keyword.iskeyword",
"tokenize.EXACT_TOKEN_TYPES.keys",
"cube... | [((1245, 1275), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (1262, 1275), False, 'import six\n'), ((7902, 7944), 're.compile', 're.compile', (["('(' + tokenize.Number + ')\\\\Z')"], {}), "('(' + tokenize.Number + ')\\\\Z')\n", (7912, 7944), False, 'import re\n'), ((7996, 8038), '... |
#!/usr/bin/env python3
# TODO: Rewrite to remove unnecessary features; this program will only ever
# handle metadata.json version numbers.
"""
Increment a version number in a JSON file.
The JSON must have a top-level "version" key, as either a float or an int.
Usage:
python increment.py metadata.json -i 0.1 -O
... | [
"os.path.samefile",
"os.path.exists",
"json.loads",
"argparse.ArgumentParser",
"json.dumps",
"os.path.join",
"sys.stderr.write",
"os.path.basename",
"sys.stdin.read"
] | [((602, 630), 'sys.stderr.write', 'sys.stderr.write', (['os.linesep'], {}), '(os.linesep)\n', (618, 630), False, 'import sys\n'), ((4152, 4175), 'os.path.basename', 'os.path.basename', (['fpath'], {}), '(fpath)\n', (4168, 4175), False, 'import os\n'), ((4320, 4347), 'os.path.join', 'os.path.join', (['bkdir', 'bkname'],... |
import unittest
from app.models import News_Source
class News_sourcesTest(unittest.TestCase):
'''
    Test class to test the behaviour of the News_Source class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.news_source = News_Source("abc-new... | [
"app.models.News_Source"
] | [((300, 435), 'app.models.News_Source', 'News_Source', (['"""abc-news"""', '"""ABC News"""', '"""Your trusted source for breaking news and analysis"""', '"""en"""', '"""https://abcnews.go.com"""', '"""us"""'], {}), "('abc-news', 'ABC News',\n 'Your trusted source for breaking news and analysis', 'en',\n 'https://... |
import re
from services import Boto3Service
from nodes import Node, ServiceNodes
from boto3_docs_parser import Boto3DocsParser, boto3_session, parser
import os
import boto3
from pprint import pprint
from collections import namedtuple
from itertools import combinations
from services import is_method_attr_in_list, is_req... | [
"nodes.ServiceNodes",
"services.is_method_attr_in_list",
"itertools.combinations",
"services.Boto3Service",
"boto3_docs_parser.parser.get_all_available_services"
] | [((517, 552), 'boto3_docs_parser.parser.get_all_available_services', 'parser.get_all_available_services', ([], {}), '()\n', (550, 552), False, 'from boto3_docs_parser import Boto3DocsParser, boto3_session, parser\n'), ((934, 975), 'services.is_method_attr_in_list', 'is_method_attr_in_list', (['shorty', 'long_list'], {}... |
# -*- coding: utf-8 -*-
import argparse
import re
import subprocess
import sys
from typing import List
from .fork_release import create_develop_if_not_exists
def main():
args = parse_args(sys.argv[1:])
branch_name = f"feature-{args.feature_name}"
cp = subprocess.run(["git", "branch"], stdout=subprocess.... | [
"subprocess.run",
"argparse.ArgumentParser"
] | [((268, 325), 'subprocess.run', 'subprocess.run', (["['git', 'branch']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'branch'], stdout=subprocess.PIPE)\n", (282, 325), False, 'import subprocess\n'), ((502, 548), 'subprocess.run', 'subprocess.run', (["['git', 'checkout', 'develop']"], {}), "(['git', 'checkout', 'develop'... |
from src.utils.position import Position
from src.exceptions.position_exception import PositionException
import unittest
class TestPosition(unittest.TestCase):
def test_position_creation(self):
pos = Position(line=3, column=10)
self.assertEqual(pos.line, 3, "Line doesn't match")
self.asser... | [
"src.utils.position.Position"
] | [((214, 241), 'src.utils.position.Position', 'Position', ([], {'line': '(3)', 'column': '(10)'}), '(line=3, column=10)\n', (222, 241), False, 'from src.utils.position import Position\n'), ((427, 453), 'src.utils.position.Position', 'Position', ([], {'line': '(1)', 'column': '(1)'}), '(line=1, column=1)\n', (435, 453), ... |
from .task import BaseQueueTask as _BaseQueueTask
from .features import BaseFeatureAdapterFactory as _BaseFeatureAdapterFactory
from .adapter.collection import BaseList as _BaseList
from .result import MRResult as _MRResult
from ..mode import RunningMode as _RunningMode
from ..types import MRTasks as _MRTasks
import mu... | [
"multirunnable._utils.get_cls_name"
] | [((1009, 1047), 'multirunnable._utils.get_cls_name', '_utils.get_cls_name', ([], {'cls_str': '__cls_str'}), '(cls_str=__cls_str)\n', (1028, 1047), True, 'import multirunnable._utils as _utils\n')] |
from telebot import types as t
class Buttons:
botoes = t.ReplyKeyboardMarkup(row_width=1)
botao1 = t.KeyboardButton('Dados recentes')
botao2 = t.KeyboardButton('Dados por estado')
botao3 = t.KeyboardButton('Dados por cidade')
botoes.add(botao1, botao2, botao3)
class Estados:
estados = t.Inli... | [
"telebot.types.InlineKeyboardButton",
"telebot.types.ReplyKeyboardMarkup",
"telebot.types.KeyboardButton",
"telebot.types.InlineKeyboardMarkup"
] | [((61, 95), 'telebot.types.ReplyKeyboardMarkup', 't.ReplyKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (82, 95), True, 'from telebot import types as t\n'), ((109, 143), 'telebot.types.KeyboardButton', 't.KeyboardButton', (['"""Dados recentes"""'], {}), "('Dados recentes')\n", (125, 143), True, 'from t... |
from random import sample
from urllib.parse import urljoin
import click
import requests
from .settings import API_KEY, SOURCES, TOP_HEAD
SOURCE_URL = urljoin(SOURCES, '?apiKey={}'.format(API_KEY))
# TODO: Try a deliberate failure to see how the remote API responds
# so that your code knows how to catch those except... | [
"click.Choice",
"random.sample",
"click.command",
"requests.get"
] | [((346, 370), 'requests.get', 'requests.get', (['SOURCE_URL'], {}), '(SOURCE_URL)\n', (358, 370), False, 'import requests\n'), ((613, 628), 'click.command', 'click.command', ([], {}), '()\n', (626, 628), False, 'import click\n'), ((587, 609), 'random.sample', 'sample', (['source_list', '(4)'], {}), '(source_list, 4)\n'... |
### This was mainly made to test the pandas_datareader library.
### Libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas_datareader as web
import datetime
### Base info
start = datetime.datetime(2000,1,1)
end = datetime.datet... | [
"datetime.datetime",
"pandas_datareader.DataReader",
"dash.dependencies.Output",
"dash.dependencies.Input",
"datetime.datetime.now",
"dash_html_components.Div",
"dash_core_components.Graph",
"dash_core_components.Input",
"dash.Dash"
] | [((272, 301), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (289, 301), False, 'import datetime\n'), ((306, 329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (327, 329), False, 'import datetime\n'), ((347, 390), 'pandas_datareader.DataReader', 'web.D... |
from flask import Flask, request,redirect, abort,render_template,session,copy_current_request_context
from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import LoginManager, login_user, current_user, UserMixin
from flask_socketio import SocketIO, emit, send
from flask_uplo... | [
"flask.render_template",
"flask_login.LoginManager",
"flask.request.args.get",
"flask_uploads.UploadSet",
"flask_cors.CORS",
"flask.Flask",
"flask_cors.cross_origin",
"flask_socketio.SocketIO",
"app.db.db",
"flask_socketio.join_room",
"flask_uploads.configure_uploads",
"sys.path.append",
"fl... | [((685, 717), 'sys.path.append', 'sys.path.append', (['"""transfchatbot"""'], {}), "('transfchatbot')\n", (700, 717), False, 'import sys\n'), ((796, 843), 'locale.setlocale', 'locale.setlocale', (['locale.LC_TIME', '"""fr_FR.UTF-8"""'], {}), "(locale.LC_TIME, 'fr_FR.UTF-8')\n", (812, 843), False, 'import locale\n'), ((... |
import configparser
from datetime import datetime
from pyrogram import Client, Filters, Emoji
config = configparser.ConfigParser()
config.read("config.ini")
prefixes = list(config["prefixes"].keys())
chatinfo_message = {"id": f"{Emoji.ID_BUTTON} <b>Id</b>: <code>[%id%]</code>",
"type": f"{Emoji.J... | [
"pyrogram.Filters.user",
"configparser.ConfigParser",
"pyrogram.Filters.command"
] | [((105, 132), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (130, 132), False, 'import configparser\n'), ((2340, 2360), 'pyrogram.Filters.user', 'Filters.user', (['"""self"""'], {}), "('self')\n", (2352, 2360), False, 'from pyrogram import Client, Filters, Emoji\n'), ((2363, 2409), 'pyrogr... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
class objImpresoras():
def __init__(self):
        self.lista = []
if os.name == 'nt': #Windows
import win32print
            for i in range(1, 6):
                for p in win32print.EnumPrinters(i):
                    if p[2] not in self.lista:
self.lista.append (p[2]... | [
"subprocess.Popen",
"win32print.EnumPrinters"
] | [((345, 403), 'subprocess.Popen', 'subprocess.Popen', (["['lpstat', '-a']"], {'stdout': 'subprocess.PIPE'}), "(['lpstat', '-a'], stdout=subprocess.PIPE)\n", (361, 403), False, 'import subprocess\n'), ((228, 254), 'win32print.EnumPrinters', 'win32print.EnumPrinters', (['i'], {}), '(i)\n', (251, 254), False, 'import win3... |
import argparse
import numpy as np
from scipy import sparse
from scipy.optimize import linprog
import matplotlib.pyplot as plt
import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as func
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data impor... | [
"networkx.draw_networkx_edge_labels",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"numpy.genfromtxt",
"networkx.draw_networkx_edges",
"numpy.mean",
"scipy.sparse.lil_matrix",
"argparse.ArgumentParser",
"matplotlib.pyplot.Normalize",
"networkx.DiGraph",
"captum.attr.Integrated... | [((924, 944), 'numpy.full', 'np.full', (['time', 'c_buy'], {}), '(time, c_buy)\n', (931, 944), True, 'import numpy as np\n'), ((959, 977), 'numpy.zeros', 'np.zeros', (['(time * 5)'], {}), '(time * 5)\n', (967, 977), True, 'import numpy as np\n'), ((1453, 1489), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * ti... |