| code (string, 22–1.05M chars) | apis (list, 1–3.31k entries) | extract_api (string, 75–3.25M chars) |
|---|---|---|
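Each row below pairs a truncated source snippet (`code`) with the fully qualified APIs it calls (`apis`) and a string of per-call extraction records (`extract_api`). Judging from the rows, every record is a Python literal tuple holding: call span, dotted API path, call expression, parsed positional/keyword arguments, argument source text, argument span, an aliased-import flag, and the import statement. Those field meanings are inferred from inspection, not from a published schema; a minimal parsing sketch under that assumption:

```python
import ast

# A simplified extract_api record, adapted from one of the rows below;
# the field names in the unpacking are guesses based on inspection.
record_text = (
    "[((212, 229), 'os.getenv', 'os.getenv', (['USER'], {}), "
    "\"('USER')\\n\", (221, 229), False, 'import sys, os, commands\\n')]"
)

for span, api, call, args, arg_src, arg_span, aliased, imp in ast.literal_eval(record_text):
    print(f"{api} called at chars {span[0]}-{span[1]} (import: {imp.strip()!r})")
```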
# -*- coding: utf-8 -*-
"""
Solace
======
*a multilingual support system*
Solace is a multilingual support system developed at Plurk
for end user support. The application design is heavily
influenced by bulletin boards like phpBB and the new
stackoverflow programming community site.
For more information consult th... | [
"setuptools.setup"
] | [((1275, 1780), 'setuptools.setup', 'setup', ([], {'name': '"""Solace"""', 'version': '"""0.2"""', 'license': '"""BSD"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""Multilangual User Support Platform"""', 'long_description': '__doc__', 'packages': "['solace', 'solace.views', 'solace.... |
#!/usr/bin/env python3
import discord
import config
import util
from functools import reduce
from handler import Handler
class ListWordsHandler(Handler):
name = "listwords"
async def message_handler(self, message, jail, bonkbot):
print("Starting listwords handler")
if self.cf.get("list_words_t... | [
"util.is_mentioned",
"util.list_trigger_words"
] | [((367, 402), 'util.is_mentioned', 'util.is_mentioned', (['message', 'bonkbot'], {}), '(message, bonkbot)\n', (384, 402), False, 'import util\n'), ((443, 468), 'util.list_trigger_words', 'util.list_trigger_words', ([], {}), '()\n', (466, 468), False, 'import util\n')] |
#! python3
from sys import argv
from pathlib import Path
from re import compile
from enum import Enum
from inflection import camelize
RE_CLASS = compile(r'^use [a-zA-Z_][a-zA-Z0-9_\\]*;$')
RE_PARAM = compile(r'^[a-zA-Z_][a-zA-Z0-9_]* \$[a-zA-Z_][a-zA-Z0-9_]*,?$')
PARAM_BEGIN = r'function ('
PARAM_END = r') {'
PARAM... | [
"inflection.camelize",
"pathlib.Path",
"re.compile"
] | [((148, 192), 're.compile', 'compile', (['"""^use [a-zA-Z_][a-zA-Z0-9_\\\\\\\\]*;$"""'], {}), "('^use [a-zA-Z_][a-zA-Z0-9_\\\\\\\\]*;$')\n", (155, 192), False, 'from re import compile\n'), ((203, 266), 're.compile', 'compile', (['"""^[a-zA-Z_][a-zA-Z0-9_]* \\\\$[a-zA-Z_][a-zA-Z0-9_]*,?$"""'], {}), "('^[a-zA-Z_][a-zA-Z0... |
import os
import requests
from bouncer import Bouncer
from flask import Flask, url_for, redirect, \
render_template, session, request, Response, \
flash, get_flashed_messages, jsonify
from flask_login import LoginManager, login_required, login_user, \
logout_user, current_user, UserMixin
from oauthlib.oaut... | [
"flask.render_template",
"flask_login.LoginManager",
"flask.request.args.get",
"requests.post",
"oauth2client.client.verify_id_token",
"flask.Flask",
"flask.get_flashed_messages",
"utils.redact_email",
"elasticsearch_dsl.Search",
"utils.decrypt",
"flask.session.pop",
"flask.request.headers.get... | [((2813, 2828), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (2818, 2828), False, 'from flask import Flask, url_for, redirect, render_template, session, request, Response, flash, get_flashed_messages, jsonify\n'), ((2934, 2951), 'flask_login.LoginManager', 'LoginManager', (['app'], {}), '(app)\n', (2946,... |
import io, os, re
from os import path
from setuptools import find_packages
from distutils.core import setup
# pip's single-source version method as described here:
# https://python-packaging-user-guide.readthedocs.io/single_source_version/
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirn... | [
"os.path.dirname",
"setuptools.find_packages",
"re.search"
] | [((519, 592), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'version_file', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', version_file, re.M)\n', (528, 592), False, 'import io, os, re\n'), ((1213, 1228), 'setuptools.find_packages', 'find_packages', ([], ... |
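The comment in this row points at pip's single-source-version recipe, and the extract_api column confirms the `re.search` over `__version__`. For reference, a sketch of the full canonical pattern from that guide (the `find_version` name and the encoding default are assumed rather than visible in the truncated row):

```python
import io
import os
import re

def read(*names, **kwargs):
    # Read a file relative to this setup.py, defaulting to UTF-8.
    with io.open(
        os.path.join(os.path.dirname(__file__), *names),
        encoding=kwargs.get("encoding", "utf8"),
    ) as fp:
        return fp.read()

def find_version(*file_paths):
    # Pull __version__ = "x.y.z" out of the given source file.
    version_file = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")
```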
import os
import git
import csv
from datetime import datetime
from progress.bar import Bar
def tagAnalysis(repo: git.Repo, outputDir: str):
print("Analyzing tags")
tagInfo = []
tags = sorted(repo.tags, key=getTaggedDate)
lastTag = None
for tag in Bar('Processing').iter(tags):
... | [
"progress.bar.Bar",
"csv.writer",
"datetime.datetime.fromtimestamp",
"os.path.join"
] | [((1574, 1602), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['date'], {}), '(date)\n', (1596, 1602), False, 'from datetime import datetime\n'), ((950, 978), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (960, 978), False, 'import csv\n'), ((1171, 1199), 'csv.wr... |
""" Configuration variables used in the application.
These variables should be set up as environment variables
in the docker-compose.yml file when launching all the services.
If these environment variables are not present, default values
are assigned to them.
"""
import os
KAFKA_ENDPOINT = os.environ.get('KAFKA_ENDPOI... | [
"os.environ.get"
] | [((292, 324), 'os.environ.get', 'os.environ.get', (['"""KAFKA_ENDPOINT"""'], {}), "('KAFKA_ENDPOINT')\n", (306, 324), False, 'import os\n'), ((365, 394), 'os.environ.get', 'os.environ.get', (['"""INPUT_TOPIC"""'], {}), "('INPUT_TOPIC')\n", (379, 394), False, 'import os\n'), ((428, 459), 'os.environ.get', 'os.environ.ge... |
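The docstring in this row promises defaults when the environment variables are missing, but the visible `os.environ.get` calls pass no fallback (possibly truncated). The usual form with an explicit default looks like this; the particular default values are invented for illustration:

```python
import os

# The second argument is used when the variable is absent from the
# environment; these defaults are assumptions, not from the row.
KAFKA_ENDPOINT = os.environ.get('KAFKA_ENDPOINT', 'localhost:9092')
INPUT_TOPIC = os.environ.get('INPUT_TOPIC', 'input')
OUTPUT_TOPIC = os.environ.get('OUTPUT_TOPIC', 'output')
```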
import numpy as np
import networkx as nx
import argparse
import random
from models.distance import get_dist_func
def get_fitness(solution, initial_node, node_list):
"""
Get fitness of solution encoded by permutation.
Args:
solution (numpy.ndarray): Solution encoded as a permutation
ini... | [
"numpy.ones_like",
"numpy.ceil",
"numpy.mean",
"numpy.random.rand",
"argparse.ArgumentParser",
"numpy.hstack",
"numpy.random.choice",
"numpy.in1d",
"models.distance.get_dist_func",
"numpy.max",
"numpy.sum",
"numpy.empty",
"numpy.min",
"networkx.read_gpickle",
"numpy.zeros_like",
"netwo... | [((601, 636), 'numpy.hstack', 'np.hstack', (['(solution, initial_node)'], {}), '((solution, initial_node))\n', (610, 636), True, 'import numpy as np\n'), ((10915, 11017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Approximate solution to TSP using ant colony optimization."""'}), "(de... |
# -*- coding: utf-8 -*-
# Copyright © 2021 Wacom Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unle... | [
"uim.codec.parser.uim.UIMParser",
"uim.codec.parser.will.WILL2Parser",
"uim.codec.writer.encoder.encoder_3_1_0.UIMEncoder310",
"io.open"
] | [((886, 897), 'uim.codec.parser.uim.UIMParser', 'UIMParser', ([], {}), '()\n', (895, 897), False, 'from uim.codec.parser.uim import UIMParser\n'), ((2117, 2130), 'uim.codec.parser.will.WILL2Parser', 'WILL2Parser', ([], {}), '()\n', (2128, 2130), False, 'from uim.codec.parser.will import WILL2Parser\n'), ((1070, 1123), ... |
'''
Analytic Hierarchy Process, AHP.
Based on Wasserstein distance
'''
from scipy.stats import wasserstein_distance
from sklearn.decomposition import PCA
import scipy
import numpy as np
import pandas as pd
import sys
import argparse
import os
import glob
import datasets_analysis_module as dam
class idx_analysis(obje... | [
"pandas.read_csv",
"numpy.array",
"numpy.arange",
"os.path.exists",
"sklearn.linear_model.RidgeClassifier",
"numpy.reshape",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"numpy.max",
"os.path.split",
"scipy.stats.wasserstein_distance",
"numpy.vstack",
"numpy.round",
"datasets_an... | [((1918, 1936), 'numpy.round', 'np.round', (['w_dis', '(4)'], {}), '(w_dis, 4)\n', (1926, 1936), True, 'import numpy as np\n'), ((4358, 4388), 'sklearn.linear_model.RidgeClassifier', 'linear_model.RidgeClassifier', ([], {}), '()\n', (4386, 4388), False, 'from sklearn import linear_model\n'), ((4600, 4616), 'sklearn.pre... |
""" Copyright 2016-2022 by Bitmain Technologies Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applica... | [
"copy.deepcopy",
"mxnet.gluon.nn.SymbolBlock",
"mxnet.cpu",
"json.dumps",
"os.path.join",
"os.path.dirname",
"mxnet.sym.var",
"mxnet.sym.load",
"mxnet.nd.array",
"bmnetm.op_support"
] | [((1907, 1941), 'copy.deepcopy', 'copy.deepcopy', (['sub_graph.input_ops'], {}), '(sub_graph.input_ops)\n', (1920, 1941), False, 'import copy\n'), ((2288, 2323), 'copy.deepcopy', 'copy.deepcopy', (['sub_graph.output_ops'], {}), '(sub_graph.output_ops)\n', (2301, 2323), False, 'import copy\n'), ((6852, 6895), 'json.dump... |
# https://atcoder.jp/contests/abc185/tasks/abc185_d
from math import ceil
N, M = map(int, input().split())
if M == 0:
print(1)
exit()
a_arr = list(map(int, input().split()))
a_arr.sort()
blanks = [0]
for i in range(M):
if i == 0:
blanks.append(a_arr[i] - 1)
continue
blanks.append(a_arr[i... | [
"math.ceil"
] | [((499, 516), 'math.ceil', 'ceil', (['(b / minimum)'], {}), '(b / minimum)\n', (503, 516), False, 'from math import ceil\n')] |
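A general aside on the one API call in this row: `math.ceil(b / minimum)` round-trips through a float, which can misround for very large integers. Competitive-programming code often uses pure-integer ceiling division instead; this substitute is a standard idiom, not taken from the snippet:

```python
def ceil_div(a: int, b: int) -> int:
    # Integer ceiling division, exact for arbitrarily large ints.
    return -(-a // b)

assert ceil_div(7, 3) == 3
assert ceil_div(6, 3) == 2
```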
"""The implementation of U-Net and FCRN-A models."""
from typing import Tuple
import numpy as np
import torch
from torch import nn
from torchvision.models import resnet
from model_config import DROPOUT_PROB
class UOut(nn.Module):
"""Add random noise to every layer of the net."""
def forward(self, input_te... | [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.cuda.FloatTensor",
"torch.cuda.device",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.ones"
] | [((9248, 9289), 'torch.ones', 'torch.ones', (['(1, input_channels, 224, 224)'], {}), '((1, input_channels, 224, 224))\n', (9258, 9289), False, 'import torch\n'), ((8298, 8376), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'filters', 'out_channels': '(1)', 'kernel_size': '(1, 1)', 'bias': '(False)'}), '(in_chann... |
import os
import argparse
import json
from datetime import datetime
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from tensorflow import keras
from tensorflow.keras.callbacks import EarlySto... | [
"sklearn.metrics.balanced_accuracy_score",
"bert.load_bert_weights",
"tensorflow.keras.callbacks.EarlyStopping",
"onecycle.OneCycleScheduler",
"tensorflow.keras.layers.Dense",
"amazon.get_reviews_data",
"os.path.exists",
"tensorflow.keras.layers.Input",
"bert.BertModelLayer.from_params",
"tensorfl... | [((627, 652), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (650, 652), False, 'import argparse\n'), ((3962, 4000), 'os.path.join', 'os.path.join', (['log_dir', 'experiment_name'], {}), '(log_dir, experiment_name)\n', (3974, 4000), False, 'import os\n'), ((4012, 4040), 'os.path.join', 'os.path... |
# scene_manager.py
from copy import deepcopy
from . import editor_session
# Recursively updates a dictionary a with b.
# This assumes that b has a structure that is a subset of a
# Returns whether the dictionary a was modified.
def recursive_update(a, b):
modified = False
for k, v in b.items():
if is... | [
"copy.deepcopy"
] | [((5095, 5110), 'copy.deepcopy', 'deepcopy', (['value'], {}), '(value)\n', (5103, 5110), False, 'from copy import deepcopy\n'), ((30687, 30738), 'copy.deepcopy', 'deepcopy', (['changed_instance.value[changed_prop_name]'], {}), '(changed_instance.value[changed_prop_name])\n', (30695, 30738), False, 'from copy import dee... |
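The comments in this row spell out `recursive_update`'s contract, while the body is truncated right at `if is...`. A sketch that satisfies that contract, assuming the truncated test is `isinstance(v, dict)` (the `deepcopy` calls in the extract column suggest values are copied on assignment):

```python
from copy import deepcopy

# Hypothetical completion: recursively fold b into a, reporting whether
# anything actually changed.
def recursive_update(a, b):
    modified = False
    for k, v in b.items():
        if isinstance(v, dict):
            # Descend into nested dicts (b is a structural subset of a).
            modified = recursive_update(a[k], v) or modified
        elif a.get(k) != v:
            a[k] = deepcopy(v)
            modified = True
    return modified
```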
#!/usr/bin/python
from pyspark.sql import SparkSession
spark = (
SparkSession.builder.master("yarn")
.appName("bigquery-analytics-avg-temperature")
.getOrCreate()
)
bucket = "01-logistics-backup"
spark.conf.set("temporaryGcsBucket", bucket)
history = (
spark.read.format("bigquery").option("table", "v... | [
"pyspark.sql.SparkSession.builder.master"
] | [((70, 105), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['"""yarn"""'], {}), "('yarn')\n", (97, 105), False, 'from pyspark.sql import SparkSession\n')] |
# -*- coding: utf-8 -*-
import gensim
import numpy as np
from sklearn.cluster import MiniBatchKMeans
def read_data_batches(path, batch_size=50, minlength=5):
"""
Reading batched texts of given min. length
:param path: path to the text file ``one line -- one normalized sentence''
:return: batches i... | [
"gensim.models.Word2Vec.load",
"sklearn.cluster.MiniBatchKMeans",
"numpy.asarray",
"numpy.stack",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.matrix"
] | [((1577, 1610), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['path'], {}), '(path)\n', (1604, 1610), False, 'import gensim\n'), ((2729, 2793), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'aspects_count', 'verbose': '(0)', 'n_init': '(100)'}), '(n_clusters=aspects_count, ... |
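`read_data_batches`'s docstring describes the data shape (one normalized sentence per line, a minimum length, batched output) but the body is truncated. A plausible generator matching that description, with whitespace tokenization as an assumption:

```python
# Hypothetical sketch: yield lists of batch_size tokenized lines,
# skipping lines with fewer than minlength tokens.
def read_data_batches(path, batch_size=50, minlength=5):
    batch = []
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            tokens = line.split()
            if len(tokens) < minlength:
                continue
            batch.append(tokens)
            if len(batch) == batch_size:
                yield batch
                batch = []
    if batch:
        yield batch
```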
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import io
import pytest # type: ignore
import odata_url_parser.cli as cli
import odata_url_parser.odata_url_parser as oup
def test_main_ok_minimal(capsys):
job = ['does not matter']
report_expected = job[0]
assert cli.m... | [
"odata_url_parser.cli.main"
] | [((315, 328), 'odata_url_parser.cli.main', 'cli.main', (['job'], {}), '(job)\n', (323, 328), True, 'import odata_url_parser.cli as cli\n')] |
#!/usr/bin/env python
import sys,os,commands
from CommonMethods import *
def main():
if len(sys.argv) < 3:
error = "Usage: cpFromCastor fromDir toDir (optional filter)"
exit(error)
user = os.getenv("USER")
castorDir = "/castor/cern.ch/cms/store/caf/user/" + user + "/" + sys.argv[1] + "/"
... | [
"os.getenv"
] | [((212, 229), 'os.getenv', 'os.getenv', (['"""USER"""'], {}), "('USER')\n", (221, 229), False, 'import sys, os, commands\n')] |
#Dependencies, libraries, and imports
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
#SQLalchemy libraries and functions
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import S... | [
"sqlalchemy.func.count",
"sqlalchemy.func.min",
"flask.Flask",
"datetime.datetime.strptime",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.create_engine",
"sqlalchemy.orm.Session",
"sqlalchemy.func.max",
"matplotlib.style.use",
"sqlalchemy.func.avg",
"pandas.DataFrame",
"datetime.timedelt... | [((67, 95), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (76, 95), False, 'from matplotlib import style\n'), ((524, 574), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///Resources/hawaii.sqlite"""'], {}), "('sqlite:///Resources/hawaii.sqlite')\n", (537, 574... |
from collections import defaultdict
import functools
import json
import logging
import re
import subprocess
import threading
import time
import os
from .abstract_connection import AbstractConnection, RcloneException
class RcloneConnection(AbstractConnection):
def __init__(self):
self._job_status = default... | [
"subprocess.check_output",
"json.loads",
"subprocess.Popen",
"os.environ.copy",
"time.sleep",
"threading.Event",
"collections.defaultdict",
"functools.partial",
"re.sub",
"threading.Thread",
"logging.error",
"random.randint",
"re.search"
] | [((421, 437), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (432, 437), False, 'from collections import defaultdict\n'), ((469, 485), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (480, 485), False, 'from collections import defaultdict\n'), ((514, 530), 'collections.defaultdi... |
import os
import re
import sys
from cffi import FFI
from pyvx import __backend_version__
mydir = os.path.dirname(os.path.abspath(__file__))
def build(name, openvx_install, default):
pwd = os.getcwd()
os.chdir(os.path.dirname(mydir))
assert name != 'default'
hdr = os.path.join(openvx_install, 'inclu... | [
"pyvx.backend.lib._get_backend_name",
"os.path.exists",
"pyvx.backend.lib._get_backend_version",
"cffi.FFI",
"os.path.join",
"pyvx.__backend_version__.decode",
"pyvx.backend.lib._get_backend_install_path",
"os.getcwd",
"os.path.dirname",
"os.path.abspath",
"re.subn"
] | [((116, 141), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (131, 141), False, 'import os\n'), ((196, 207), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (205, 207), False, 'import os\n'), ((285, 338), 'os.path.join', 'os.path.join', (['openvx_install', '"""include"""', '"""VX"""', '"""vx.h"""... |
import requests
from bs4 import BeautifulSoup
class TrackInfo:
"""
An object containing track information and operations necessary for
scraping the info off of 1001tracklists.com
"""
def __init__(self, url):
self.url = url
self.tracklist_id = self.url.split('tracklist/')[1].split... | [
"bs4.BeautifulSoup",
"requests.get"
] | [((736, 775), 'requests.get', 'requests.get', (['self.url'], {'headers': 'headers'}), '(self.url, headers=headers)\n', (748, 775), False, 'import requests\n'), ((791, 833), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (804, 833), False, 'from bs4 ... |
import os
import sys
import datetime
import logging
import traceback
from extensions import db
from sqlalchemy import exc, func
sys.path.append(os.getcwd())
def handle_exception():
# Get exception information
exception_details = sys.exc_info()
# Rollback the db (so the session doesn't crash)
db.sess... | [
"extensions.db.session.rollback",
"sqlalchemy.func.min",
"traceback.print_tb",
"sqlalchemy.func.max",
"os.getcwd",
"sys.exc_info",
"extensions.db.session.add",
"datetime.datetime.now",
"extensions.db.session.commit",
"extensions.db.session.delete",
"logging.error"
] | [((145, 156), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (154, 156), False, 'import os\n'), ((240, 254), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (252, 254), False, 'import sys\n'), ((313, 334), 'extensions.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (332, 334), False, 'from extensions impo... |
from setuptools import setup, find_packages
REQUIRES = [
'Flask>=1.1.1',
'Flask-SocketIO>=4.2.1',
'Flask-Login>=0.4.1',
'requests>=2.22.0',
'pytz>=2019.2',
'paho-mqtt>=1.4.0',
'RPi.GPIO>=0.7.0',
]
setup(
name='AlarmPI',
version='4.7',
description='Home Security System',
au... | [
"setuptools.find_packages"
] | [((377, 392), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (390, 392), False, 'from setuptools import setup, find_packages\n')] |
from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.nasnet import NASNetMobile
from keras.applications import ResNet50V2
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.core import D... | [
"keras.layers.core.Flatten",
"keras.layers.convolutional.Convolution2D",
"keras.models.Sequential",
"keras.layers.LSTM",
"keras.layers.core.Dense",
"keras.layers.Input",
"keras.models.Model",
"keras.layers.convolutional.MaxPooling2D",
"keras.layers.BatchNormalization",
"keras.layers.core.Dropout",... | [((1064, 1112), 'keras.models.Model', 'Model', ([], {'inputs': 'baseModel.input', 'outputs': 'headModel'}), '(inputs=baseModel.input, outputs=headModel)\n', (1069, 1112), False, 'from keras.models import Model\n'), ((2006, 2018), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2016, 2018), False, 'from kera... |
import pyglet
import os
from classes.car import Car
from classes.improvedCircuit import circuit
from classes.Vector import Vector2D
### MAIN LOOP
# config = pyglet.gl.Config(sample_buffers=1, samples=4)
window = pyglet.window.Window(resizable=False, width=1920, height=1080, vsync=True)
#inner_points = [[18,3],[8,3],[... | [
"pyglet.app.exit",
"pyglet.clock.schedule_interval",
"pyglet.app.run",
"pyglet.graphics.OrderedGroup",
"pyglet.graphics.Batch",
"os.path.realpath",
"pyglet.sprite.Sprite",
"classes.improvedCircuit.circuit.fromJSON",
"classes.car.Car",
"pyglet.window.Window"
] | [((213, 287), 'pyglet.window.Window', 'pyglet.window.Window', ([], {'resizable': '(False)', 'width': '(1920)', 'height': '(1080)', 'vsync': '(True)'}), '(resizable=False, width=1920, height=1080, vsync=True)\n', (233, 287), False, 'import pyglet\n'), ((1688, 1711), 'pyglet.graphics.Batch', 'pyglet.graphics.Batch', ([],... |
from __future__ import absolute_import
import os
import errno
import numpy as np
def mkdir_if_missing(dir_path):
try:
os.makedirs(dir_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_free_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >... | [
"os.system",
"numpy.argmax",
"os.makedirs"
] | [((260, 325), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (269, 325), False, 'import os\n'), ((494, 521), 'numpy.argmax', 'np.argmax', (['memory_available'], {}), '(memory_available)\n', (503, 521), True, '... |
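`get_free_gpu` is truncated after shelling out to `nvidia-smi`; the extract column shows it ends in `np.argmax(memory_available)`. A hedged completion of the widely circulated version of this helper, which parses the `Free` lines the pipeline wrote to `./tmp`:

```python
import os
import numpy as np

# Hypothetical completion: pick the GPU index with the most free memory,
# assuming each matched line looks like "        Free : 11019 MiB".
def get_free_gpu():
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    with open('tmp') as handle:
        memory_available = [int(line.split()[2]) for line in handle]
    return np.argmax(memory_available)
```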
import os
import sys
import json
import argparse
script_content = """\
#!/bin/sh
gpython=${PYENV_ROOT}/versions/$(pyenv global)/bin/python
gproj=${PYENV_ROOT}/versions/$(pyenv global)/bin/proj
if [[ $1 =~ ^[^\-] ]] ; then
result=$(exec $gpython $gproj --echo $1)
exit_code=$?
if test $exit_code -eq 0 ; th... | [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"json.dump",
"os.path.join",
"os.getcwd",
"sys.exit",
"json.load",
"os.path.abspath",
"os.path.expanduser"
] | [((1403, 1439), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.config/proj"""'], {}), "('~/.config/proj')\n", (1421, 1439), False, 'import os\n'), ((1454, 1484), 'os.path.join', 'os.path.join', (['conf_dir', '"""proj"""'], {}), "(conf_dir, 'proj')\n", (1466, 1484), False, 'import os\n'), ((1494, 1526), 'os.path.j... |
# coding: utf-8
"""
Relay
~~~~~
Relay is an irc micro-framework that smells too much like a web framework
Copyright (c) 2015, ldesgoui <relay at ldesgoui dot xyz>
See LICENSE for more informations.
"""
from collections import defaultdict
import logging
import os
import socket
from . import constants
from . import ... | [
"logging.getLogger",
"collections.defaultdict",
"socket.socket",
"os.getenv"
] | [((499, 515), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (510, 515), False, 'from collections import defaultdict\n'), ((587, 610), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (604, 610), False, 'import logging\n'), ((632, 649), 'collections.defaultdict', 'defaultdict',... |
from urwid import Edit, IntEdit, LineBox
from src.models.grade import AssignmentType, grade
from src.models.state import state
from src.views.widgets.form import Form
from src.views.widgets.radio import RadioGroup
class GradePanel(Form):
def __init__(self):
grading_directory = LineBox(Edit(("header", "G... | [
"urwid.Edit",
"src.views.widgets.radio.RadioGroup"
] | [((491, 559), 'src.views.widgets.radio.RadioGroup', 'RadioGroup', (['"""Assignment type"""', 'AssignmentType', 'state.assignment_type'], {}), "('Assignment type', AssignmentType, state.assignment_type)\n", (501, 559), False, 'from src.views.widgets.radio import RadioGroup\n'), ((302, 368), 'urwid.Edit', 'Edit', (["('he... |
# Trinket IO demo
# Welcome to CircuitPython 3.1.1 :)
import board
import adafruit_dotstar as dotstar
import time
import busio
import struct
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.mouse import Mouse
from qwertyMAC import *
overlay = webaccess
# One pixel connected internally!
dot = dotstar.DotS... | [
"adafruit_hid.keyboard.Keyboard",
"adafruit_hid.mouse.Mouse",
"struct.pack",
"time.sleep",
"struct.unpack",
"busio.UART",
"adafruit_dotstar.DotStar"
] | [((308, 379), 'adafruit_dotstar.DotStar', 'dotstar.DotStar', (['board.APA102_SCK', 'board.APA102_MOSI', '(1)'], {'brightness': '(0.7)'}), '(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.7)\n', (323, 379), True, 'import adafruit_dotstar as dotstar\n'), ((388, 435), 'busio.UART', 'busio.UART', (['board.TX', 'board... |
# Generated by Django 3.0.6 on 2020-07-10 22:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
... | [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((335, 428), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (351, 428), False, 'from django.db import migrations, models\... |
import numpy as np
import networkx as nx
from commons import *
from tqdm import tqdm
def apply_rrt(state_space, starting_state, target_space, obstacle_map, granularity=0.1, d_threshold=0.5,
n_samples=1000, find_optimal=True):
tree = nx.DiGraph()
tree.add_node(starting_state)
final_state = N... | [
"networkx.DiGraph",
"networkx.dijkstra_path_length"
] | [((253, 265), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (263, 265), True, 'import networkx as nx\n'), ((1155, 1207), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['tree', 'starting_state', 'm_new'], {}), '(tree, starting_state, m_new)\n', (1178, 1207), True, 'import networkx as nx\n'), ((1389,... |
"""add-sign-hash-table
Revision ID: b829c4a4c128
Revises: cc5dce03ad39
Create Date: 2021-05-25 16:04:18.028626
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b829c4a4c128'
down_revision = 'cc5dce03ad39'
branch_labels = None
depends_on = None
def upgrade():
... | [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"alembic.op.batch_alter_table",
"sqlalchemy.Integer",
"sqlalchemy.String",
"sqlalchemy.BIGINT"
] | [((1030, 1057), 'alembic.op.drop_table', 'op.drop_table', (['"""user_signs"""'], {}), "('user_signs')\n", (1043, 1057), False, 'from alembic import op\n'), ((1557, 1590), 'alembic.op.drop_table', 'op.drop_table', (['"""user_sign_hashes"""'], {}), "('user_sign_hashes')\n", (1570, 1590), False, 'from alembic import op\n'... |
# This file is part of the Blockchain-based Fair Exchange Benchmark Tool
# https://gitlab.com/MatthiasLohr/bfebench
#
# Copyright 2021-2022 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy o... | [
"os.path.join",
"time.sleep",
"bfebench.utils.json_stream.JsonObjectSocketStreamForwarder",
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((1088, 1120), 'tempfile.mkdtemp', 'mkdtemp', ([], {'prefix': '"""bfebench-test-"""'}), "(prefix='bfebench-test-')\n", (1095, 1120), False, 'from tempfile import mkdtemp\n'), ((1162, 1183), 'shutil.rmtree', 'rmtree', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (1168, 1183), False, 'from shutil import rmtree\n'), ((1... |
from django.utils import timezone
from django.shortcuts import get_object_or_404
from backend.cuida24.serializers import *
logger = logging.getLogger("mylogger")
def habitsFrontToBackJSON(request_data, user):
request_data['caregiver'] = get_object_or_404(Caregiver, info=user.pk).pk
return request_data
def ... | [
"django.utils.timezone.now",
"django.shortcuts.get_object_or_404"
] | [((600, 614), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (612, 614), False, 'from django.utils import timezone\n'), ((245, 287), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Caregiver'], {'info': 'user.pk'}), '(Caregiver, info=user.pk)\n', (262, 287), False, 'from django.shortcuts i... |
# -*- coding: utf-8 -*-
"""
Low level tool for writing percent difference reports. Typically, this
is called via: :func:`cla.DR_Results.rptpct`.
"""
from io import StringIO
from types import SimpleNamespace
import warnings
import numpy as np
import matplotlib.pyplot as plt
from pyyeti import ytools, locate, writer
from... | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.interactive",
"pyyeti.writer.vecwrite",
"numpy.arange",
"numpy.atleast_2d",
"pyyeti.locate.list_intersect",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"pyyeti.writer.formheader",
"io.StringIO",
"pyyeti.locate.... | [((514, 548), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (533, 548), True, 'import numpy as np\n'), ((933, 953), 'numpy.atleast_1d', 'np.atleast_1d', (['value'], {}), '(value)\n', (946, 953), True, 'import numpy as np\n'), ((7410, 7450), 'pyyeti.ytools.histogra... |
import argparse
from attacks.image_save_runner import ImageSaveAttackRunner
from attacks.selective_universal import SelectiveUniversal
from dataset import Dataset
from models import create_ensemble
from models.model_configs import config_from_string
parser = argparse.ArgumentParser(description='Defence')
parser.add_a... | [
"dataset.Dataset",
"argparse.ArgumentParser",
"models.model_configs.config_from_string",
"attacks.selective_universal.SelectiveUniversal",
"models.create_ensemble",
"attacks.image_save_runner.ImageSaveAttackRunner"
] | [((261, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Defence"""'}), "(description='Defence')\n", (284, 307), False, 'import argparse\n'), ((1215, 1254), 'dataset.Dataset', 'Dataset', (['args.input_dir'], {'target_file': '""""""'}), "(args.input_dir, target_file='')\n", (1222, 125... |
from google.appengine.ext import ndb
CACHE_DATA = {}
def get(cache_key):
full_cache_key = '{}:{}'.format(cache_key, ndb.get_context().__hash__())
return CACHE_DATA.get(full_cache_key, None)
def set(cache_key, value):
full_cache_key = '{}:{}'.format(cache_key, ndb.get_context().__hash__())
CACHE_DA... | [
"google.appengine.ext.ndb.get_context"
] | [((124, 141), 'google.appengine.ext.ndb.get_context', 'ndb.get_context', ([], {}), '()\n', (139, 141), False, 'from google.appengine.ext import ndb\n'), ((278, 295), 'google.appengine.ext.ndb.get_context', 'ndb.get_context', ([], {}), '()\n', (293, 295), False, 'from google.appengine.ext import ndb\n')] |
"""
Tools for calculations
"""
import warnings
from aiida.tools import CalculationTools
from aiida.common import InputValidationError
from aiida.orm import CalcJobNode, Dict
from aiida.common.links import LinkType
from aiida.plugins import DataFactory
from aiida.engine import CalcJob, ProcessBuilder
from aiida_castep... | [
"aiida.orm.Dict",
"aiida.orm.load_node",
"deepdiff.DeepDiff",
"os.path.split",
"aiida_castep.data.get_pseudos_from_structure",
"aiida.plugins.CalculationFactory",
"warnings.warn"
] | [((3393, 3443), 'aiida_castep.data.get_pseudos_from_structure', 'get_pseudos_from_structure', (['structure', 'family_name'], {}), '(structure, family_name)\n', (3419, 3443), False, 'from aiida_castep.data import get_pseudos_from_structure\n'), ((7393, 7458), 'warnings.warn', 'warnings.warn', (['"""No existing Dict node... |
from nose.tools import eq_
import amo.tests
from addons.models import (Addon, attach_categories, attach_tags,
attach_translations)
from addons.search import extract
class TestExtract(amo.tests.TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(T... | [
"addons.models.Addon.objects.filter",
"addons.search.extract"
] | [((674, 709), 'addons.models.Addon.objects.filter', 'Addon.objects.filter', ([], {'id__in': '[3615]'}), '(id__in=[3615])\n', (694, 709), False, 'from addons.models import Addon, attach_categories, attach_tags, attach_translations\n'), ((825, 844), 'addons.search.extract', 'extract', (['self.addon'], {}), '(self.addon)\... |
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import torchvision
import torch
from torchvision import models, datasets
class CRNN_Base(nn.Module):
def __init__(self, class_num, c, h, w, k, filters, poolings, dropout_rate, g... | [
"torch.nn.BatchNorm2d",
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.nn.Dropout2d",
"torchvision.models.resnet18",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.ELU",
"torch.nn.GRU"
] | [((458, 488), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'c'}), '(num_features=c)\n', (472, 488), True, 'import torch.nn as nn\n'), ((585, 634), 'torch.nn.Conv2d', 'nn.Conv2d', (['c', 'filters[0]'], {'kernel_size': 'k', 'stride': '(1)'}), '(c, filters[0], kernel_size=k, stride=1)\n', (594, 634), Tr... |
import pandas as pd
from sqlalchemy import create_engine
from config import DATABASE_URI
from predictions.common_predictor import CommonPredictor
pd.set_option("display.width", 1000)
pd.set_option("display.max_columns", 50)
class DotaPredictor(CommonPredictor):
def __init__(self, debug: bool = False):
su... | [
"mlflow.set_tracking_uri",
"sqlalchemy.create_engine",
"pandas.merge",
"mlflow.set_experiment",
"pandas.set_option",
"pandas.read_sql_table"
] | [((147, 183), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (160, 183), True, 'import pandas as pd\n'), ((184, 224), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(50)'], {}), "('display.max_columns', 50)\n", (197, 224), True, 'import... |
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
url('^$',views.diet,name = 'diet'),
url('^$',views.health_of_day,name='healthToday'),
url(r'^search/', views.search_results, name='search_results'),
url(r'... | [
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((151, 185), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.diet'], {'name': '"""diet"""'}), "('^$', views.diet, name='diet')\n", (154, 185), False, 'from django.conf.urls import url\n'), ((191, 241), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.health_of_day'], {'name': '"""healthToday"""'}), "('^$', view... |
import os
import sys
from gradslam.config import CfgNode as CN
cfg = CN()
cfg.TRAIN = CN()
cfg.TRAIN.HYPERPARAM_1 = 0.9
| [
"gradslam.config.CfgNode"
] | [((72, 76), 'gradslam.config.CfgNode', 'CN', ([], {}), '()\n', (74, 76), True, 'from gradslam.config import CfgNode as CN\n'), ((90, 94), 'gradslam.config.CfgNode', 'CN', ([], {}), '()\n', (92, 94), True, 'from gradslam.config import CfgNode as CN\n')] |
#
# GitHubNeo.py
#
# note: i tried using bulbs, which would be easier to
# migrate to other tinkerpop graph engines, but had
# trouble authenticating
#
#
"""
package oompa.tracking.github
experiments with working on github graphs in neo
uses py2neo
TODO: i think bulb seems to have better object modeling... | [
"py2neo.Graph",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"py2neo.authenticate",
"datetime.timedelta"
] | [((2428, 2478), 'datetime.timedelta', 'timedelta', ([], {'days': 'days', 'hours': 'hours', 'minutes': 'minutes'}), '(days=days, hours=hours, minutes=minutes)\n', (2437, 2478), False, 'from datetime import timedelta\n'), ((3276, 3327), 'py2neo.authenticate', 'py2neo.authenticate', (['neo_host', 'neo_user', 'neo_passwd']... |
from src.dgol_worker.cell_env import CellEnv
ce = CellEnv() | [
"src.dgol_worker.cell_env.CellEnv"
] | [((51, 60), 'src.dgol_worker.cell_env.CellEnv', 'CellEnv', ([], {}), '()\n', (58, 60), False, 'from src.dgol_worker.cell_env import CellEnv\n')] |
from flask import Flask, Response, send_from_directory
import random, time
app = Flask(__name__, static_folder='www')
@app.route('/')
def index():
return ''
@app.route('/stream')
def stream():
def event():
while True:
yield "data: " + random.choice(['a', 'b', 'c', 'd']) + "nn"
with app.app_context():
ti... | [
"random.choice",
"time.sleep",
"flask.Flask"
] | [((81, 117), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""www"""'}), "(__name__, static_folder='www')\n", (86, 117), False, 'from flask import Flask, Response, send_from_directory\n'), ((318, 331), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (328, 331), False, 'import random, time\n'), ((244, 27... |
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="django-osm-field",
author="<NAME>",
author_email="<EMAIL>",
description="Django OpenStreetMap Field",
license="MIT",
long_description=long_description,
long_description_content_type=... | [
"setuptools.find_packages"
] | [((687, 821), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['*.example', '*.example.*', 'example.*', 'example', '*.tests', '*.tests.*',\n 'tests.*', 'tests']"}), "(exclude=['*.example', '*.example.*', 'example.*',\n 'example', '*.tests', '*.tests.*', 'tests.*', 'tests'])\n", (711, 821... |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANT... | [
"platform.node",
"urllib.quote",
"sys.exit",
"os.path.islink",
"os.walk",
"os.remove",
"os.path.exists",
"time.ctime",
"os.path.splitdrive",
"shutil.move",
"zlib.adler32",
"os.path.split",
"os.path.isdir",
"optparse.OptionGroup",
"requests.packages.urllib3.disable_warnings",
"os.path.i... | [((1404, 1470), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (1446, 1470), False, 'import requests\n'), ((1981, 2134), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""usage: %prog [options] list"""... |
import torch.nn as nn
import numpy as np
import torch
import os
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from yolov3 import add_yolov3_config
def load_darknet_wei... | [
"numpy.fromfile",
"yolov3.add_yolov3_config",
"detectron2.config.get_cfg",
"detectron2.engine.DefaultTrainer.build_model",
"os.path.join",
"torch.from_numpy",
"detectron2.engine.launch",
"detectron2.engine.default_setup",
"detectron2.engine.default_argument_parser"
] | [((2929, 2938), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (2936, 2938), False, 'from detectron2.config import get_cfg\n'), ((2943, 2965), 'yolov3.add_yolov3_config', 'add_yolov3_config', (['cfg'], {}), '(cfg)\n', (2960, 2965), False, 'from yolov3 import add_yolov3_config\n'), ((3064, 3088), 'detectron2.... |
"""This module contains script entrypoints for shreddit.
"""
import argparse
import yaml
import logging
import os
import pkg_resources
from shreddit import default_config
from shreddit.shredder import Shredder
CONFIG_FILE_PATH = "/app/config/shreddit.yml"
def generate_empty_config(path: str):
print("Writing shre... | [
"os.path.isfile",
"shreddit.shredder.Shredder",
"yaml.safe_load",
"pkg_resources.resource_string"
] | [((1068, 1092), 'shreddit.shredder.Shredder', 'Shredder', (['default_config'], {}), '(default_config)\n', (1076, 1092), False, 'from shreddit.shredder import Shredder\n'), ((487, 519), 'os.path.isfile', 'os.path.isfile', (['CONFIG_FILE_PATH'], {}), '(CONFIG_FILE_PATH)\n', (501, 519), False, 'import os\n'), ((896, 914),... |
from PyPDF3 import PdfFileReader
class BNPConverter:
def __init__(self, input_file, start_number=1):
self.input_file = input_file
self.start_number = start_number
def get_text_lines(self):
pdf_data = PdfFileReader(self.input_file)
text_lines = []
for page in range(pdf_... | [
"PyPDF3.PdfFileReader"
] | [((235, 265), 'PyPDF3.PdfFileReader', 'PdfFileReader', (['self.input_file'], {}), '(self.input_file)\n', (248, 265), False, 'from PyPDF3 import PdfFileReader\n')] |
from flask import Blueprint
from flask import g
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from werkzeug.exceptions import abort
from werkzeug.utils import secure_filename
from bson import ObjectId
from blog.auth imp... | [
"flask.render_template",
"flask.flash",
"flask.request.form.getlist",
"flask.request.form.get",
"flask.url_for",
"werkzeug.utils.secure_filename",
"flask.request.files.get",
"bson.ObjectId",
"flask.Blueprint",
"blog.db.get_db"
] | [((375, 402), 'flask.Blueprint', 'Blueprint', (['"""user"""', '__name__'], {}), "('user', __name__)\n", (384, 402), False, 'from flask import Blueprint\n'), ((475, 511), 'flask.render_template', 'render_template', (['"""user/profile.html"""'], {}), "('user/profile.html')\n", (490, 511), False, 'from flask import render... |
#
# Created on Sat Dec 25 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the ri... | [
"django.shortcuts.render",
"django.shortcuts.HttpResponseRedirect",
"django.contrib.messages.success",
"django.urls.reverse_lazy"
] | [((2066, 2092), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""chasislist"""'], {}), "('chasislist')\n", (2078, 2092), False, 'from django.urls import reverse_lazy\n'), ((2985, 3011), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""chasislist"""'], {}), "('chasislist')\n", (2997, 3011), False, 'from django.urls im... |
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage.data import astronaut
from skimage.color import rgb2gray
from skimage.filters import sobel
from skimage.segmentation import felzenszwalb, slic, quickshift, watershed
from skimage.segmentation import mark_boundaries
from skimage.util import img_... | [
"skimage.color.rgb2gray",
"skimage.segmentation.mark_boundaries",
"numpy.unique",
"matplotlib.pyplot.show",
"skimage.segmentation.watershed",
"skimage.segmentation.felzenszwalb",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"skimage.segmentation.quickshift",
"skimage.segmentatio... | [((425, 447), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (435, 447), False, 'import cv2\n'), ((507, 559), 'skimage.segmentation.felzenszwalb', 'felzenszwalb', (['img'], {'scale': '(100)', 'sigma': '(0.5)', 'min_size': '(50)'}), '(img, scale=100, sigma=0.5, min_size=50)\n', (519, 559), False, 'f... |
# mpcdata/tests/test_query.py
# import pytest
# Third-party imports
import os
# Import the specific package/module/function we are testing
import mpcdata.params as params
# from .context import mpcdata
def test_required_dictionaries_exist():
"""
Does params.py contain all of the required dictionaries ?
"... | [
"os.path.dirname",
"os.path.join"
] | [((1039, 1068), 'os.path.join', 'os.path.join', (['topDir', '"""share"""'], {}), "(topDir, 'share')\n", (1051, 1068), False, 'import os\n'), ((1087, 1133), 'os.path.join', 'os.path.join', (['topDir', '"""share"""', '"""data_external"""'], {}), "(topDir, 'share', 'data_external')\n", (1099, 1133), False, 'import os\n'),... |
#45 - write a program that makes the computer play jokenpo (rock-paper-scissors) with you.
print('=====JOKENPO=====')
print('')
from random import randint
from time import sleep
itens = ('pedra','papel','tesoura')
computador = randint(0, 2)
print('''FAÇA SUA ESCOLHA
[ 0 ] pedra
[ 1 ] papel
[ 2 ] tesoura
''')
jogador = int(input('Qual a sua j... | [
"random.randint",
"time.sleep"
] | [((203, 216), 'random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (210, 216), False, 'from random import randint\n'), ((345, 353), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (350, 353), False, 'from time import sleep\n'), ((367, 375), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (372, 375), False, 'fro... |
from flask import Flask
from config import DefaultConfig
# factory method for creating app objects
def create_app(config=DefaultConfig()):
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(config)
# initialize database and migrations
from meep.models import db, migrate
... | [
"meep.models.migrate.init_app",
"config.DefaultConfig",
"meep.models.db.init_app",
"flask.Flask"
] | [((124, 139), 'config.DefaultConfig', 'DefaultConfig', ([], {}), '()\n', (137, 139), False, 'from config import DefaultConfig\n'), ((152, 198), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (157, 198), False, 'from flask import Flask\n'), ... |
import os
import uuid
import pytest # type: ignore
from hopeit.testing.apps import execute_event
from hopeit.server.version import APPS_API_VERSION
from model import Something
from simple_example.collector.collect_spawn import ItemsInfo, ItemsCollected
APP_VERSION = APPS_API_VERSION.replace('.', "x")
@pytest.fix... | [
"simple_example.collector.collect_spawn.ItemsCollected",
"os.makedirs",
"uuid.uuid4",
"simple_example.collector.collect_spawn.ItemsInfo",
"hopeit.server.version.APPS_API_VERSION.replace",
"hopeit.testing.apps.execute_event"
] | [((272, 306), 'hopeit.server.version.APPS_API_VERSION.replace', 'APPS_API_VERSION.replace', (['"""."""', '"""x"""'], {}), "('.', 'x')\n", (296, 306), False, 'from hopeit.server.version import APPS_API_VERSION\n'), ((969, 996), 'simple_example.collector.collect_spawn.ItemsInfo', 'ItemsInfo', (['*sample_file_ids'], {}), ... |
from flask import Flask, jsonify, render_template
import pandas as pd
import os
import pymongo
from flask import send_from_directory
from pymongo import MongoClient
# initialize flask app
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
# read the data and merge it
df_labels = pd.read_csv('train_labels.csv... | [
"flask.render_template",
"pandas.read_csv",
"flask.Flask",
"pandas.merge",
"os.environ.get",
"os.path.join",
"pymongo.MongoClient",
"flask.jsonify"
] | [((195, 210), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (200, 210), False, 'from flask import Flask, jsonify, render_template\n'), ((291, 322), 'pandas.read_csv', 'pd.read_csv', (['"""train_labels.csv"""'], {}), "('train_labels.csv')\n", (302, 322), True, 'import pandas as pd\n'), ((335, 366), 'pandas... |
import torch
import yaml
import argparse
from dataset.BSD500 import BSD500Dataset
from models.HED import HED
###############
# parse cfg
###############
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', dest='cfg', required=True, help='path to config file')
args = parser.parse_known_args()
args = pars... | [
"models.HED.HED",
"yaml.load",
"argparse.ArgumentParser"
] | [((166, 191), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (189, 191), False, 'import argparse\n'), ((530, 538), 'models.HED.HED', 'HED', (['cfg'], {}), '(cfg)\n', (533, 538), False, 'from models.HED import HED\n'), ((452, 464), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (461, 464), Fals... |
# Generated by Django 2.2.5 on 2019-12-03 08:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forums', '0006_auto_20191203_0758'),
]
operations = [
migrations.AlterField(
model_name='post',
... | [
"django.db.models.ForeignKey"
] | [((365, 474), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts"""', 'to': '"""forums.Thread"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='posts', to='forums.Thread')\n", (382, 474), False, 'from django.db im... |
import cv2
OPENCV_OBJECT_TRACKERS = {
"csrt": cv2.TrackerCSRT_create,
"kcf": cv2.TrackerKCF_create,
"mil": cv2.TrackerMIL_create
}
class Track:
"""
Tracking of a single person
"""
def __init__(self, tracker_name, first_frame, bbox, id, references):
self._tracker... | [
"cv2.dnn.NMSBoxes"
] | [((5571, 5644), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['bb', '[1.0, 0.9]', 'self.CONF_THRESHOLD', 'self.NMS_THRESHOLD'], {}), '(bb, [1.0, 0.9], self.CONF_THRESHOLD, self.NMS_THRESHOLD)\n', (5587, 5644), False, 'import cv2\n')] |
""" General purpose functions """
import hashlib
LOGGING_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
def hash_from_strings(items):
""" Produce a hash value from the combination of all str elements """
JOIN_KEY = '+|+'
item_text = JOIN_KEY.join(items).encode('utf-8')
return hashlib.sha256(item_t... | [
"hashlib.sha256"
] | [((299, 324), 'hashlib.sha256', 'hashlib.sha256', (['item_text'], {}), '(item_text)\n', (313, 324), False, 'import hashlib\n')] |
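The `return` in `hash_from_strings` is cut off at `hashlib.sha256(item_t...`; whether it returns the hash object or a hex digest is not visible. A self-contained version under the hex-digest assumption, plus a usage check showing that element order matters:

```python
import hashlib

# Self-contained sketch; .hexdigest() is an assumption about the
# truncated return statement.
def hash_from_strings(items):
    item_text = '+|+'.join(items).encode('utf-8')
    return hashlib.sha256(item_text).hexdigest()

assert hash_from_strings(['a', 'b']) == hash_from_strings(['a', 'b'])
assert hash_from_strings(['a', 'b']) != hash_from_strings(['b', 'a'])
```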
from .changemanager_base import BaseChangeManager
from ..utils.psdict import PsDict
from ..table.tablechanges import TableChanges
from .slot import Slot
import copy
class DictChangeManager(BaseChangeManager):
"""
Manage changes that occurred in a DataFrame between runs.
"""
def __init__(self,
... | [
"copy.copy"
] | [((1699, 1714), 'copy.copy', 'copy.copy', (['data'], {}), '(data)\n', (1708, 1714), False, 'import copy\n')] |
import os
import torch
from classifier.classes.utils.Params import Params
class Loader:
def __init__(self, modality: str, for_submodule: bool = False):
self._modality = modality
self._modality_params = Params.load_modality_params(self._modality)
experiment_params = Params.load_experime... | [
"classifier.classes.utils.Params.Params.load_dataset_params",
"os.path.join",
"classifier.classes.utils.Params.Params.load_experiment_params",
"classifier.classes.utils.Params.Params.load_modality_params",
"classifier.classes.utils.Params.Params.load_network_params"
] | [((227, 270), 'classifier.classes.utils.Params.Params.load_modality_params', 'Params.load_modality_params', (['self._modality'], {}), '(self._modality)\n', (254, 270), False, 'from classifier.classes.utils.Params import Params\n'), ((300, 331), 'classifier.classes.utils.Params.Params.load_experiment_params', 'Params.lo... |
import pytest
from hyperloop.Python.mission import lat_long
import numpy as np
from openmdao.api import Group, Problem
def create_problem(component):
root = Group()
prob = Problem(root)
prob.root.add('comp', component)
return prob
class TestMissionDrag(object):
def test_case1_vs_npss(self):
... | [
"hyperloop.Python.mission.lat_long.LatLong",
"numpy.isclose",
"openmdao.api.Problem",
"openmdao.api.Group"
] | [((162, 169), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (167, 169), False, 'from openmdao.api import Group, Problem\n'), ((181, 194), 'openmdao.api.Problem', 'Problem', (['root'], {}), '(root)\n', (188, 194), False, 'from openmdao.api import Group, Problem\n'), ((338, 356), 'hyperloop.Python.mission.lat_long.Lat... |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import os
import pkgutil
from os.path import abspath
from inspect import getsourcefile
CLIENT_MAP = {}
MODULE_TO_TYPE_MAPPINGS = {}
ALL_SERVICES_DIR = "services"
this_file_path = abspath(getsourcefile(lambda: 0))
if "site-p... | [
"inspect.getsourcefile",
"os.path.join",
"pkgutil.iter_modules"
] | [((563, 614), 'os.path.join', 'os.path.join', (['python_cli_root_dir', 'ALL_SERVICES_DIR'], {}), '(python_cli_root_dir, ALL_SERVICES_DIR)\n', (575, 614), False, 'import os\n'), ((811, 852), 'pkgutil.iter_modules', 'pkgutil.iter_modules', ([], {'path': '[services_dir]'}), '(path=[services_dir])\n', (831, 852), False, 'i... |
# reference: http://icrawler.readthedocs.io/en/latest/usage.html
from icrawler.builtin import GoogleImageCrawler
import os
dataset_base_dir = 'D:/Workspace/Dataset/fake_image_detection/task_2'
keyword_lists = ['snapchat face swap', 'MSQRD']
for keyword in keyword_lists:
folder_path = dataset_base_dir + '/' + ke... | [
"os.path.exists",
"icrawler.builtin.GoogleImageCrawler",
"os.makedirs"
] | [((488, 586), 'icrawler.builtin.GoogleImageCrawler', 'GoogleImageCrawler', ([], {'parser_threads': '(2)', 'downloader_threads': '(4)', 'storage': "{'root_dir': folder_path}"}), "(parser_threads=2, downloader_threads=4, storage={\n 'root_dir': folder_path})\n", (506, 586), False, 'from icrawler.builtin import GoogleI... |
# Databricks notebook source
# MAGIC %run ../app/bootstrap
# COMMAND ----------
from pyspark.sql.dataframe import DataFrame
from datalakebundle.imports import transformation
# COMMAND ----------
datasets = [
{
"id": "123",
"name": "knihydobrovsky_cz",
"custom_attrs": {
105: "... | [
"datalakebundle.imports.transformation"
] | [((618, 661), 'datalakebundle.imports.transformation', 'transformation', (['"""%datalake.base_base_path%"""'], {}), "('%datalake.base_base_path%')\n", (632, 661), False, 'from datalakebundle.imports import transformation\n'), ((909, 925), 'datalakebundle.imports.transformation', 'transformation', ([], {}), '()\n', (923... |
from pyspark.sql.types import StructField
from cishouseholds.pyspark_utils import convert_cerberus_schema_to_pyspark
def test_conversion():
cerberus_schema = {"id": {"type": "string"}, "whole_number": {"type": "integer"}}
pyspark_schema = convert_cerberus_schema_to_pyspark(cerberus_schema)
assert len(p... | [
"cishouseholds.pyspark_utils.convert_cerberus_schema_to_pyspark"
] | [((251, 302), 'cishouseholds.pyspark_utils.convert_cerberus_schema_to_pyspark', 'convert_cerberus_schema_to_pyspark', (['cerberus_schema'], {}), '(cerberus_schema)\n', (285, 302), False, 'from cishouseholds.pyspark_utils import convert_cerberus_schema_to_pyspark\n')] |
from typing import List, Optional, Type
import pyspark.sql.functions as F
from pyspark.sql import DataFrame as SparkDataFrame
from pyspark.sql.types import DataType
import src.sparkcleaner.helpers.verify as verify
def remove_leading_zeros(df: SparkDataFrame,
col_name: str,
... | [
"src.sparkcleaner.helpers.verify.verify_func_input",
"pyspark.sql.functions.col"
] | [((1820, 1871), 'src.sparkcleaner.helpers.verify.verify_func_input', 'verify.verify_func_input', (['input_vals', 'expected_vals'], {}), '(input_vals, expected_vals)\n', (1844, 1871), True, 'import src.sparkcleaner.helpers.verify as verify\n'), ((4594, 4645), 'src.sparkcleaner.helpers.verify.verify_func_input', 'verify.... |
from tkinter import Tk, Canvas
# This is an emulated display with the same API interface as for the Unicorn HAT/pHAT hardware.
# Thus, it relies upon (in part) code from: https://github.com/pimoroni/unicorn-hat/blob/master/library/UnicornHat/unicornhat.py
# Note that only the pHAT is supported, and rotation of the di... | [
"tkinter.Canvas",
"tkinter.Tk"
] | [((2613, 2617), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (2615, 2617), False, 'from tkinter import Tk, Canvas\n'), ((900, 940), 'tkinter.Canvas', 'Canvas', (['self.master'], {'width': '(30)', 'height': '(30)'}), '(self.master, width=30, height=30)\n', (906, 940), False, 'from tkinter import Tk, Canvas\n')] |
# Python plan -> Open Workbench XML converter.
#
# Python plan defines a Work Breakdown Structure where
# tasks are dictionaries and children are defined in a list.
# Children can contain sequences, to simplify data input;
# sequenced tasks are automatically chained (dependencies).
import sys
import math
from ... | [
"datetime.datetime",
"datetime.timedelta",
"math.floor"
] | [((573, 610), 'datetime.datetime', 'datetime', ([], {'year': '(2016)', 'month': '(10)', 'day': '(10)'}), '(year=2016, month=10, day=10)\n', (581, 610), False, 'from datetime import datetime, timedelta\n'), ((1356, 1393), 'math.floor', 'math.floor', (['((_effort_in_days - 1) / 5)'], {}), '((_effort_in_days - 1) / 5)\n',... |
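The header comments in this row describe the converter's input: a Work Breakdown Structure where tasks are dicts, children live in a list, and a nested sequence chains its tasks as dependencies. A hypothetical plan illustrating that shape (all names and numbers invented):

```python
# Hypothetical input plan: the inner list under "children" is a sequence,
# so "design" -> "build" -> "test" would be chained automatically.
plan = {
    "name": "Project",
    "children": [
        {"name": "kickoff", "effort": 1},
        [
            {"name": "design", "effort": 5},
            {"name": "build", "effort": 10},
            {"name": "test", "effort": 3},
        ],
    ],
}
```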
import rdkit.Chem as Chem
import pickle
def smi_tokenizer(smi):
"""
Tokenize a SMILES molecule or reaction
"""
import re
pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
regex = re.compile(pattern)
tokens = [token for token... | [
"pickle.dump",
"re.compile",
"rdkit.Chem.MolFromSmiles",
"rdkit.Chem.MolToSmiles",
"rdkit.Chem.RemoveHs"
] | [((271, 290), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (281, 290), False, 'import re\n'), ((2471, 2497), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (2489, 2497), True, 'import rdkit.Chem as Chem\n'), ((2508, 2526), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mo... |
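`smi_tokenizer`'s body is truncated mid-comprehension. The regex is fully visible, though, and the widely used completion of this exact pattern joins the matches and asserts the tokenization is lossless; a sketch under that assumption:

```python
import re

# Pattern copied verbatim from the snippet above.
pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"

def smi_tokenizer(smi):
    regex = re.compile(pattern)
    tokens = [token for token in regex.findall(smi)]
    # Round-trip check: every character must be covered by some token.
    assert smi == ''.join(tokens)
    return ' '.join(tokens)

print(smi_tokenizer('CC(=O)Oc1ccccc1C(=O)O'))
# -> C C ( = O ) O c 1 c c c c c 1 C ( = O ) O
```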
from queue import Queue
from datetime import datetime, timedelta
from .INewslistScraper import INewslistScraper
from .. import article
from .. import driver
class Scraper(INewslistScraper):
def __init__(self, limit: int = 100):
INewslistScraper.__init__(self, limit)
self._tag_to_url = {
... | [
"datetime.datetime.today",
"datetime.timedelta"
] | [((1812, 1828), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1826, 1828), False, 'from datetime import datetime, timedelta\n'), ((2087, 2103), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2101, 2103), False, 'from datetime import datetime, timedelta\n'), ((2106, 2135), 'datetime.ti... |
#!/usr/bin/python
import regret as r
import sys
import os
n = int(sys.argv[1])
fout = open(sys.argv[3], 'w')
print(n, file=fout)
for i in range(0, n):
    print(i, r.mult_valuation(sys.argv[2], i), file=fout)
| [
"regret.mult_valuation"
] | [((169, 201), 'regret.mult_valuation', 'r.mult_valuation', (['sys.argv[2]', 'i'], {}), '(sys.argv[2], i)\n', (185, 201), True, 'import regret as r\n')] |
from __future__ import print_function
import os
def build_is_triggered():
"""
    If a build is being triggered via GitHub directly (either by a comment or
    automatically), then the ``ghprb`` plugin will probably be involved. When that is
the case, that plugin injects a wealth of environment variables, which can
... | [
"os.path.join",
"os.environ.get",
"os.getenv"
] | [((818, 848), 'os.getenv', 'os.getenv', (['"""GITHUB_REPOSITORY"""'], {}), "('GITHUB_REPOSITORY')\n", (827, 848), False, 'import os\n'), ((866, 889), 'os.getenv', 'os.getenv', (['"""GITHUB_SHA"""'], {}), "('GITHUB_SHA')\n", (875, 889), False, 'import os\n'), ((1010, 1044), 'os.path.join', 'os.path.join', (['base_url', ... |
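A hedged sketch of the detection the docstring describes; the ghprb variable names follow the plugin's convention, but treating any ghprb* variable as evidence of a trigger is my assumption.

import os

def build_is_triggered():
    # the ghprb plugin exports ghprb* variables when it triggers a build
    return any(name.startswith('ghprb') for name in os.environ)

if build_is_triggered():
    print('triggered commit:', os.environ.get('ghprbActualCommit'))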
#!/usr/bin/env python
"""
Dummy DARPA scoring server
"""
import os
import sys
import csv
import math
import json
import logging
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from mimetypes import guess_type
g_logger = logging.getLogger(__name__)
def dist3d(xyz, xy... | [
"logging.getLogger",
"logging.basicConfig",
"json.loads",
"argparse.ArgumentParser",
"http.server.HTTPServer",
"collections.defaultdict",
"csv.reader"
] | [((272, 299), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'import logging\n'), ((2630, 2768), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""', 'datefmt': '"""%Y-%m-%d ... |
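The dist3d helper is cut off above; a self-contained version under the assumption that it computes plain Euclidean distance between (x, y, z) triples.

import math

def dist3d(xyz1, xyz2):
    # assumed reconstruction: straight-line distance in three dimensions
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(xyz1, xyz2)))

print(dist3d((0, 0, 0), (3, 4, 12)))  # 13.0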
import pytest
from scrapy import Request
from scrapy.http import Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from twisted.internet.error import DNSLookupError
from scrapy_fake_useragent.middleware import RetryUserAgentMiddleware
@pytest.fixture
def retry_middleware_response(... | [
"scrapy_fake_useragent.middleware.RetryUserAgentMiddleware.from_crawler",
"twisted.internet.error.DNSLookupError",
"scrapy.utils.test.get_crawler",
"pytest.mark.parametrize",
"scrapy.Request",
"scrapy.http.Response"
] | [((1450, 1571), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""retry_middleware_response"""', "(({'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503),)"], {'indirect': '(True)'}), "('retry_middleware_response', (({\n 'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503),), indirect=True)\n", (1473, 1571), False, 'import py... |
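A guess at the shape of the truncated fixture, built only from the calls listed in the row's API list; the settings value mirrors the parametrize case shown in the extract.

import pytest
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from scrapy_fake_useragent.middleware import RetryUserAgentMiddleware

@pytest.fixture
def retry_middleware():
    # build a crawler around a minimal spider and hand it to the middleware factory
    crawler = get_crawler(Spider, settings_dict={'FAKEUSERAGENT_FALLBACK': 'firefox'})
    return RetryUserAgentMiddleware.from_crawler(crawler)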
"""Base Manager class."""
import logging
from pathlib import Path
from typing import Optional
import nowcasting_dataset.utils as nd_utils
from nowcasting_dataset import config
from nowcasting_dataset.data_sources import ALL_DATA_SOURCE_NAMES, MAP_DATA_SOURCE_NAME_TO_CLASS
logger = logging.getLogger(__name__)
class... | [
"logging.getLogger",
"nowcasting_dataset.config.set_git_commit",
"nowcasting_dataset.utils.remove_regex_pattern_from_keys",
"nowcasting_dataset.config.load_yaml_configuration"
] | [((285, 312), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (302, 312), False, 'import logging\n'), ((1190, 1230), 'nowcasting_dataset.config.load_yaml_configuration', 'config.load_yaml_configuration', (['filename'], {}), '(filename)\n', (1220, 1230), False, 'from nowcasting_dataset impo... |
import cherrypy
import requests
import json
from CommunicationLayer import ServiceRegistry
@cherrypy.popargs('imageName')
class ImageAPI(object):
address = "http://127.0.0.1:8761/"
@cherrypy.expose()
def index(self, imageName):
        # Get data centers
servicesArray = ServiceRegistry.getServi... | [
"requests.Session",
"cherrypy.popargs",
"cherrypy.expose",
"cherrypy.HTTPError",
"CommunicationLayer.ServiceRegistry.getServices"
] | [((94, 123), 'cherrypy.popargs', 'cherrypy.popargs', (['"""imageName"""'], {}), "('imageName')\n", (110, 123), False, 'import cherrypy\n'), ((194, 211), 'cherrypy.expose', 'cherrypy.expose', ([], {}), '()\n', (209, 211), False, 'import cherrypy\n'), ((296, 331), 'CommunicationLayer.ServiceRegistry.getServices', 'Servic... |
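How popargs routes a trailing path segment into the handler, as a runnable toy; the class body below is mine, not the dataset's.

import cherrypy

@cherrypy.popargs('imageName')
class ImageAPI(object):
    @cherrypy.expose
    def index(self, imageName):
        # /anything/<imageName> binds the last URL segment to this argument
        return "requested image: %s" % imageName

if __name__ == '__main__':
    cherrypy.quickstart(ImageAPI())  # serves on http://127.0.0.1:8080/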
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
SeeKeR Search Decision Tasks.
"""
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.param... | [
"parlai.utils.logging.warning"
] | [((3964, 4017), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators to {mutators}"""'], {}), "(f'overriding mutators to {mutators}')\n", (3979, 4017), True, 'import parlai.utils.logging as logging\n'), ((1311, 1364), 'parlai.utils.logging.warning', 'logging.warning', (['f"""overriding mutators... |
from django.contrib import admin
from django.urls import path, include
from django101 import cities
from django101.cities.views import index, list_phones, test_index, create_person
urlpatterns = [
path('admin/', admin.site.urls),
path('test/', test_index),
path('create/', create_person, name='c... | [
"django.urls.path",
"django.urls.include"
] | [((212, 243), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (216, 243), False, 'from django.urls import path, include\n'), ((250, 275), 'django.urls.path', 'path', (['"""test/"""', 'test_index'], {}), "('test/', test_index)\n", (254, 275), False, 'from django.ur... |
from __future__ import print_function
from datetime import date, datetime, timedelta
import os
import tempfile
PERIODS = {
'y': {'name': 'yesterday', 'description': 'Yesterday'},
'lw': {'name': 'lastweek', 'description': 'Last work week'},
'cw': {'name': 'currentweek', 'description': 'Current work week'},
'fl... | [
"datetime.datetime.now",
"datetime.timedelta",
"tempfile.gettempdir",
"datetime.date.today"
] | [((1888, 1902), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1900, 1902), False, 'from datetime import date, datetime, timedelta\n'), ((1905, 1952), 'datetime.timedelta', 'timedelta', ([], {'days': 'days_ahead_of_weekday_last_week'}), '(days=days_ahead_of_weekday_last_week)\n', (1914, 1952), False, 'from... |
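The "last week" arithmetic visible in the extract, isolated; the weekday convention (Monday == 0) is Python's, and the function name is hypothetical.

from datetime import datetime, timedelta

def last_weeks_monday(today=None):
    today = today or datetime.now()
    days_ahead_of_weekday_last_week = today.weekday() + 7  # Monday == 0
    return today - timedelta(days=days_ahead_of_weekday_last_week)

print(last_weeks_monday())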
import os
import re
import statistics
def find_all_key_files_path(directory, keyfile_name):
fn = re.compile(".*"+keyfile_name + ".*")
path=[]
for root, dirs, files in os.walk(directory):
for file in files:
if fn.match(file) is not None:
#print(file)
path.... | [
"statistics.mean",
"os.path.join",
"os.walk",
"re.compile"
] | [((102, 140), 're.compile', 're.compile', (["('.*' + keyfile_name + '.*')"], {}), "('.*' + keyfile_name + '.*')\n", (112, 140), False, 'import re\n'), ((180, 198), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (187, 198), False, 'import os\n'), ((327, 351), 'os.path.join', 'os.path.join', (['root', 'file'... |
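A hedged completion of the walker truncated above; the return shape (a flat list of full paths) is an assumption.

import os
import re

def find_all_key_files_path(directory, keyfile_name):
    fn = re.compile(".*" + keyfile_name + ".*")
    paths = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if fn.match(file) is not None:
                paths.append(os.path.join(root, file))  # matches the extract above
    return paths

print(find_all_key_files_path(".", "key"))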
# -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from . import _
from plone.app.vocabularies.catalog import CatalogSource
from plone.namedfile.field import NamedBlobImage
from plone.supermodel import model
from z3c.relationfield.schema import RelationChoice
from zope import schema... | [
"plone.app.vocabularies.catalog.CatalogSource",
"plone.supermodel.model.primary"
] | [((401, 453), 'plone.app.vocabularies.catalog.CatalogSource', 'CatalogSource', ([], {'portal_type': "('Document', 'News Item')"}), "(portal_type=('Document', 'News Item'))\n", (414, 453), False, 'from plone.app.vocabularies.catalog import CatalogSource\n'), ((990, 1012), 'plone.supermodel.model.primary', 'model.primary... |
#!/usr/bin/env python
# encoding: utf-8
__author__ = 'hasee'
import json
from datetime import datetime
class BlockInfo(object):
def __init__(self):
        # block hash
self.block_id = ''
        # block height
self.block_num = 0
        # block size
self.block_size = 0
        # hash of the previous block
self.pre... | [
"datetime.datetime.now"
] | [((1648, 1662), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1660, 1662), False, 'from datetime import datetime\n')] |
"""
Command line options tests
"""
import os
import re
from six import iteritems, StringIO
try:
# Python 3
from unittest.mock import patch
except ImportError:
from mock import patch
from dirsync.options import ArgParser
from dirsync.run import sync
from ._base import DirSyncTestCase
fr... | [
"mock.patch",
"os.path.join",
"dirsync.run.sync",
"dirsync.options.ArgParser",
"six.iteritems"
] | [((1150, 1192), 'mock.patch', 'patch', (['"""sys.stdout"""'], {'new_callable': 'StringIO'}), "('sys.stdout', new_callable=StringIO)\n", (1155, 1192), False, 'from mock import patch\n'), ((491, 505), 'dirsync.run.sync', 'sync', ([], {}), '(**kwargs)\n', (495, 505), False, 'from dirsync.run import sync\n'), ((1937, 1955)... |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import logging
import lzma
from hashlib import sha256
from typing import Optional, Tuple
from eth_typing.encoding import HexStr
from flask import Response, request
from flask_sieve import validate
from ocean_provider.requests_session ... | [
"logging.getLogger",
"ocean_provider.utils.basics.get_web3",
"ocean_provider.utils.util.get_request_data",
"flask_sieve.validate",
"ocean_provider.utils.data_nft_factory.is_nft_deployed_from_factory",
"hashlib.sha256",
"ocean_provider.utils.data_nft.get_metadata",
"ocean_provider.requests_session.get_... | [((1014, 1035), 'ocean_provider.utils.basics.get_provider_wallet', 'get_provider_wallet', ([], {}), '()\n', (1033, 1035), False, 'from ocean_provider.utils.basics import get_config, get_provider_wallet, get_web3\n'), ((1055, 1077), 'ocean_provider.requests_session.get_requests_session', 'get_requests_session', ([], {})... |
from __future__ import annotations
import os
from datetime import datetime
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
token = CowrieConfig.get("output_csirtg", "token", fallback="<PASSWORD>")
if token == "<PASSWORD>":
log.msg("output_csirtg: token not fou... | [
"twisted.python.log.msg",
"cowrie.core.config.CowrieConfig.get",
"cowrie.core.config.CowrieConfig.getboolean",
"datetime.datetime.now",
"csirtgsdk.indicator.Indicator"
] | [((187, 252), 'cowrie.core.config.CowrieConfig.get', 'CowrieConfig.get', (['"""output_csirtg"""', '"""token"""'], {'fallback': '"""<PASSWORD>"""'}), "('output_csirtg', 'token', fallback='<PASSWORD>')\n", (203, 252), False, 'from cowrie.core.config import CowrieConfig\n'), ((283, 346), 'twisted.python.log.msg', 'log.msg... |
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
fn = sys.argv[1]
pal = sns.color_palette()
with open(fn) as f:
toPlot = []
names = []
goodness = []
xs = []
ys = []
ps = []
sns.set()
for line in f:
tokens = line.split(' ')
if len(tokens) ==... | [
"seaborn.set",
"seaborn.color_palette",
"matplotlib.use",
"matplotlib.pyplot.clf",
"seaborn.lineplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cla"
] | [((29, 50), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (43, 50), False, 'import matplotlib\n'), ((128, 147), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (145, 147), True, 'import seaborn as sns\n'), ((243, 252), 'seaborn.set', 'sns.set', ([], {}), '()\n', (250, 252), True,... |
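The one ordering rule this snippet depends on: matplotlib.use('Agg') must run before pyplot is imported. A minimal headless plot under that rule; the data and file name are arbitrary.

import matplotlib
matplotlib.use('Agg')             # select the non-interactive backend first
import matplotlib.pyplot as plt   # only then import pyplot
import seaborn as sns

sns.set()
sns.lineplot(x=[0, 1, 2], y=[0.2, 0.5, 0.4])
plt.title("headless example")
plt.savefig("out.png")            # no display needed under Agg
plt.close()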
from unittest import TestCase
from apps.profiles.models import Profile
from apps.recipes.models import Recipe, Ingredient
from apps.recommendations.utils import ComparableInventory
# TODO: written for manual testing against a preloaded db; for general use, create the resources in setUp()
class ComparableInventoryTest(T... | [
"apps.recipes.models.Recipe.objects.get",
"apps.profiles.models.Profile.objects.get",
"apps.recipes.models.Ingredient.objects.filter",
"apps.recommendations.utils.ComparableInventory"
] | [((366, 391), 'apps.profiles.models.Profile.objects.get', 'Profile.objects.get', ([], {'pk': '(3)'}), '(pk=3)\n', (385, 391), False, 'from apps.profiles.models import Profile\n'), ((702, 726), 'apps.recommendations.utils.ComparableInventory', 'ComparableInventory', (['inv'], {}), '(inv)\n', (721, 726), False, 'from app... |
# Generated by Django 2.2.13 on 2020-07-28 15:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0075_profile_last_frontend_login'),
]
operations = [
migrations.RemoveField(
model_name='fieldreport',
name='cases',... | [
"django.db.migrations.RemoveField"
] | [((233, 295), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fieldreport"""', 'name': '"""cases"""'}), "(model_name='fieldreport', name='cases')\n", (255, 295), False, 'from django.db import migrations\n'), ((340, 412), 'django.db.migrations.RemoveField', 'migrations.RemoveField',... |
import serial
import json
import io
import time
ser = serial.Serial("COM24", 9600, timeout=2)
topic = "hydro/light1"
payload = 1
#data = json.dumps({"topic":topic,"payload":payload})
data = "{\"topic\":\"hydro/light1\",\"payload\":1}"
data = data.encode()
print(data)
ser.write(b'A')
hello = ser.readline()#.dec... | [
"serial.Serial"
] | [((56, 95), 'serial.Serial', 'serial.Serial', (['"""COM24"""', '(9600)'], {'timeout': '(2)'}), "('COM24', 9600, timeout=2)\n", (69, 95), False, 'import serial\n')] |
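The commented-out json.dumps line above is the safer way to build the payload; a sketch using it (port name and topic come from the snippet, the device's behaviour is assumed).

import json
import serial

ser = serial.Serial("COM24", 9600, timeout=2)
data = json.dumps({"topic": "hydro/light1", "payload": 1}).encode()
ser.write(data)        # let json handle quoting instead of a hand-escaped string
print(ser.readline())  # device's reply, if any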
import random
aluno1 = input('Student 1 name: ')
aluno2 = input('Student 2 name: ')
aluno3 = input('Student 3 name: ')
aluno4 = input('Student 4 name: ')
sorteado = random.choice([aluno1, aluno2, aluno3, aluno4])
print('The student drawn to erase the board was: {}'.format(sorteado))
| [
"random.choice"
] | [((159, 206), 'random.choice', 'random.choice', (['[aluno1, aluno2, aluno3, aluno4]'], {}), '([aluno1, aluno2, aluno3, aluno4])\n', (172, 206), False, 'import random\n')] |
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.views.generic import ListView, DetailView
from .models import Poll, Choice
class IndexView(ListView):
context_object_name... | [
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"django.urls.reverse"
] | [((633, 672), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Poll'], {'pk': 'question_id'}), '(Poll, pk=question_id)\n', (650, 672), False, 'from django.shortcuts import get_object_or_404, render\n'), ((864, 976), 'django.shortcuts.render', 'render', (['request', '"""polls/detail.html"""', '{\'question\'... |
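A sketch of a detail view consistent with the two calls visible in the extract; the body is assumed, not quoted from the dataset.

from django.shortcuts import get_object_or_404, render
from .models import Poll  # as in the snippet above

def detail(request, question_id):
    question = get_object_or_404(Poll, pk=question_id)  # 404 if the poll is missing
    return render(request, 'polls/detail.html', {'question': question})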
import os
import unittest
import invirtualenv.contextmanager
class TestContextmanager(unittest.TestCase):
def test__revert_file(self):
with invirtualenv.contextmanager.InTemporaryDirectory():
with open('testfile', 'w') as fh:
fh.write('original')
self.assertEqual('o... | [
"os.path.exists"
] | [((839, 862), 'os.path.exists', 'os.path.exists', (['tempdir'], {}), '(tempdir)\n', (853, 862), False, 'import os\n')] |