| code (string, lengths 22–1.05M) | apis (list, lengths 1–3.31k) | extract_api (string, lengths 75–3.25M) |
|---|---|---|
from dagster import ModeDefinition, pipeline
from .database_resources import postgres_database, sqlite_database
from .solids_with_resources import generate_table_1, generate_table_2
@pipeline(
mode_defs=[
ModeDefinition("local_dev", resource_defs={"database": sqlite_database}),
ModeDefinition("pr... | [
"dagster.ModeDefinition"
] | [((220, 292), 'dagster.ModeDefinition', 'ModeDefinition', (['"""local_dev"""'], {'resource_defs': "{'database': sqlite_database}"}), "('local_dev', resource_defs={'database': sqlite_database})\n", (234, 292), False, 'from dagster import ModeDefinition, pipeline\n'), ((302, 371), 'dagster.ModeDefinition', 'ModeDefinitio... |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-15 12:13
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("apostello", "0006_userprofile_show_tour")]
operations = [
m... | [
"django.db.models.BooleanField"
] | [((391, 424), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (410, 424), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 eminga
# Licensed under MIT License
import datetime, pytz, re, helper
def grab(channel, timespan):
tz = pytz.timezone("Europe/Berlin")
now = datetime.datetime.now(tz)
shows = []
a = 0
if now.time().hour < 7:
a = -1
for i in range(a, 14):
date = now + datetime.... | [
"pytz.timezone",
"helper.cleanup",
"datetime.datetime.min.replace",
"datetime.datetime.now",
"helper.cut",
"datetime.timedelta",
"helper.download",
"helper.split",
"re.search"
] | [((154, 184), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Berlin"""'], {}), "('Europe/Berlin')\n", (167, 184), False, 'import datetime, pytz, re, helper\n'), ((192, 217), 'datetime.datetime.now', 'datetime.datetime.now', (['tz'], {}), '(tz)\n', (213, 217), False, 'import datetime, pytz, re, helper\n'), ((1877, 1897)... |
from genericpath import exists
import math
import numpy as np
import os
import re
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib import cm
# append line to log file
def log(file, line, doPrint=True):
f = open(file, "a+")
    f.write(line + "\n")
f.close()
if doPrint:
print(l... | [
"numpy.copy",
"os.path.exists",
"PIL.Image.fromarray",
"numpy.mean",
"numpy.abs",
"os.makedirs",
"numpy.arange",
"math.pow",
"PIL.Image.new",
"matplotlib.cm.magma",
"numpy.asarray",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.sho... | [((460, 482), 'numpy.asarray', 'np.asarray', (['history_L1'], {}), '(history_L1)\n', (470, 482), True, 'import numpy as np\n'), ((496, 521), 'numpy.asarray', 'np.asarray', (['history_L1val'], {}), '(history_L1val)\n', (506, 521), True, 'import numpy as np\n'), ((527, 539), 'matplotlib.pyplot.figure', 'plt.figure', ([],... |
#!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Corp.
# See LICENSE.txt for license details.
# Author:
# <NAME> (<EMAIL>)
# Home:
# http://trentm.com/projects/px/
"""Test p4lib.py's interface to 'p4 delete'."""
import os
import sys
import unittest
import types
import pprint
import testsupport
from p4... | [
"os.chdir",
"unittest.makeSuite",
"p4lib.P4",
"os.getcwd"
] | [((5507, 5541), 'unittest.makeSuite', 'unittest.makeSuite', (['DeleteTestCase'], {}), '(DeleteTestCase)\n', (5525, 5541), False, 'import unittest\n'), ((429, 433), 'p4lib.P4', 'P4', ([], {}), '()\n', (431, 433), False, 'from p4lib import P4, P4LibError\n'), ((448, 459), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (457,... |
from typing import Any, Generic, Protocol, Type, TypeVar
from loguru import logger
from sqlalchemy import select, func
from sqlalchemy.ext.asyncio import AsyncSession
class ModelBase(Protocol):
id: Any
def __init__(*args, **kwargs):
...
T = TypeVar("T")
ModelT = TypeVar("ModelT", bound=ModelBase)... | [
"sqlalchemy.func.count",
"sqlalchemy.select",
"loguru.logger.debug",
"typing.TypeVar"
] | [((263, 275), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (270, 275), False, 'from typing import Any, Generic, Protocol, Type, TypeVar\n'), ((286, 320), 'typing.TypeVar', 'TypeVar', (['"""ModelT"""'], {'bound': 'ModelBase'}), "('ModelT', bound=ModelBase)\n", (293, 320), False, 'from typing import Any, Ge... |
from __future__ import annotations
from .configs import *
from . import shared as td
import hashlib
# if TYPE_CHECKING:
# from ..opentele import *
class AuthKeyType(IntEnum):
"""
Type of `AuthKey`
### Attributes:
Generated (`IntEnum`):
Generated key
Temporary (`IntEnum`... | [
"hashlib.sha1"
] | [((1662, 1686), 'hashlib.sha1', 'hashlib.sha1', (['self.__key'], {}), '(self.__key)\n', (1674, 1686), False, 'import hashlib\n'), ((1914, 1962), 'hashlib.sha1', 'hashlib.sha1', (['(msgKey[:16] + self.__key[x:x + 32])'], {}), '(msgKey[:16] + self.__key[x:x + 32])\n', (1926, 1962), False, 'import hashlib\n'), ((1992, 208... |
import os
from pandas import DataFrame
import time
times = [0]
sizes = [0]
run_time_seconds = 200
def run_storage_analysis():
path = 'temporary'
    # initialize the size
total_size = 0
# use the walk() method to navigate through directory tree
for dirpath, dirnames, filenames in os.wa... | [
"os.path.getsize",
"os.path.join",
"time.sleep",
"pandas.DataFrame",
"os.walk"
] | [((315, 328), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (322, 328), False, 'import os\n'), ((846, 895), 'pandas.DataFrame', 'DataFrame', (["{'Time': times, 'Size (bytes)': sizes}"], {}), "({'Time': times, 'Size (bytes)': sizes})\n", (855, 895), False, 'from pandas import DataFrame\n'), ((1199, 1212), 'time.slee... |
"""
Test multilevel overriding of java methods in jythonc.
"""
from java.util import Date
class SubDate(Date):
def toString(self):
s = Date.toString(self)
return 'SubDate -> Date'
class SubSubDate(SubDate):
def toString(self):
return 'SubSubDate -> ' + SubDate.toString(self)
asser... | [
"java.util.Date.toString"
] | [((150, 169), 'java.util.Date.toString', 'Date.toString', (['self'], {}), '(self)\n', (163, 169), False, 'from java.util import Date\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li
"""
import bisect
import datetime as dt
from typing import Iterable
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions... | [
"alphamind.data.processing.factor_processing",
"numpy.unique",
"pandas.DataFrame",
"datetime.datetime.strptime",
"pandas.DatetimeIndex",
"pandas.merge",
"PyFin.api.makeSchedule",
"PyFin.DateUtilities.Period",
"bisect.bisect_right",
"alphamind.utilities.alpha_logger.info",
"PyFin.api.advanceDateB... | [((1853, 1874), 'numpy.unique', 'np.unique', (['date_label'], {}), '(date_label)\n', (1862, 1874), True, 'import numpy as np\n'), ((2571, 2728), 'PyFin.api.makeSchedule', 'makeSchedule', (['start_date', 'end_date', 'frequency'], {'calendar': '"""china.sse"""', 'dateRule': 'BizDayConventions.Following', 'dateGenerationR... |
import logging
import sys
from AppleFluenza.bot import auto_load_cogs, bot
from utils.getenv import getenv
from utils.cli import header, option_parser
if __name__ == "__main__":
header()
auto_load_cogs(bot)
optparser = option_parser()
(options, args) = optparser.parse_args(sys.argv)
token = g... | [
"logging.getLogger",
"utils.getenv.getenv",
"utils.cli.option_parser",
"AppleFluenza.bot.auto_load_cogs",
"AppleFluenza.bot.bot.run",
"AppleFluenza.bot.bot.logger.info",
"utils.cli.header"
] | [((186, 194), 'utils.cli.header', 'header', ([], {}), '()\n', (192, 194), False, 'from utils.cli import header, option_parser\n'), ((200, 219), 'AppleFluenza.bot.auto_load_cogs', 'auto_load_cogs', (['bot'], {}), '(bot)\n', (214, 219), False, 'from AppleFluenza.bot import auto_load_cogs, bot\n'), ((237, 252), 'utils.cli... |
# %% Packages
import os
import pickle
from pyhocon import ConfigTree
# %% Functions
def load_pickle(loading_path: str):
"""This method loads the file at the specified path
:param loading_path: Path at which object is saved
:type loading_path: str
:return: Desired file
:rtype: Could be basically... | [
"os.listdir",
"pickle.load"
] | [((397, 414), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (408, 414), False, 'import pickle\n'), ((1449, 1471), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (1459, 1471), False, 'import os\n')] |
import logging
import blueforge.apis.telegram as tg
import requests
from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, \
GenericTemplate, Element, PostBackButton
from chatbrick.util import get_items_from_xml, UNKNOWN_ERROR_MSG
import time
logger = lo... | [
"logging.getLogger",
"blueforge.apis.telegram.CallbackButton",
"blueforge.apis.facebook.Message",
"blueforge.apis.telegram.SendPhoto",
"blueforge.apis.facebook.PostBackButton",
"chatbrick.util.get_items_from_xml",
"requests.get",
"blueforge.apis.facebook.QuickReplyTextItem",
"blueforge.apis.telegram... | [((318, 345), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (335, 345), False, 'import logging\n'), ((4882, 5220), 'requests.get', 'requests.get', ([], {'url': "('http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s'\n % (input_d... |
from django.db import models
from django.contrib.auth.models import User
class Flight(models.Model):
STATUSES = (
('SCHEDULED', 'SCHEDULED'),
('DELAYED', 'DELAYED'),
('ON_TIME', 'ON TIME'),
('ARRIVED', 'ARRIVED'),
('LATE', 'LATE')
)
number = models.CharField(max_len... | [
"django.db.models.DateTimeField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.ForeignKey"
] | [((296, 327), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (312, 327), False, 'from django.db import models\n'), ((349, 371), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (369, 371), False, 'from django.db import models\n'), ((391, 4... |
import geopandas as gpd
# not used anymore - converts esri jobs shapefile to a csv
# see assign_jobs_lat_lng.py
gdf = gpd.GeoDataFrame.from_file("est10_esri_gt1.shp")
gdf = gdf.to_crs(epsg=4326)
fname_map = {
'Duns_Numbe': 'duns_number',
'Business_N': 'business_name',
'Emp_Total': 'total_employment',
... | [
"geopandas.GeoDataFrame.from_file"
] | [((120, 168), 'geopandas.GeoDataFrame.from_file', 'gpd.GeoDataFrame.from_file', (['"""est10_esri_gt1.shp"""'], {}), "('est10_esri_gt1.shp')\n", (146, 168), True, 'import geopandas as gpd\n')] |
import argparse
import random
import operator
import os
def parse_grammar(file_path):
"""
Generate a grammar from a file describing the production rules.
Note that the symbols are inferred from the production rules.
    For more information on the format of the file, please refer to
    the README.md or the sample ... | [
"operator.itemgetter",
"random.choice",
"argparse.ArgumentParser"
] | [((3060, 3112), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Grammar utils"""'}), "(description='Grammar utils')\n", (3083, 3112), False, 'import argparse\n'), ((2052, 2074), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2071, 2074), False, 'import operator\n')... |
#TODO: use only one (RGB) channel
import numpy as np
import pandas as pd
import os
from torch.utils import data
from torch.utils.data.dataloader import DataLoader as DataLoader
import torch
from torchvision import transforms
from natsort import natsorted, ns
import cv2
from PIL import Image
import matplotlib.pyplot as ... | [
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.init.xavier_uniform_",
"torch.utils.data.dataloader.DataLoader",
"torchvision.transforms.ToTensor",
"torch.utils.data.random_split",
"numpy.floor",
"torch.transpose",
"torch.is_tensor",
"torchvision.transforms.Resize",
"torch.nn.BCEWithLog... | [((647, 672), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (670, 672), False, 'import torch\n'), ((5829, 5894), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['dataset', '[train_split, test_split]'], {}), '(dataset, [train_split, test_split])\n', (5858, 5894), False, 'imp... |
# imports - module imports
from deeply.exception import (
DeeplyError
)
# imports - test imports
import pytest
def test_deeply_error():
with pytest.raises(DeeplyError):
raise DeeplyError | [
"pytest.raises"
] | [((151, 177), 'pytest.raises', 'pytest.raises', (['DeeplyError'], {}), '(DeeplyError)\n', (164, 177), False, 'import pytest\n')] |
""" Common setup and patching for tests """
#pylint: disable=wrong-import-order
from datetime import datetime as orig_datetime, timedelta
from mock import patch
import threading
#pylint: disable=W0401,W0614
from test.fixtures import *
_thread_state = threading.local()
def _new_utcnow():
""" Return last set datet... | [
"threading.local",
"mock.patch",
"datetime.datetime.utcnow",
"datetime.datetime.now",
"datetime.timedelta"
] | [((253, 270), 'threading.local', 'threading.local', ([], {}), '()\n', (268, 270), False, 'import threading\n'), ((1088, 1125), 'mock.patch', 'patch', (['"""datetime.datetime"""'], {}), "('datetime.datetime', **_config)\n", (1093, 1125), False, 'from mock import patch\n'), ((774, 785), 'datetime.timedelta', 'timedelta',... |
import re
import numpy as np
import pandas as pd
import scipy.stats as stats
R_REGEX = re.compile('(.*):(.*)-(.*)')
R_REGEX_STRAND = re.compile('(.*):(.*)-(.*):(.*)')
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-even... | [
"StringIO.StringIO",
"pandas.Series",
"zlib.decompressobj",
"re.compile",
"scipy.stats.norm.ppf",
"urllib2.Request",
"numpy.array",
"scipy.stats.beta",
"pandas.DataFrame",
"urllib2.build_opener"
] | [((89, 117), 're.compile', 're.compile', (['"""(.*):(.*)-(.*)"""'], {}), "('(.*):(.*)-(.*)')\n", (99, 117), False, 'import re\n'), ((135, 168), 're.compile', 're.compile', (['"""(.*):(.*)-(.*):(.*)"""'], {}), "('(.*):(.*)-(.*):(.*)')\n", (145, 168), False, 'import re\n'), ((2426, 2448), 'urllib2.build_opener', 'urllib2... |
import os
import sys
import argparse
import difflib
import json
import logging
import pluggy
import colorama
import boto3
from pathlib import Path
from . lib.autoawsume import create_autoawsume_profile
from ..autoawsume.process import kill, kill_autoawsume
from . lib.profile import aggregate_profiles, get_role_chain, ... | [
"argparse.RawDescriptionHelpFormatter",
"json.loads",
"pathlib.Path",
"pluggy.PluginManager",
"difflib.get_close_matches",
"colorama.init"
] | [((1085, 1114), 'colorama.init', 'colorama.init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (1098, 1114), False, 'import colorama\n'), ((1236, 1266), 'pluggy.PluginManager', 'pluggy.PluginManager', (['"""awsume"""'], {}), "('awsume')\n", (1256, 1266), False, 'import pluggy\n'), ((7989, 8010), 'json.loads', '... |
# Generated by Django 3.0.5 on 2020-04-28 15:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("meadow", "0006_mmake_isbn_charfield"),
]
operations = [
migrations.AddField(model_name="book", name="is_approved", field=models.BooleanField... | [
"django.db.models.BooleanField"
] | [((301, 335), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (320, 335), False, 'from django.db import migrations, models\n')] |
import cv2 as cv
"""
Choose background substractor
"""
algo = 'MOG2'
input = 'videos/shine.mp4'
if algo == 'MOG2':
backSub = cv.createBackgroundSubtractorMOG2()
else:
backSub = cv.createBackgroundSubtractorKNN()
capture = cv.VideoCapture(input)
if not capture.isOpened():
print('Unable to open: ' + inpu... | [
"cv2.createBackgroundSubtractorMOG2",
"cv2.rectangle",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.createBackgroundSubtractorKNN",
"cv2.waitKey"
] | [((235, 257), 'cv2.VideoCapture', 'cv.VideoCapture', (['input'], {}), '(input)\n', (250, 257), True, 'import cv2 as cv\n'), ((132, 167), 'cv2.createBackgroundSubtractorMOG2', 'cv.createBackgroundSubtractorMOG2', ([], {}), '()\n', (165, 167), True, 'import cv2 as cv\n'), ((188, 222), 'cv2.createBackgroundSubtractorKNN',... |
"Common functions that may be used everywhere"
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import sys
from distutils.util import strtobool
try:
input = raw_input
except NameError:
pass
def yes_no_query(question):
"""Ask the user... | [
"os.path.exists",
"sys.exit",
"os.remove"
] | [((1004, 1024), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (1018, 1024), False, 'import os\n'), ((1068, 1083), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (1077, 1083), False, 'import os\n'), ((1110, 1145), 'sys.exit', 'sys.exit', (['"""Cancelling operation..."""'], {}), "('Cancelling ope... |
import inspect
from typing import Union, Callable, Any, Iterable
from pytest_lambda.exceptions import DisabledFixtureError, NotImplementedFixtureError
from pytest_lambda.impl import LambdaFixture
__all__ = ['lambda_fixture', 'static_fixture', 'error_fixture',
'disabled_fixture', 'not_implemented_fixture']
... | [
"pytest_lambda.exceptions.NotImplementedFixtureError",
"pytest_lambda.impl.LambdaFixture",
"inspect.signature",
"pytest_lambda.exceptions.DisabledFixtureError"
] | [((1430, 1549), 'pytest_lambda.impl.LambdaFixture', 'LambdaFixture', (['fixture_names_or_lambda'], {'bind': 'bind', 'scope': 'scope', 'params': 'params', 'autouse': 'autouse', 'ids': 'ids', 'name': 'name'}), '(fixture_names_or_lambda, bind=bind, scope=scope, params=\n params, autouse=autouse, ids=ids, name=name)\n',... |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import hashlib
import sys
from datetime import datetime
import sentry_sdk
from authlib.oauth2 import OAut... | [
"flask.render_template",
"indico.web.flask.templating.get_template_module",
"indico.util.i18n._",
"indico.core.oauth.require_oauth.raise_error_response",
"sys.exc_info",
"werkzeug.exceptions.BadRequest",
"flask.session.clear",
"flask.request.headers.get",
"flask.jsonify",
"flask.g.get",
"werkzeu... | [((3012, 3051), 'indico.web.flask.templating.get_template_module', 'get_template_module', (['"""forms/_form.html"""'], {}), "('forms/_form.html')\n", (3031, 3051), False, 'from indico.web.flask.templating import get_template_module\n'), ((4433, 4453), 'flask.jsonify', 'jsonify', ([], {}), '(**json_data)\n', (4440, 4453... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Mon Aug 17 11:31:32 2020
Distance-Controlled Boundaries Coefficient (DCBC) evaluation
for a functional parcellation of brain cortex
INPUTS:
    sn: The subject number(s) to evaluate
hems: Hemisphere to test. 'L' - left hemisphere; 'R'... | [
"numpy.abs",
"numpy.sqrt",
"numpy.reshape",
"nibabel.load",
"numpy.where",
"numpy.delete",
"scipy.io.loadmat",
"numpy.floor",
"numpy.square",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.count_nonzero",
"numpy.nanmean",
"pandas.read_table",
"scipy.sparse.find"
] | [((1943, 2009), 'pandas.read_table', 'pd.read_table', (['"""DCBC/sc1_sc2_taskConds.txt"""'], {'delim_whitespace': '(True)'}), "('DCBC/sc1_sc2_taskConds.txt', delim_whitespace=True)\n", (1956, 2009), True, 'import pandas as pd\n'), ((2028, 2056), 'numpy.floor', 'np.floor', (['(maxDist / binWidth)'], {}), '(maxDist / bin... |
# coding: utf-8
"""
Container Security API
# Authentication You must authenticate to the Qualys Cloud Platform using Qualys account credentials (user name and password) and get the JSON Web Token (JWT) before you can start using the Container Security APIs. Use the Qualys Authentication API to get the JWT. *... | [
"six.iteritems"
] | [((4957, 4990), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (4970, 4990), False, 'import six\n')] |
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import functional
import numpy as np
class Rotate(nn.Module):
"""
    Rotate the image by a random angle between -degrees and degrees.
"""
def __init__(self, degrees, interpolation_method='nearest'):
super(Rotate, self... | [
"torchvision.transforms.functional.rotate",
"numpy.random.uniform"
] | [((488, 534), 'numpy.random.uniform', 'np.random.uniform', (['(-self.degrees)', 'self.degrees'], {}), '(-self.degrees, self.degrees)\n', (505, 534), True, 'import numpy as np\n'), ((608, 655), 'torchvision.transforms.functional.rotate', 'functional.rotate', (['noised_image', 'rotation_angle'], {}), '(noised_image, rota... |
import random
import string
def random_string_digits(string_length=10):
"""Generate a random string of letters and digits."""
letters_and_digits = string.ascii_letters + string.digits
return ''.join(random.choice(letters_and_digits) for _ in range(string_length))
| [
"random.choice"
] | [((213, 246), 'random.choice', 'random.choice', (['letters_and_digits'], {}), '(letters_and_digits)\n', (226, 246), False, 'import random\n')] |
#!/usr/bin/env python3
import sys
import json
import time
import subprocess
cats = {
"any": { "id": "vdoq4xvk", "output_file": "all.json", "output_file2": "all2.json", },
"100": { "id": "xk9jv4gd", "output_file": "100.json", "output_file2": "1002.json", },
'amq': { "id": "n2yj3r82", "output_file": "amq.j... | [
"json.loads",
"subprocess.run",
"time.sleep",
"sys.exit",
"json.dump"
] | [((941, 991), 'subprocess.run', 'subprocess.run', (["['curl', URL]"], {'capture_output': '(True)'}), "(['curl', URL], capture_output=True)\n", (955, 991), False, 'import subprocess\n'), ((1005, 1025), 'json.loads', 'json.loads', (['p.stdout'], {}), '(p.stdout)\n', (1015, 1025), False, 'import json\n'), ((1595, 1608), '... |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, <NAME> <<EMAIL>>
# Copyright 2018, <NAME> <<EMAIL>>
'''
Devices
=======
This module contains two classes describing UPnP devices.
:class:`Device`
---------------
... | [
"traceback.format_exc",
"eventdispatcher.EventDispatcher.__init__",
"eventdispatcher.Property",
"coherence.log.LogAble.__init__",
"coherence.upnp.core.utils.getPage",
"twisted.internet.defer.DeferredList",
"urllib.parse.urljoin",
"lxml.etree.fromstring",
"time.time",
"eventdispatcher.ListProperty"... | [((1665, 1679), 'eventdispatcher.Property', 'Property', (['None'], {}), '(None)\n', (1673, 1679), False, 'from eventdispatcher import EventDispatcher, Property, ListProperty\n'), ((2303, 2319), 'eventdispatcher.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (2315, 2319), False, 'from eventdispatcher import Even... |
"""
DO NOT MODIFY
Dataloader for parts 2 and 3
We will also call this file when loading test data
"""
import os
import glob
import io
from torchtext import data
class IMDB(data.Dataset):
name = 'imdb'
dirname = 'aclImdb'
def __init__(self, path, text_field, label_field, **kwargs):
fields = [('text... | [
"torchtext.data.Example.fromlist",
"os.path.join",
"io.open"
] | [((456, 490), 'os.path.join', 'os.path.join', (['path', 'label', '"""*.txt"""'], {}), "(path, label, '*.txt')\n", (468, 490), False, 'import os\n'), ((514, 551), 'io.open', 'io.open', (['fname', '"""r"""'], {'encoding': '"""utf-8"""'}), "(fname, 'r', encoding='utf-8')\n", (521, 551), False, 'import io\n'), ((630, 674),... |
#!/usr/bin/env python3
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requir... | [
"os.path.isdir",
"fontTools.ttLib.TTFont",
"os.path.basename"
] | [((1471, 1490), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1484, 1490), False, 'import os\n'), ((1590, 1607), 'fontTools.ttLib.TTFont', 'TTFont', (['font_file'], {}), '(font_file)\n', (1596, 1607), False, 'from fontTools.ttLib import TTFont\n'), ((1756, 1783), 'os.path.basename', 'os.path.basename',... |
import io
from banner import print_banner
def test_print_banner(monkeypatch) -> None:
horizontal = "1"
vertical = "1"
centered = "1"
char = "*"
statement = "O" # only capital letters
set_page = "2"
monkeypatch.setattr(
"sys.stdin",
io.StringIO(
f"{horizontal}\... | [
"banner.print_banner",
"io.StringIO"
] | [((399, 413), 'banner.print_banner', 'print_banner', ([], {}), '()\n', (411, 413), False, 'from banner import print_banner\n'), ((280, 369), 'io.StringIO', 'io.StringIO', (['f"""{horizontal}\n{vertical}\n{centered}\n{char}\n{statement}\n{set_page}"""'], {}), '(\n f"""{horizontal}\n{vertical}\n{centered}\n{char}\n{st... |
#
# Copyright 2019 BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, me... | [
"os.path.join",
"os.getcwd"
] | [((843, 854), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (852, 854), False, 'import os\n'), ((923, 956), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""project"""'], {}), "(BASE_DIR, 'project')\n", (935, 956), False, 'import os\n'), ((1032, 1064), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""common"""'], {}), ... |
import copy
import os
import time
from collections import OrderedDict
from sklearn.model_selection import train_test_split
from torchvision import models
import torch
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
from skimage.io import imread
from self_driving_ai.utils import *
"""
Credit: h... | [
"torch.utils.tensorboard.SummaryWriter",
"collections.OrderedDict",
"os.listdir",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"os.path.join",
"torch.utils.data.SubsetRandomSampler",
"torch.nn.MSELoss",
"skimage.io.imread",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch... | [((1923, 1967), 'torch.device', 'torch.device', (["('cuda:0' if USE_GPU else 'cpu')"], {}), "('cuda:0' if USE_GPU else 'cpu')\n", (1935, 1967), False, 'import torch\n'), ((1981, 2018), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""runs/self_driving_ai"""'], {}), "('runs/self_driving_ai')\n", (1994, 20... |
#!/usr/bin/env python
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the ab... | [
"ros_bt_py.nodes.topic.TopicPublisher",
"rostest.rosrun",
"rospy.init_node",
"threading.Lock",
"std_msgs.msg.Int32",
"rospy.wait_for_message",
"rospy.sleep",
"rospy.Subscriber"
] | [((3967, 4009), 'rospy.init_node', 'rospy.init_node', (['"""test_topic_publish_leaf"""'], {}), "('test_topic_publish_leaf')\n", (3982, 4009), False, 'import rospy\n'), ((4148, 4252), 'rostest.rosrun', 'rostest.rosrun', (['PKG', '"""test_topic_publish_leaf"""', 'TestTopicPublisherLeaf'], {'sysargs': "(sys.argv + ['--cov... |
# -*- coding: utf-8 -*-
import functools
import click
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope, add_arg_scope
from tfsnippet.bayes import BayesianNet
from tfsnippet.distributions import Normal, Bernoulli
from tfsnippet.examples.datasets import load_mnist, bernoulli_flow
from tfsnippe... | [
"tfsnippet.examples.datasets.load_mnist",
"tfsnippet.bayes.BayesianNet",
"tfsnippet.examples.nn.dense",
"click.option",
"tensorflow.placeholder",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"click.command",
"tfsnippet.utils.flatten",
"tensorflow.zeros",
"tfsnippet.utils.c... | [((2935, 2950), 'click.command', 'click.command', ([], {}), '()\n', (2948, 2950), False, 'import click\n'), ((2952, 3056), 'click.option', 'click.option', (['"""--result-dir"""'], {'help': '"""The result directory."""', 'metavar': '"""PATH"""', 'required': '(False)', 'type': 'str'}), "('--result-dir', help='The result ... |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import copy
# In[22]:
# with help from: https://www.geeksforgeeks.org/merge-sort/
def RecursiveMergeSort(input_array, is_first = True):
time_start = time.time(... | [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.savetxt",
"copy.deepcopy",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((3620, 3656), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.1024"""', 'int'], {}), "('./data/data0.1024', int)\n", (3630, 3656), True, 'import numpy as np\n'), ((3672, 3708), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.2048"""', 'int'], {}), "('./data/data0.2048', int)\n", (3682, 3708), True, 'import numpy ... |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 19:47:52 2019
@author: Zhou
"""
import torch
from Utils import load
from Data import load_data
from Modules import BasicDecoder, RNNEncoder
from Models import Model, MetaTranslator
from Train import MetaTrainer
import warnings
warnings.filterwarnings("igno... | [
"Modules.BasicDecoder",
"torch.load",
"Models.MetaTranslator",
"Utils.batch_bleu",
"torch.cuda.is_available",
"Utils.batch_rouge",
"Utils.batch_meteor",
"Train.MetaTrainer",
"Modules.RNNEncoder",
"Utils.load",
"Data.load_data",
"warnings.filterwarnings",
"Models.Model"
] | [((291, 324), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (314, 324), False, 'import warnings\n'), ((345, 370), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (368, 370), False, 'import torch\n'), ((570, 680), 'Data.load_data', 'load_data', (['"... |
# Generated by Django 4.0 on 2021-12-13 17:54
from django.db import migrations
_CAR_GOODS = 'Автотовары'  # Russian: "Car goods"
_APPLIANCES = 'Бытовая техника'  # Russian: "Home appliances"
def _create_categories(apps, schema_editor) -> None:
"""Создает две категории"""
# noinspection PyPep8Naming
Category = apps.get_model('shop', 'Category')
Catego... | [
"django.db.migrations.RunPython"
] | [((1091, 1181), 'django.db.migrations.RunPython', 'migrations.RunPython', ([], {'code': '_create_categories', 'reverse_code': 'migrations.RunPython.noop'}), '(code=_create_categories, reverse_code=migrations.\n RunPython.noop)\n', (1111, 1181), False, 'from django.db import migrations\n'), ((1221, 1309), 'django.db.... |
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from ibapi.order import Order
from ibapi.scanner import ScannerSubscription
from ibapi.ticktype import TickTypeEnum
from ibapi.common import *
from ibapi.tag_value import TagValue
from ibapi.execution import Executio... | [
"numpy.abs",
"ibapi.client.EClient.__init__",
"time.sleep",
"ibapi.tag_value.TagValue",
"ibapi.contract.Contract",
"ibapi.order.Order",
"pandas.DataFrame",
"ibapi.execution.ExecutionFilter",
"pandas.to_datetime"
] | [((1277, 1293), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (1282, 1293), False, 'from time import sleep, strftime, localtime, time\n'), ((1517, 1527), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (1525, 1527), False, 'from ibapi.contract import Contract\n'), ((1725, 1732), 'ibapi.order.Ord... |
import argparse
import os, socket
from datetime import datetime
import shutil
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from model import UNet
from warp import WarpingLayerBWFlow
from torch.utils.tensorboard import SummaryWriter
from dataloader import llenDataset
from torch.utils.... | [
"torch.utils.tensorboard.SummaryWriter",
"os.makedirs",
"argparse.ArgumentParser",
"torch.nn.L1Loss",
"os.path.join",
"dataloader.llenDataset",
"os.getcwd",
"datetime.datetime.now",
"warp.WarpingLayerBWFlow",
"model.UNet",
"torch.save",
"torch.utils.data.DataLoader",
"socket.gethostname"
] | [((602, 662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Low light enhancement"""'}), "(description='Low light enhancement')\n", (625, 662), False, 'import argparse\n'), ((1553, 1594), 'dataloader.llenDataset', 'llenDataset', (['args.data_path'], {'type': '"""train"""'}), "(args.data... |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 21:25:24 2015
@author: Konrad
"""
import copy
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sc_p
def gen_clusters(means, num_each):
    tup = ()
for m in means:
tup = tup + (np.random.multivariate_normal(m, np.... | [
"numpy.sqrt",
"numpy.ones",
"matplotlib.pyplot.show",
"scipy.special.gamma",
"numpy.concatenate",
"copy.deepcopy",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] | [((10939, 10949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10947, 10949), True, 'import matplotlib.pyplot as plt\n'), ((362, 381), 'numpy.concatenate', 'np.concatenate', (['tup'], {}), '(tup)\n', (376, 381), True, 'import numpy as np\n'), ((388, 411), 'numpy.random.shuffle', 'np.random.shuffle', (['data... |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME> (<EMAIL>)
# - <NAME> (<EMAIL>)
from mpi4py import MPI
from pandayoda.yodaco... | [
"pandayoda.yodacore.Interaction.Receiver",
"pandayoda.yodacore.Interaction.Requester"
] | [((419, 441), 'pandayoda.yodacore.Interaction.Receiver', 'Interaction.Receiver', ([], {}), '()\n', (439, 441), False, 'from pandayoda.yodacore import Interaction\n'), ((706, 729), 'pandayoda.yodacore.Interaction.Requester', 'Interaction.Requester', ([], {}), '()\n', (727, 729), False, 'from pandayoda.yodacore import In... |
import re
from collections import OrderedDict
import struct
import os
import decoder748
REG_EXP = re.compile(r'^\s*<([0-9a-f]+)>\s+<([0-9a-f]+)>\s+(\d+)$', re.M)
class CMap:
MAP_STRING = ''
def __init__(self):
self.codePoints = set()
self.cid2unicode = {}
self._feed()
def _feed... | [
"re.findall",
"decoder748.encoding",
"struct.pack",
"re.compile"
] | [((100, 166), 're.compile', 're.compile', (['"""^\\\\s*<([0-9a-f]+)>\\\\s+<([0-9a-f]+)>\\\\s+(\\\\d+)$"""', 're.M'], {}), "('^\\\\s*<([0-9a-f]+)>\\\\s+<([0-9a-f]+)>\\\\s+(\\\\d+)$', re.M)\n", (110, 166), False, 'import re\n'), ((356, 392), 're.findall', 're.findall', (['REG_EXP', 'self.MAP_STRING'], {}), '(REG_EXP, sel... |
import os
import json
import shutil
import numpy as np
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Tuple
from typing import Union
from typing import Callable
from typing import Optional
from typing import NamedTuple
from tqdm.autonotebook import tq... | [
"os.listdir",
"os.path.join",
"cftool.ml.utils.Estimator",
"cftool.ml.utils.Comparer",
"cftool.misc.shallow_copy_dict",
"os.path.isdir",
"cftool.dist.Parallel",
"tqdm.autonotebook.tqdm",
"shutil.rmtree",
"json.load",
"cftool.misc.update_dict"
] | [((5137, 5167), 'cftool.ml.utils.Comparer', 'Comparer', (['patterns', 'estimators'], {}), '(patterns, estimators)\n', (5145, 5167), False, 'from cftool.ml.utils import Comparer\n'), ((5423, 5469), 'os.path.join', 'os.path.join', (['workplace', 'ML_PIPELINE_SAVE_NAME'], {}), '(workplace, ML_PIPELINE_SAVE_NAME)\n', (5435... |
"""
One agent chooses an action and says it; the other agent does it. Both get a point if right.
This file was forked from mll/discrete_bottleneck_discrete_input.py.
"""
import torch
import torch.nn.functional as F
from torch import nn, optim
# from envs.world3c import World
from ulfs import alive_sieve, rl_common
from ulfs.s... | [
"ulfs.stochastic_trajectory.StochasticTrajectory",
"ulfs.stats.Stats",
"torch.LongTensor",
"ulfs.lexicon_recorder.LexiconRecorder",
"ulfs.alive_sieve.AliveSieve",
"ulfs.rl_common.draw_categorical_sample",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.functional.softmax",
"torch.nn.Embedding",
"tor... | [((10652, 10661), 'ulfs.stats.Stats', 'Stats', (['[]'], {}), '([])\n', (10657, 10661), False, 'from ulfs.stats import Stats\n'), ((4997, 5068), 'ulfs.rl_common.draw_categorical_sample', 'rl_common.draw_categorical_sample', ([], {'action_probs': 'probs', 'batch_idxes': 'None'}), '(action_probs=probs, batch_idxes=None)\n... |
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | [
"random.uniform",
"random.choice",
"numpy.minimum",
"numpy.maximum",
"random.randint",
"numpy.round"
] | [((15258, 15281), 'numpy.minimum', 'np.minimum', (['(1.0)', 'scalar'], {}), '(1.0, scalar)\n', (15268, 15281), True, 'import numpy as np\n'), ((15299, 15322), 'numpy.maximum', 'np.maximum', (['(0.0)', 'scalar'], {}), '(0.0, scalar)\n', (15309, 15322), True, 'import numpy as np\n'), ((16816, 16827), 'numpy.round', 'np.r... |
import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
import sklearn.gaussian_process.kernels as Kernels
from scipy.optimize import minimize
from numpy.linalg import norm
import tensorflow as tf
fr... | [
"scipy.optimize.minimize",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"sklearn.kernel_ridge.KernelRidge",
"numpy.logspace"
] | [((479, 513), 'numpy.array', 'np.array', (['[[1, 2], [2, 3], [3, 4]]'], {}), '([[1, 2], [2, 3], [3, 4]])\n', (487, 513), True, 'import numpy as np\n'), ((513, 538), 'numpy.array', 'np.array', (['[[1], [2], [3]]'], {}), '([[1], [2], [3]])\n', (521, 538), True, 'import numpy as np\n'), ((2240, 2263), 'numpy.zeros', 'np.z... |
from functools import partial
from .landmark import asf_importer, pts_importer
asf_image_importer = partial(asf_importer, image_origin=True)
asf_image_importer.__doc__ = asf_importer.__doc__
pts_image_importer = partial(pts_importer, image_origin=True)
pts_image_importer.__doc__ = pts_importer.__doc__
| [
"functools.partial"
] | [((103, 143), 'functools.partial', 'partial', (['asf_importer'], {'image_origin': '(True)'}), '(asf_importer, image_origin=True)\n', (110, 143), False, 'from functools import partial\n'), ((216, 256), 'functools.partial', 'partial', (['pts_importer'], {'image_origin': '(True)'}), '(pts_importer, image_origin=True)\n', ... |
import os
# example changed from EX_10.5.py to 10_5.py
for nome in os.listdir('./Minicurso/Minicurso API'):
    # adjust as needed for your file-name generation and layout
os.rename("./Minicurso/Minicurso API/"+nome, "./Minicurso/Minicurso API/"+nome+"_Minicurso_API.png")
print("arquivo ... | [
"os.rename",
"os.listdir"
] | [((70, 109), 'os.listdir', 'os.listdir', (['"""./Minicurso/Minicurso API"""'], {}), "('./Minicurso/Minicurso API')\n", (80, 109), False, 'import os\n'), ((200, 310), 'os.rename', 'os.rename', (["('./Minicurso/Minicurso API/' + nome)", "('./Minicurso/Minicurso API/' + nome + '_Minicurso_API.png')"], {}), "('./Minicurso/... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3, 5):
    raise RuntimeError("The minimum supported Python version is 3.5")
from setuptools import find_packages
from setuptools import setup
from HTMLReport import __version__, __author__
try:
from pypandoc import convert
r... | [
"pypandoc.convert",
"setuptools.find_packages"
] | [((329, 356), 'pypandoc.convert', 'convert', (['"""README.md"""', '"""rst"""'], {}), "('README.md', 'rst')\n", (336, 356), False, 'from pypandoc import convert\n'), ((959, 974), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (972, 974), False, 'from setuptools import find_packages\n')] |
import os
import torch
import torch.nn as nn
import numpy as np
import pickle
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
... | [
"pickle.dump",
"os.path.exists",
"torch.load",
"os.path.join"
] | [((450, 493), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.name'], {}), '(opt.checkpoints_dir, opt.name)\n', (462, 493), False, 'import os\n'), ((1203, 1245), 'os.path.join', 'os.path.join', (['self.save_dir', 'save_filename'], {}), '(self.save_dir, save_filename)\n', (1215, 1245), False, 'import os\n'... |
from JumpScale import j
def cb():
from .ms1 import MS1Factory
return MS1Factory()
j.base.loader.makeAvailable(j, 'tools')
j.tools._register('ms1', cb)
| [
"JumpScale.j.tools._register",
"JumpScale.j.base.loader.makeAvailable"
] | [((92, 131), 'JumpScale.j.base.loader.makeAvailable', 'j.base.loader.makeAvailable', (['j', '"""tools"""'], {}), "(j, 'tools')\n", (119, 131), False, 'from JumpScale import j\n'), ((132, 160), 'JumpScale.j.tools._register', 'j.tools._register', (['"""ms1"""', 'cb'], {}), "('ms1', cb)\n", (149, 160), False, 'from JumpSc... |
## temp utility
from __future__ import print_function
import frappe
from erpnext.utilities.activation import get_level
from frappe.utils import cstr
def update_doctypes():
for d in frappe.db.sql("""select df.parent, df.fieldname
from tabDocField df, tabDocType dt where df.fieldname
like "%description%" and df.par... | [
"frappe.db.get_single_value",
"erpnext.utilities.activation.get_level",
"frappe.db.sql",
"frappe.get_doc",
"frappe.utils.cstr"
] | [((183, 377), 'frappe.db.sql', 'frappe.db.sql', (['"""select df.parent, df.fieldname\n\t\tfrom tabDocField df, tabDocType dt where df.fieldname\n\t\tlike "%description%" and df.parent = dt.name and dt.istable = 1"""'], {'as_dict': '(1)'}), '(\n """select df.parent, df.fieldname\n\t\tfrom tabDocField df, tabDocType d... |
"""Simulate a Map Reduce Scenario where timeout prevention is required.
In this simulation we are using an Optimizer created for map reduce scenarios.
This improves the distribution of the computation no matter how the interest is formatted.
The scenario consists of two NFN nodes and a Client. Goal of the simulation is to ... | [
"threading.Thread.__init__",
"time.sleep",
"PiCN.Packets.Name",
"os.mkdir",
"shutil.rmtree",
"os.stat",
"os.remove"
] | [((1371, 1402), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1396, 1402), False, 'import threading\n'), ((5732, 5745), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (5742, 5745), False, 'import time\n'), ((9476, 9494), 'PiCN.Packets.Name', 'Name', (['"""/lib/func1"""'], {... |
from requests import Session
import urllib.parse
import json
class ServerFunctions:
SITE_INFO = "core_webservice_get_site_info"
ALL_COURSES = "core_course_get_courses_by_field"
USER_COURSES = "core_enrol_get_users_courses"
COURSE_CONTENTS = "core_course_get_contents"
ASSIGNMENTS = "mod_assign_get_a... | [
"json.loads",
"requests.Session"
] | [((756, 765), 'requests.Session', 'Session', ([], {}), '()\n', (763, 765), False, 'from requests import Session\n'), ((976, 1004), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (986, 1004), False, 'import json\n')] |
import numpy as np
from gym import spaces
from agents import SimpleAgentClass
# Create agents for the CMA-ES, NEAT and WANN agents
# defined in the weight-agnostic paper repo:
# https://github.com/google/brain-tokyo-workshop/tree/master/WANNRelease/
# ---------------------------------------------------------------... | [
"numpy.multiply",
"numpy.reshape",
"numpy.maximum",
"numpy.min",
"numpy.ndim",
"numpy.tanh",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.isnan",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.cumsum",
"numpy.shape",
"numpy.load"
] | [((644, 659), 'numpy.min', 'np.min', (['weights'], {}), '(weights)\n', (650, 659), True, 'import numpy as np\n'), ((727, 745), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (736, 745), True, 'import numpy as np\n'), ((757, 789), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'cumVal[-1]'], {}), ... |
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
class Anime_Dataset(Dataset):
def __init__(self, config, transform):
self.config = config
self.transform = transform
self.lines = open(config.... | [
"torchvision.transforms.Scale",
"os.path.join",
"torch.Tensor",
"torchvision.transforms.RandomHorizontalFlip",
"numpy.random.randint",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor"
] | [((3071, 3158), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'config.batch_size'], {'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': '(True)'}), '(dataset, config.batch_size, shuffle=True, num_workers=4,\n drop_last=True)\n', (3081, 3158), False, 'from torch.utils.data import Dataset, DataLoader\... |
# Quickie script for refreshing the local objects.inv cache
# OVERWRITES EXISTING FILES, WITH PRE-DELETION
def pullobjs():
import os
import urllib.request as urlrq
import certifi
# Open conf.py, retrieve content and compile
with open(os.path.join(os.pardir, 'conf.py'), 'r') as f:
confc... | [
"certifi.where",
"os.path.join"
] | [((260, 294), 'os.path.join', 'os.path.join', (['os.pardir', '"""conf.py"""'], {}), "(os.pardir, 'conf.py')\n", (272, 294), False, 'import os\n'), ((904, 919), 'certifi.where', 'certifi.where', ([], {}), '()\n', (917, 919), False, 'import certifi\n')] |
import os,sys
import shutil
import pandas as pd
data=pd.read_csv('D:/MachineLearning/AnimalClassification/train.csv')
Im_id=data['Image_id']
Animal=data['Animal']
dic_data=dict()
for i in range(0,len(Im_id)):
dic_data[Im_id[i].strip()]=Animal[i].strip()
source_dir='D:/MachineLearning/AnimalClassification/Images/trai... | [
"os.path.exists",
"os.listdir",
"os.makedirs",
"pandas.read_csv",
"shutil.move",
"os.path.join",
"os.path.isfile",
"shutil.copy"
] | [((54, 118), 'pandas.read_csv', 'pd.read_csv', (['"""D:/MachineLearning/AnimalClassification/train.csv"""'], {}), "('D:/MachineLearning/AnimalClassification/train.csv')\n", (65, 118), True, 'import pandas as pd\n'), ((384, 406), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (394, 406), False, 'imp... |
#!/usr/bin/env python
import os
import sys
import glob
import hashlib
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
from input_set import PinCellInputSet
import openmc
import openmc.mgxs
class MGXSTestHarness(PyAPITestHarness):
def _build_inputs(self):
# Set the input set to ... | [
"openmc.run",
"os.path.exists",
"sys.path.insert",
"input_set.PinCellInputSet",
"os.remove",
"os.getcwd",
"openmc.mgxs.Library",
"openmc.StatePoint",
"openmc.mgxs.EnergyGroups",
"openmc.Tallies"
] | [((71, 100), 'sys.path.insert', 'sys.path.insert', (['(0)', 'os.pardir'], {}), '(0, os.pardir)\n', (86, 100), False, 'import sys\n'), ((368, 385), 'input_set.PinCellInputSet', 'PinCellInputSet', ([], {}), '()\n', (383, 385), False, 'from input_set import PinCellInputSet\n'), ((561, 618), 'openmc.mgxs.EnergyGroups', 'op... |
import os
from setuptools import setup, find_packages
from cloudwatch_metrics.version import VERSION
with open(os.path.join(os.path.dirname(__file__),
'README.md')) as readme:
README = readme.read()
setup(
name='cloudwatch_metrics',
version=VERSION,
description='The Cloudwatch... | [
"os.path.dirname",
"setuptools.find_packages"
] | [((1220, 1253), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests*']"}), "(exclude=['tests*'])\n", (1233, 1253), False, 'from setuptools import setup, find_packages\n'), ((127, 152), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (142, 152), False, 'import os\n')] |
import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import pandas as pd
def plot_slice(img, slice=80):
# Show some slice in the middle
plt.imshow(img[slice])
plt.show()
def plot_3d(image, threshold=-100):
#... | [
"matplotlib.pyplot.imshow",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.random.permutation",
"matplotlib.pyplot.figure",
"skimage.measure.marching_cubes",
"numpy.savez_compressed",
"numpy.load",
"matplotlib.pyplot.show"
] | [((239, 261), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img[slice]'], {}), '(img[slice])\n', (249, 261), True, 'import matplotlib.pyplot as plt\n'), ((266, 276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (274, 276), True, 'import matplotlib.pyplot as plt\n'), ((481, 517), 'skimage.measure.marching_cube... |
import unittest
from translator import french_to_english, english_to_french
class TestFrenchToEnglish(unittest.TestCase):
def test1(self):
self.assertEqual(french_to_english("Bonjour"), "Hello") # test when "Bonjour" is given as input the output is "Hello".
with self.assertRaises(ValueE... | [
"unittest.main",
"translator.french_to_english",
"translator.english_to_french"
] | [((826, 841), 'unittest.main', 'unittest.main', ([], {}), '()\n', (839, 841), False, 'import unittest\n'), ((170, 198), 'translator.french_to_english', 'french_to_english', (['"""Bonjour"""'], {}), "('Bonjour')\n", (187, 198), False, 'from translator import french_to_english, english_to_french\n'), ((427, 450), 'transl... |
import os
import torch
import numpy as np
from tqdm import tqdm
import json
from torch.utils.data import Dataset, DataLoader
from arcface.resnet import ResNet
from arcface.googlenet import GoogLeNet
from arcface.inception_v4 import InceptionV4
from arcface.inceptionresnet_v2 import InceptionResNetV2
from arcface.densen... | [
"autoaugment.rand_augment_transform",
"numpy.random.rand",
"torch.from_numpy",
"numpy.array",
"arcface.inception_v4.InceptionV4",
"os.listdir",
"arcface.inceptionresnet_v2.InceptionResNetV2",
"config.get_args_arcface",
"random.randint",
"random.sample",
"random.choice",
"torch.Tensor",
"arcf... | [((47781, 47799), 'config.get_args_arcface', 'get_args_arcface', ([], {}), '()\n', (47797, 47799), False, 'from config import get_args_arcface\n'), ((989, 1034), 'torch.utils.data.DataLoader', 'DataLoader', (['arcfaceDataset'], {}), '(arcfaceDataset, **training_params)\n', (999, 1034), False, 'from torch.utils.data imp... |
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import pickle
from common import database
import config
import common.LogBase
import WebMirror.rules
from WebMirror.OutputFilters.util.MessageConstructors import pack_message
import WebMirror.TimedTriggers.TriggerBase
import common.get_rpyc
# impo... | [
"common.database.delete_db_session",
"logSetup.initLogging",
"pickle.loads",
"WebMirror.OutputFilters.util.MessageConstructors.pack_message"
] | [((48, 70), 'logSetup.initLogging', 'logSetup.initLogging', ([], {}), '()\n', (68, 70), False, 'import logSetup\n'), ((1401, 1441), 'WebMirror.OutputFilters.util.MessageConstructors.pack_message', 'pack_message', (['"""system-feed-counts"""', 'data'], {}), "('system-feed-counts', data)\n", (1413, 1441), False, 'from We... |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:44:34 2018
@author: Moha-Thinkpad
"""
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow.keras
import argpa... | [
"matplotlib.pyplot.grid",
"numpy.array",
"tensorflow.keras.models.load_model",
"scipy.ndimage.gaussian_filter",
"tensorflow.set_random_seed",
"numpy.genfromtxt",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.max",
"mat... | [((228, 249), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (242, 249), False, 'import matplotlib\n'), ((1462, 1508), 'tensorflow.keras.layers.Lambda', 'Lambda', (['lrelu'], {'output_shape': 'lrelu_output_shape'}), '(lrelu, output_shape=lrelu_output_shape)\n', (1468, 1508), False, 'from tensorfl... |
"""This file contains functions for converting and storing jupyter notebooks."""
import nbformat
import pickle
import numpy as np
import os
from nbconvert import PythonExporter
from pathlib import Path # for windows-Unix compatibility
def nbconvert_python(path):
"""Use nbconvert to convert jupyter notebook to py... | [
"os.path.exists",
"pickle.dump",
"pathlib.Path",
"pickle.dumps",
"nbformat.read",
"numpy.asanyarray",
"nbconvert.PythonExporter",
"numpy.savez_compressed"
] | [((4025, 4045), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4039, 4045), False, 'import os\n'), ((5177, 5211), 'pathlib.Path', 'Path', (['"""docs/getting_started.ipynb"""'], {}), "('docs/getting_started.ipynb')\n", (5181, 5211), False, 'from pathlib import Path\n'), ((5307, 5364), 'pathlib.Path', '... |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas.compat import long
from pandas.tseries import offsets
from pandas import Timestamp, Timedelta
class TestTimestampArithmetic(object):
def test_overflow_offset(self):
... | [
"datetime.datetime",
"pandas.Timestamp",
"pandas.compat.long",
"datetime.timedelta",
"pytest.raises",
"numpy.timedelta64",
"pandas.tseries.offsets.Day",
"pandas.util.testing.assert_produces_warning"
] | [((469, 511), 'pandas.Timestamp', 'Timestamp', (['"""2017-01-13 00:00:00"""'], {'freq': '"""D"""'}), "('2017-01-13 00:00:00', freq='D')\n", (478, 511), False, 'from pandas import Timestamp, Timedelta\n'), ((1005, 1027), 'datetime.datetime', 'datetime', (['(2013)', '(10)', '(12)'], {}), '(2013, 10, 12)\n', (1013, 1027),... |
import urllib, urllib2
from parse_data import taxid
#UniProt column names are found at
#https://www.uniprot.org/help/uniprotkb_column_names
class UniProtAPI():
def __init__(self, columns):
self.columns = columns
self.url = 'https://www.uniprot.org/uniprot/'
self.batch_size = 350 #491 is limit
self.raw_dat... | [
"urllib2.Request",
"urllib2.urlopen",
"urllib.urlencode",
"parse_data.taxid"
] | [((354, 383), 'urllib.urlencode', 'urllib.urlencode', (['self.params'], {}), '(self.params)\n', (370, 383), False, 'import urllib, urllib2\n'), ((396, 427), 'urllib2.Request', 'urllib2.Request', (['self.url', 'data'], {}), '(self.url, data)\n', (411, 427), False, 'import urllib, urllib2\n'), ((441, 465), 'urllib2.urlop... |
import json
from pathlib import Path
import numpy as np
from matplotlib import path
current_dir = Path(__file__).parent
__all__ = list(p.stem for p in current_dir.glob("*.json"))
def __getattr__(name: str) -> path.Path:
file_path = current_dir / (name + ".json")
if file_path.exists():
data = json.lo... | [
"numpy.array",
"pathlib.Path"
] | [((100, 114), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (104, 114), False, 'from pathlib import Path\n'), ((418, 451), 'numpy.array', 'np.array', (["data['codes']", 'np.uint8'], {}), "(data['codes'], np.uint8)\n", (426, 451), True, 'import numpy as np\n')] |
import asyncio
import os
import subprocess
from threading import Thread
from typing import Dict, Set
from .plugin_settings import PluginSettings
from .rpc.api.daemon import DaemonConnectedEvent
from .project import CurrentProject
from .rpc import FlutterRpcProcess, FlutterRpcClient
from .env import Env
import sublime... | [
"sublime.load_settings",
"asyncio.new_event_loop",
"threading.Thread",
"sublime.error_message"
] | [((775, 799), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (797, 799), False, 'import asyncio\n'), ((1207, 1361), 'sublime.error_message', 'sublime.error_message', (['"""Unable to determine the path to the Flutter SDK. Please define "FLUTTER_ROOT" under the "env" key in LSP-Dart settings."""'],... |
#!/usr/bin/env python2
"""Context for all tests."""
from __future__ import absolute_import
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "lcovparse"))
import lcovparse # pylint: disable=wrong-import-position,unused-import
| [
"os.path.realpath"
] | [((159, 185), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (175, 185), False, 'import os\n')] |
import argparse
from functools import partial
from numbers import Number
from typing import Callable, Union, Tuple, Optional
import numpy as np
from skimage import img_as_uint
from starfish.errors import DataFormatWarning
from starfish.image import ImageStack
from starfish.pipeline.filter.gaussian_low_pass import Gau... | [
"skimage.img_as_uint",
"starfish.errors.DataFormatWarning",
"functools.partial",
"starfish.pipeline.filter.gaussian_low_pass.GaussianLowPass.low_pass"
] | [((2741, 2779), 'starfish.pipeline.filter.gaussian_low_pass.GaussianLowPass.low_pass', 'GaussianLowPass.low_pass', (['image', 'sigma'], {}), '(image, sigma)\n', (2765, 2779), False, 'from starfish.pipeline.filter.gaussian_low_pass import GaussianLowPass\n'), ((3469, 3510), 'functools.partial', 'partial', (['self.high_p... |
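The extract shows the high-pass filter implemented as the image minus GaussianLowPass.low_pass(image, sigma). The same idea sketched with scipy rather than starfish's wrapper (the clip to non-negative values is an assumption about the intended output range):

import numpy as np
from scipy.ndimage import gaussian_filter

def gaussian_high_pass(image: np.ndarray, sigma: float) -> np.ndarray:
    # subtract a Gaussian-blurred copy to keep only high-frequency detail
    blurred = gaussian_filter(image.astype(np.float64), sigma)
    return np.clip(image - blurred, 0, None)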
from typing import Any, List
import factom_core.blocks as blocks
from factom_core.db import FactomdLevelDB
from .pending_block import PendingBlock
class BaseBlockchain:
"""The base class for all Blockchain objects"""
network_id: bytes = None
vms: List[Any] = None
data_path: str = None
db: Fact... | [
"factom_core.blocks.FactoidBlock",
"factom_core.blocks.EntryCreditBlock",
"factom_core.blocks.DirectoryBlock",
"factom_core.db.FactomdLevelDB",
"factom_core.blocks.DirectoryBlockBody",
"factom_core.blocks.AdminBlock",
"factom_core.blocks.EntryBlock"
] | [((894, 948), 'factom_core.db.FactomdLevelDB', 'FactomdLevelDB', ([], {'path': 'data_path', 'create_if_missing': '(True)'}), '(path=data_path, create_if_missing=True)\n', (908, 948), False, 'from factom_core.db import FactomdLevelDB\n'), ((3977, 4034), 'factom_core.blocks.EntryCreditBlock', 'blocks.EntryCreditBlock', (... |
import keras
import os
def load_model(version, new_model, retrain=False, *args):
"""
:param version: model version
    :param new_model: callable that returns a fresh model, e.g. my_ResNet.my_ResNet
    :param retrain: if True, build a fresh model instead of loading a saved one
:return:
"""
create_new_model = False
# load model
i... | [
"keras.models.model_from_json"
] | [((559, 599), 'keras.models.model_from_json', 'keras.models.model_from_json', (['model_json'], {}), '(model_json)\n', (587, 599), False, 'import keras\n')] |
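The extract confirms keras.models.model_from_json rebuilds the architecture; weights are then loaded separately. A round-trip sketch with assumed file names (the record's actual paths are not visible):

import keras

version = 1  # hypothetical version number
with open('model_v{}.json'.format(version)) as json_file:
    model = keras.models.model_from_json(json_file.read())
model.load_weights('model_v{}.h5'.format(version))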
import argparse
from bioplottemplates.libs import libcli, libio
from bioplottemplates.plots import label_dots
ap = libcli.CustomParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
ap.add_argument(
'data_csv',
    help='The CSV files to plot',
nargs='+',
)
... | [
"bioplottemplates.libs.libio.extract_labels_data",
"bioplottemplates.plots.label_dots.plot",
"bioplottemplates.libs.libcli.CustomParser"
] | [((118, 219), 'bioplottemplates.libs.libcli.CustomParser', 'libcli.CustomParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n', (137, 219), False, 'from bioplottemplates.libs import ... |
# coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
class SharedFolder(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=255, null=True, blank=True)
users = models.ManyToManyField(User, throu... | [
"django.utils.text.slugify",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.db.models.SlugField",
"django.db.models.DateTimeField",
"django.db.m... | [((175, 206), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (191, 206), False, 'from django.db import models\n'), ((218, 273), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, nu... |
import sys
import pytest
from pysh import shwords, shwords_f
def test_conversions():
with pytest.raises(ValueError):
shwords('{:{}}', 1, 2)
assert '{:{}}'.format(1, 2) == ' 1' # by contrast
def test_multiword():
assert shwords('touch {!@}', ['a', 'b']) \
== ['touch', 'a', 'b']
with pytest.raises(... | [
"pysh.shwords",
"pytest.raises",
"pysh.shwords_f"
] | [((96, 121), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (109, 121), False, 'import pytest\n'), ((127, 149), 'pysh.shwords', 'shwords', (['"""{:{}}"""', '(1)', '(2)'], {}), "('{:{}}', 1, 2)\n", (134, 149), False, 'from pysh import shwords, shwords_f\n'), ((236, 269), 'pysh.shwords', 'shwor... |
import wandb
import main
# Load project config
config = main.load_config()
# Initialize wandb
wandb.init()
# Replace project config hyperparameters with the ones loaded from wandb sweep server
sweep_hparams = wandb.Config._as_dict(wandb.config)
for key, value in sweep_hparams.items():
if key != "_wandb":
... | [
"main.load_config",
"wandb.Config._as_dict",
"wandb.init",
"main.main"
] | [((58, 76), 'main.load_config', 'main.load_config', ([], {}), '()\n', (74, 76), False, 'import main\n'), ((97, 109), 'wandb.init', 'wandb.init', ([], {}), '()\n', (107, 109), False, 'import wandb\n'), ((213, 248), 'wandb.Config._as_dict', 'wandb.Config._as_dict', (['wandb.config'], {}), '(wandb.config)\n', (234, 248), ... |
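The loop above is cut off mid-body; presumably it copies every sweep-server hyperparameter except wandb's own '_wandb' bookkeeping key into the project config. A hedged completion (attribute-style assignment is an assumption about what main.load_config() returns):

for key, value in sweep_hparams.items():
    if key != '_wandb':
        setattr(config, key, value)  # or config[key] = value for a dict-like config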
# Top of main python script
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import sys
import random
import argparse
import numpy as np
import trimesh
import imageio
import open3d as o3d
from mathutils import Matrix
import h5py
import json
from mesh_to_sdf import get_surface_point_cloud
import pyrender
import uti... | [
"numpy.sqrt",
"util.look_at",
"numpy.array",
"util.cv_cam2world_to_bcam2world",
"numpy.sin",
"util.sample_spherical",
"os.path.exists",
"pyrender.IntrinsicsCamera",
"os.listdir",
"argparse.ArgumentParser",
"numpy.stack",
"util.depth_2_normal",
"util.get_world2cam_from_blender_cam",
"numpy.... | [((324, 345), 'numpy.random.seed', 'np.random.seed', (['(12433)'], {}), '(12433)\n', (338, 345), True, 'import numpy as np\n'), ((346, 364), 'random.seed', 'random.seed', (['(12433)'], {}), '(12433)\n', (357, 364), False, 'import random\n'), ((1295, 1393), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'de... |
from fetcher.source.fetcher import Fetcher
from fetcher.source.managers.notification import FetcherException
def fetch(config_dict):
f = Fetcher(config_dict)
f.start()
return "Job has been finished"
| [
"fetcher.source.fetcher.Fetcher"
] | [((143, 163), 'fetcher.source.fetcher.Fetcher', 'Fetcher', (['config_dict'], {}), '(config_dict)\n', (150, 163), False, 'from fetcher.source.fetcher import Fetcher\n')] |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import UserProfile
class UserRegistrationForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super(UserRegistrationForm, self).__init__(*args, **kwargs)
... | [
"django.contrib.auth.models.User.objects.create_user"
] | [((653, 764), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': "self.cleaned_data['username']", 'password': "self.cleaned_data['<PASSWORD>']"}), "(username=self.cleaned_data['username'], password=\n self.cleaned_data['<PASSWORD>'])\n", (677, 764), False, 'from djan... |
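The extract anonymizes the password expression as <PASSWORD>. In a stock UserCreationForm the field is named 'password1'; that name is assumed in this hypothetical save():

def save(self, commit=True):
    # 'password1' is the conventional UserCreationForm field name (assumed)
    return User.objects.create_user(
        username=self.cleaned_data['username'],
        password=self.cleaned_data['password1'],
    )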
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
# Index Page
url(r'^$', views.index, name='index'),
    # Issue electronic statements
url(r'^CheckMgtKeyInUse$', views.checkMgtKeyInUse, name='CheckMgtKeyInUse'),
url(r'^RegistIssue$', views.registIssue, name='RegistIssue'),
... | [
"django.conf.urls.url"
] | [((116, 152), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (119, 152), False, 'from django.conf.urls import url\n'), ((175, 249), 'django.conf.urls.url', 'url', (['"""^CheckMgtKeyInUse$"""', 'views.checkMgtKeyInUse'], {'name': '"""CheckMgt... |
from setuptools import setup
with open('README.rst') as README:
long_description = README.read()
long_description = long_description[long_description.index('Description'):]
setup(name='timeme',
version='0.1.1',
description='Decorator that prints the running time of a function',
long_descript... | [
"setuptools.setup"
] | [((183, 511), 'setuptools.setup', 'setup', ([], {'name': '"""timeme"""', 'version': '"""0.1.1"""', 'description': '"""Decorator that prints the running time of a function"""', 'long_description': 'long_description', 'url': '"""http://github.com/enricobacis/timeme"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAI... |
import time
from pymongo import MongoClient
from datetime import datetime
from threading import Thread, Lock
start = datetime.now()
client = MongoClient("mongodb://username:password@127.0.0.1")
database = client["database_name"]
collection = database["collection_name"]
threads_count = 0
lock = Lock()
package = []
... | [
"threading.Lock",
"time.sleep",
"datetime.datetime.now",
"pymongo.MongoClient",
"threading.Thread"
] | [((118, 132), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (130, 132), False, 'from datetime import datetime\n'), ((143, 195), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://username:password@127.0.0.1"""'], {}), "('mongodb://username:password@127.0.0.1')\n", (154, 195), False, 'from pymongo import... |
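The record sets up a shared list ('package'), a Lock, and a thread counter, which suggests worker threads batching documents before a bulk write. A hedged sketch of the flush step such a setup implies (names reuse the record's globals; the batching policy is an assumption):

def flush():
    global package
    with lock:  # only one thread may drain the shared batch at a time
        if package:
            collection.insert_many(package)
            package = []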
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
class PinCard(models.Model):
token = models.CharField(max_length=32, db_index=True, editable=False)
display_n... | [
"django.db.models.GenericIPAddressField",
"django.contrib.auth.get_user_model",
"django.db.models.EmailField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.PositiveSmallIntegerField",
"django.db.m... | [((185, 201), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (199, 201), False, 'from django.contrib.auth import get_user_model\n'), ((244, 306), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'db_index': '(True)', 'editable': '(False)'}), '(max_length=32, db_in... |
import unittest #Importing the unittest module
from contact import Contact #Importing the contact class
#import pyperclip #Pyperclip will allow us to copy and paste items to our clipboard
class TestContact(unittest.TestCase):
def setUp(self):
self.new_contact = Contact("Lyn","Muthoni","0796654066","<EMAIL>... | [
"contact.Contact.contact_exist",
"contact.Contact.find_by_number",
"contact.Contact",
"unittest.main",
"contact.Contact.display_contacts"
] | [((2889, 2904), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2902, 2904), False, 'import unittest\n'), ((275, 325), 'contact.Contact', 'Contact', (['"""Lyn"""', '"""Muthoni"""', '"""0796654066"""', '"""<EMAIL>"""'], {}), "('Lyn', 'Muthoni', '0796654066', '<EMAIL>')\n", (282, 325), False, 'from contact import Co... |
from celery import Celery
import os
# Set the default Django settings module for celery
if not os.getenv('DJANGO_SETTINGS_MODULE'):
os.environ['DJANGO_SETTINGS_MODULE'] = 'mlh.settings.dev'
# Create the Celery app named 'mlh' and specify the broker
celery_app = Celery('mlh',broker='redis://127.0.0.1:6379/15')
# Automatically register tasks
celery_app.autodiscover_tasks(['celery_tasks.sms',]) | [
"celery.Celery",
"os.getenv"
] | [((208, 257), 'celery.Celery', 'Celery', (['"""mlh"""'], {'broker': '"""redis://127.0.0.1:6379/15"""'}), "('mlh', broker='redis://127.0.0.1:6379/15')\n", (214, 257), False, 'from celery import Celery\n'), ((66, 101), 'os.getenv', 'os.getenv', (['"""DJANGO_SETTINGS_MODULE"""'], {}), "('DJANGO_SETTINGS_MODULE')\n", (75, ... |
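autodiscover_tasks(['celery_tasks.sms']) makes Celery import celery_tasks.sms.tasks. A hypothetical version of that module (the import path for the app object and the task body are assumptions):

# celery_tasks/sms/tasks.py
from celery_tasks.main import celery_app  # assumed location of the app defined above

@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, code):
    print('sending code {} to {}'.format(code, mobile))

# caller side, queues the job without blocking:
# send_sms_code.delay('13800000000', '123456')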
import unittest
from pywiktionary.parsers import basic_parser
def get_pizza_html_extract():
with open('tests/file/html-responses/pizza-it.html', 'r', encoding='utf-8') as pizza_html_file:
pizza_html = pizza_html_file.read()
return pizza_html
class BasicParseTestCase(unittest.TestCase):
def test_... | [
"unittest.main"
] | [((660, 675), 'unittest.main', 'unittest.main', ([], {}), '()\n', (673, 675), False, 'import unittest\n')] |
import logging, traceback, time
from bottle import request
from snuggle import configuration
from snuggle import mediawiki
from snuggle import errors
from snuggle.data import types
from snuggle.web.util import responses, user_data
logger = logging.getLogger("snuggle.web.processing.users")
class Events:
def __init__... | [
"logging.getLogger",
"snuggle.web.util.responses.success",
"traceback.format_exc",
"snuggle.data.types.ActionRequest.serialize",
"snuggle.web.util.user_data",
"snuggle.web.util.responses.database_error",
"time.time"
] | [((242, 291), 'logging.getLogger', 'logging.getLogger', (['"""snuggle.web.processing.users"""'], {}), "('snuggle.web.processing.users')\n", (259, 291), False, 'import logging, traceback, time\n'), ((403, 437), 'snuggle.data.types.ActionRequest.serialize', 'types.ActionRequest.serialize', (['doc'], {}), '(doc)\n', (432,... |
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistrib... | [
"logging.getLogger",
"common.presenter_message_pb2.OpenChannelRequest",
"facial_recognition.src.facial_recognition_handler.FacialRecognitionHandler",
"common.app_manager.AppManager",
"numpy.array",
"numpy.linalg.norm",
"facial_recognition.src.facial_recognition_message_pb2.RegisterApp",
"logging.info"... | [((26636, 26650), 'facial_recognition.src.config_parser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (26648, 26650), False, 'from facial_recognition.src.config_parser import ConfigParser\n'), ((26689, 26748), 'os.path.join', 'os.path.join', (['ConfigParser.root_path', '"""config/logging.conf"""'], {}), "(ConfigPars... |
import numpy as np
from pathlib import Path
import sys, os
if __name__ == "__main__":
"""
Jobs:
1) VAE (VAE loss) for data=[dsprites, celeba, chairs]
2) VAE (beta-TC loss with alpha=beta=gamma=1) for data=[dsprites, celeba, chairs]
3) beta-TCVAE for alpha=gamma=[0.5, 1, 2], for beta=[3,6],... | [
"os.system",
"pathlib.Path"
] | [((1086, 1110), 'os.system', 'os.system', (['f"""rm {fname}"""'], {}), "(f'rm {fname}')\n", (1095, 1110), False, 'import sys, os\n'), ((404, 418), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (408, 418), False, 'from pathlib import Path\n')] |
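Job 3 in the docstring is a grid over alpha=gamma in {0.5, 1, 2} and beta in {3, 6} for each dataset. A hypothetical expansion of that grid (the CLI flags are assumptions, not the script's real interface):

for data in ('dsprites', 'celeba', 'chairs'):
    for beta in (3, 6):
        for ag in (0.5, 1, 2):
            print('python main.py --data {} --loss btcvae --beta {} '
                  '--alpha {} --gamma {}'.format(data, beta, ag, ag))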
# downloaded from https://raw.githubusercontent.com/TadasBaltrusaitis/OpenFace/master/lib/local/LandmarkDetector/model/pdms/In-the-wild_aligned_PDM_68.txt
import pickle
import pathlib
THIS_FILE_PATH = pathlib.Path(__file__)
MODEL_FILE_PATH = pathlib.Path.joinpath(THIS_FILE_PATH.parent, "face_model.bin")
with... | [
"pickle.load",
"pathlib.Path.joinpath",
"pathlib.Path"
] | [((208, 230), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'import pathlib\n'), ((250, 312), 'pathlib.Path.joinpath', 'pathlib.Path.joinpath', (['THIS_FILE_PATH.parent', '"""face_model.bin"""'], {}), "(THIS_FILE_PATH.parent, 'face_model.bin')\n", (271, 312), False, 'import path... |
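The snippet truncates at the with-statement, but the extract lists pickle.load, so it presumably continues along these lines (the target variable name is an assumption):

with open(MODEL_FILE_PATH, 'rb') as model_file:
    face_model = pickle.load(model_file)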
from requests_oauthlib import OAuth2Session
from flask import Flask, request, redirect, session, url_for
from flask.json import jsonify
import logging
import datetime
import json
import os
import pickle
import requests
import time
import win32crypt
from typing import Dict
from typing import List
if __package__:
f... | [
"logging.getLogger",
"logging.StreamHandler",
"requests.Session",
"flask.Flask",
"win32crypt.CryptUnprotectData",
"sys.exit",
"os.path.exists",
"flask.request.environ.get",
"json.dumps",
"webbrowser.open_new_tab",
"os.urandom",
"requests.Request",
"flask.redirect",
"time.time",
"requests... | [((831, 858), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (848, 858), False, 'import logging\n'), ((940, 955), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (945, 955), False, 'from flask import Flask, request, redirect, session, url_for\n'), ((908, 931), 'logging.StreamH... |
#*****************************************************************************
#
# Copyright (c) 2000 - 2014, Lawrence Livermore National Security, LLC
# Produced at the Lawrence Livermore National Laboratory
# LLNL-CODE-442911
# All rights reserved.
#
# This file is part of VisIt. For details, see https://visit.llnl... | [
"os.path.join",
"os.path.split",
"os.path.isdir",
"os.mkdir",
"unittest.main"
] | [((2570, 2598), 'os.path.join', 'pjoin', (['output_dir', '"""qannote"""'], {}), "(output_dir, 'qannote')\n", (2575, 2598), True, 'from os.path import join as pjoin\n'), ((2664, 2682), 'os.path.join', 'pjoin', (['odir', 'fname'], {}), '(odir, fname)\n', (2669, 2682), True, 'from os.path import join as pjoin\n'), ((5051,... |
# This program runs the search engine program
import ast, sys, os, json, time, re, math
from objects import *
import lexicon_engine as lexicon_engine
from snippet_engine import *
def bm25_top_10(doc_no_to_internal_id,internal_id_to_metadata,inverted_index,tokens_to_id):
# Retrieving Query from user
print()
que... | [
"json.loads",
"lexicon_engine.read_inverted_index",
"math.log",
"ast.literal_eval",
"lexicon_engine.read_tokens_to_id",
"re.sub",
"time.time"
] | [((351, 362), 'time.time', 'time.time', ([], {}), '()\n', (360, 362), False, 'import ast, sys, os, json, time, re, math\n'), ((401, 427), 're.sub', 're.sub', (['"""\\\\W+"""', '""" """', 'query'], {}), "('\\\\W+', ' ', query)\n", (407, 427), False, 'import ast, sys, os, json, time, re, math\n'), ((2855, 2866), 'time.ti... |
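bm25_top_10 above ranks documents with BM25 (the math.log call in the extract is the IDF term). The textbook scoring function, with the conventional k1 and b defaults assumed rather than taken from this code:

import math

def bm25(tf, df, doc_len, avg_doc_len, n_docs, k1=1.2, b=0.75):
    # idf rewards rare terms; the tf term saturates and normalizes by doc length
    idf = math.log((n_docs - df + 0.5) / (df + 0.5) + 1)
    return idf * tf * (k1 + 1) / (tf + k1 * (1 - b + b * doc_len / avg_doc_len))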