| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q8500
|
RequestHandler.get
|
train
|
def get(self, url, params=None):
"""
Initiate a GET request
"""
r = self.session.get(url, params=params)
return self._response_parser(r, expect_json=False)
|
python
|
{
"resource": ""
}
|
q8501
|
RequestHandler.post
|
train
|
def post(self, url, data, params=None):
"""
Initiate a POST request
"""
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False)
|
python
|
{
"resource": ""
}
|
q8502
|
RequestHandler.send
|
train
|
def send(self, request, expect_json=True, ignore_content=False):
"""
Send a formatted API request
:param request: a formatted request object
:type request: :class:`.Request`
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object
"""
r = self.session.request(method=request.method,
url=request.url,
params=request.params,
data=request.data,
files=request.files,
headers=request.headers)
return self._response_parser(r, expect_json, ignore_content)
|
python
|
{
"resource": ""
}
|
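The ``send`` snippet above only relies on the request object exposing ``method``, ``url``, ``params``, ``data``, ``files`` and ``headers``. A minimal, self-contained sketch of the same idea, using a plain ``requests`` session and a hypothetical ``Request`` container (not the library's actual class):

# Hypothetical sketch: a bare-bones request container plus a send helper,
# mirroring the attributes that the send() snippet above reads.
from collections import namedtuple
import requests

Request = namedtuple(
    "Request", ["method", "url", "params", "data", "files", "headers"]
)

def send(session, request):
    # Forward every field of the container to requests.Session.request
    return session.request(method=request.method,
                           url=request.url,
                           params=request.params,
                           data=request.data,
                           files=request.files,
                           headers=request.headers)

session = requests.Session()
req = Request("GET", "https://httpbin.org/get",
              params={"q": "demo"}, data=None, files=None, headers=None)
print(send(session, req).status_code)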
q8503
|
API.login
|
train
|
def login(self, username=None, password=None,
section='default'):
"""
Create the passport with ``username`` and ``password`` and log in.
If either ``username`` or ``password`` is None or omitted, the
credentials file will be parsed.
:param str username: username to login (email, phone number or user ID)
:param str password: password
:param str section: section name in the credential file
:raise: raises :class:`.AuthenticationError` if failed to login
"""
if self.has_logged_in:
return True
if username is None or password is None:
credential = conf.get_credential(section)
username = credential['username']
password = credential['password']
passport = Passport(username, password)
r = self.http.post(LOGIN_URL, passport.form)
if r.state is True:
# Bind this passport to API
self.passport = passport
passport.data = r.content['data']
self._user_id = r.content['data']['USER_ID']
return True
else:
msg = None
if 'err_name' in r.content:
if r.content['err_name'] == 'account':
msg = 'Account does not exist.'
elif r.content['err_name'] == 'passwd':
msg = 'Password is incorrect.'
raise AuthenticationError(msg)
|
python
|
{
"resource": ""
}
|
q8504
|
API.user_id
|
train
|
def user_id(self):
"""
User id of the current API user
"""
if self._user_id is None:
if self.has_logged_in:
self._user_id = self._req_get_user_aq()['data']['uid']
else:
raise AuthenticationError('Not logged in.')
return self._user_id
|
python
|
{
"resource": ""
}
|
q8505
|
API.username
|
train
|
def username(self):
"""
Username of the current API user
"""
if self._username is None:
if self.has_logged_in:
self._username = self._get_username()
else:
raise AuthenticationError('Not logged in.')
return self._username
|
python
|
{
"resource": ""
}
|
q8506
|
API.has_logged_in
|
train
|
def has_logged_in(self):
"""Check whether the API has logged in"""
r = self.http.get(CHECKPOINT_URL)
if r.state is False:
return True
# If logged out, flush cache
self._reset_cache()
return False
|
python
|
{
"resource": ""
}
|
q8507
|
API.receiver_directory
|
train
|
def receiver_directory(self):
"""Parent directory of the downloads directory"""
if self._receiver_directory is None:
self._receiver_directory = self.downloads_directory.parent
return self._receiver_directory
|
python
|
{
"resource": ""
}
|
q8508
|
API.add_task_bt
|
train
|
def add_task_bt(self, filename, select=False):
"""
Add a new BT task
:param str filename: path to torrent file to upload
:param bool select: whether to select files in the torrent.
* True: returns the opened torrent (:class:`.Torrent`); you can
then iterate over the files in :attr:`.Torrent.files` and
select/unselect them before calling :meth:`.Torrent.submit`
* False: submits the torrent with the default file selection
"""
filename = eval_path(filename)
u = self.upload(filename, self.torrents_directory)
t = self._load_torrent(u)
if select:
return t
return t.submit()
|
python
|
{
"resource": ""
}
|
q8509
|
API.get_storage_info
|
train
|
def get_storage_info(self, human=False):
"""
Get storage info
:param bool human: whether to return human-readable sizes
:return: total and used storage
:rtype: dict
"""
res = self._req_get_storage_info()
if human:
res['total'] = humanize.naturalsize(res['total'], binary=True)
res['used'] = humanize.naturalsize(res['used'], binary=True)
return res
|
python
|
{
"resource": ""
}
|
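For reference, ``humanize.naturalsize`` is what turns the raw byte counts into readable strings; a tiny sketch with made-up values:

# Minimal sketch of the human-readable conversion used by get_storage_info.
# The byte counts below are made-up example values.
import humanize

res = {'total': 16106127360, 'used': 4294967296}
print(humanize.naturalsize(res['total'], binary=True))  # '15.0 GiB'
print(humanize.naturalsize(res['used'], binary=True))   # '4.0 GiB'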
q8510
|
API.upload
|
train
|
def upload(self, filename, directory=None):
"""
Upload a file ``filename`` to ``directory``
:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to
:attr:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File`
"""
filename = eval_path(filename)
if directory is None:
directory = self.downloads_directory
# First request
res1 = self._req_upload(filename, directory)
data1 = res1['data']
file_id = data1['file_id']
# Second request
res2 = self._req_file(file_id)
data2 = res2['data'][0]
data2.update(**data1)
return _instantiate_uploaded_file(self, data2)
|
python
|
{
"resource": ""
}
|
q8511
|
API.download
|
train
|
def download(self, obj, path=None, show_progress=True, resume=True,
auto_retry=True, proapi=False):
"""
Download a file
:param obj: :class:`.File` object
:param str path: local path
:param bool show_progress: whether to show download progress
:param bool resume: whether to resume on unfinished downloads
identified by filename
:param bool auto_retry: whether to retry automatically upon closed
transfer until the file's download is finished
:param bool proapi: whether to use pro API
"""
url = obj.get_download_url(proapi)
download(url, path=path, session=self.http.session,
show_progress=show_progress, resume=resume,
auto_retry=auto_retry)
|
python
|
{
"resource": ""
}
|
q8512
|
API.search
|
train
|
def search(self, keyword, count=30):
"""
Search files or directories
:param str keyword: keyword
:param int count: number of entries to be listed
"""
kwargs = {}
kwargs['search_value'] = keyword
root = self.root_directory
entries = root._load_entries(func=self._req_files_search,
count=count, page=1, **kwargs)
res = []
for entry in entries:
if 'pid' in entry:
res.append(_instantiate_directory(self, entry))
else:
res.append(_instantiate_file(self, entry))
return res
|
python
|
{
"resource": ""
}
|
q8513
|
API._req_offline_space
|
train
|
def _req_offline_space(self):
"""Required before accessing lixian tasks"""
url = 'http://115.com/'
params = {
'ct': 'offline',
'ac': 'space',
'_': get_timestamp(13)
}
_sign = os.environ.get('U115_BROWSER_SIGN')
if _sign is not None:
_time = os.environ.get('U115_BROWSER_TIME')
if _time is None:
msg = 'U115_BROWSER_TIME is required given U115_BROWSER_SIGN.'
raise APIError(msg)
params['sign'] = _sign
params['time'] = _time
params['uid'] = self.user_id
req = Request(url=url, params=params)
r = self.http.send(req)
if r.state:
self._signatures['offline_space'] = r.content['sign']
self._lixian_timestamp = r.content['time']
else:
msg = 'Failed to retrieve signatures.'
raise RequestFailure(msg)
|
python
|
{
"resource": ""
}
|
q8514
|
API._req_lixian_task_lists
|
train
|
def _req_lixian_task_lists(self, page=1):
"""
This request will cause the system to create a default downloads
directory if it does not exist
"""
url = 'http://115.com/lixian/'
params = {'ct': 'lixian', 'ac': 'task_lists'}
self._load_signatures()
data = {
'page': page,
'uid': self.user_id,
'sign': self._signatures['offline_space'],
'time': self._lixian_timestamp,
}
req = Request(method='POST', url=url, params=params, data=data)
res = self.http.send(req)
if res.state:
self._task_count = res.content['count']
self._task_quota = res.content['quota']
return res.content['tasks']
else:
msg = 'Failed to get tasks.'
raise RequestFailure(msg)
|
python
|
{
"resource": ""
}
|
q8515
|
API._req_lixian_get_id
|
train
|
def _req_lixian_get_id(self, torrent=False):
"""Get `cid` of lixian space directory"""
url = 'http://115.com/'
params = {
'ct': 'lixian',
'ac': 'get_id',
'torrent': 1 if torrent else None,
'_': get_timestamp(13)
}
req = Request(method='GET', url=url, params=params)
res = self.http.send(req)
return res.content
|
python
|
{
"resource": ""
}
|
q8516
|
API._req_files_edit
|
train
|
def _req_files_edit(self, fid, file_name=None, is_mark=0):
"""Edit a file or directory"""
url = self.web_api_url + '/edit'
data = locals()
del data['self']
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return True
else:
raise RequestFailure('Failed to access files API.')
|
python
|
{
"resource": ""
}
|
q8517
|
API._req_directory
|
train
|
def _req_directory(self, cid):
"""Return name and pid of by cid"""
res = self._req_files(cid=cid, offset=0, limit=1, show_dir=1)
path = res['path']
count = res['count']
for d in path:
if str(d['cid']) == str(cid):
res = {
'cid': d['cid'],
'name': d['name'],
'pid': d['pid'],
'count': count,
}
return res
else:
raise RequestFailure('No directory found.')
|
python
|
{
"resource": ""
}
|
q8518
|
API._req_upload
|
train
|
def _req_upload(self, filename, directory):
"""Raw request to upload a file ``filename``"""
self._upload_url = self._load_upload_url()
self.http.get('http://upload.115.com/crossdomain.xml')
b = os.path.basename(filename)
target = 'U_1_' + str(directory.cid)
files = {
'Filename': ('', quote(b), ''),
'target': ('', target, ''),
'Filedata': (quote(b), open(filename, 'rb'), ''),
'Upload': ('', 'Submit Query', ''),
}
req = Request(method='POST', url=self._upload_url, files=files)
res = self.http.send(req)
if res.state:
return res.content
else:
msg = None
if res.content['code'] == 990002:
msg = 'Invalid parameter.'
elif res.content['code'] == 1001:
msg = 'Torrent upload failed. Please try again later.'
raise RequestFailure(msg)
|
python
|
{
"resource": ""
}
|
q8519
|
API._load_root_directory
|
train
|
def _load_root_directory(self):
"""
Load root directory, which has a cid of 0
"""
kwargs = self._req_directory(0)
self._root_directory = Directory(api=self, **kwargs)
|
python
|
{
"resource": ""
}
|
q8520
|
API._load_torrents_directory
|
train
|
def _load_torrents_directory(self):
"""
Load torrents directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=True)
self._torrents_directory = self._load_directory(r['cid'])
|
python
|
{
"resource": ""
}
|
q8521
|
API._load_downloads_directory
|
train
|
def _load_downloads_directory(self):
"""
Load downloads directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=False)
self._downloads_directory = self._load_directory(r['cid'])
|
python
|
{
"resource": ""
}
|
q8522
|
API._parse_src_js_var
|
train
|
def _parse_src_js_var(self, variable):
"""Parse JavaScript variables in the source page"""
src_url = 'http://115.com'
r = self.http.get(src_url)
soup = BeautifulSoup(r.content)
scripts = [script.text for script in soup.find_all('script')]
text = '\n'.join(scripts)
pattern = "%s\s*=\s*(.*);" % (variable.upper())
m = re.search(pattern, text)
if not m:
msg = 'Cannot parse source JavaScript for %s.' % variable
raise APIError(msg)
return json.loads(m.group(1).strip())
|
python
|
{
"resource": ""
}
|
q8523
|
BaseFile.delete
|
train
|
def delete(self):
"""
Delete this file or directory
:return: whether deletion is successful
:raise: :class:`.APIError` if this file or directory is already deleted
"""
fcid = None
pid = None
if isinstance(self, File):
fcid = self.fid
pid = self.cid
elif isinstance(self, Directory):
fcid = self.cid
pid = self.pid
else:
raise APIError('Invalid BaseFile instance.')
if not self._deleted:
if self.api._req_rb_delete(fcid, pid):
self._deleted = True
return True
else:
raise APIError('This file or directory is already deleted.')
|
python
|
{
"resource": ""
}
|
q8524
|
BaseFile.edit
|
train
|
def edit(self, name, mark=False):
"""
Edit this file or directory
:param str name: new name for this entry
:param bool mark: whether to bookmark this entry
"""
self.api.edit(self, name, mark)
|
python
|
{
"resource": ""
}
|
q8525
|
File.directory
|
train
|
def directory(self):
"""Directory that holds this file"""
if self._directory is None:
self._directory = self.api._load_directory(self.cid)
return self._directory
|
python
|
{
"resource": ""
}
|
q8526
|
File.get_download_url
|
train
|
def get_download_url(self, proapi=False):
"""
Get this file's download URL
:param bool proapi: whether to use pro API
"""
if self._download_url is None:
self._download_url = \
self.api._req_files_download_url(self.pickcode, proapi)
return self._download_url
|
python
|
{
"resource": ""
}
|
q8527
|
File.download
|
train
|
def download(self, path=None, show_progress=True, resume=True,
auto_retry=True, proapi=False):
"""Download this file"""
self.api.download(self, path, show_progress, resume, auto_retry,
proapi)
|
python
|
{
"resource": ""
}
|
q8528
|
File.reload
|
train
|
def reload(self):
"""
Reload file info and metadata
* name
* sha
* pickcode
"""
res = self.api._req_file(self.fid)
data = res['data'][0]
self.name = data['file_name']
self.sha = data['sha1']
self.pickcode = data['pick_code']
|
python
|
{
"resource": ""
}
|
q8529
|
Directory.parent
|
train
|
def parent(self):
"""Parent directory that holds this directory"""
if self._parent is None:
if self.pid is not None:
self._parent = self.api._load_directory(self.pid)
return self._parent
|
python
|
{
"resource": ""
}
|
q8530
|
Directory.reload
|
train
|
def reload(self):
"""
Reload directory info and metadata
* `name`
* `pid`
* `count`
"""
r = self.api._req_directory(self.cid)
self.pid = r['pid']
self.name = r['name']
self._count = r['count']
|
python
|
{
"resource": ""
}
|
q8531
|
Directory.list
|
train
|
def list(self, count=30, order='user_ptime', asc=False, show_dir=True,
natsort=True):
"""
List directory contents
:param int count: number of entries to be listed
:param str order: order of entries, originally named `o`. This value
may be one of `user_ptime` (default), `file_size` and `file_name`
:param bool asc: whether in ascending order
:param bool show_dir: whether to show directories
:param bool natsort: whether to use natural sort
Return a list of :class:`.File` or :class:`.Directory` objects
"""
if self.cid is None:
return False
self.reload()
kwargs = {}
# `cid` is the only required argument
kwargs['cid'] = self.cid
kwargs['asc'] = 1 if asc is True else 0
kwargs['show_dir'] = 1 if show_dir is True else 0
kwargs['natsort'] = 1 if natsort is True else 0
kwargs['o'] = order
# When the downloads directory exists along with its parent directory,
# the receiver directory, its parent's count (receiver directory's
# count) does not include the downloads directory. This behavior is
# similar to its parent's parent (root), the count of which does not
# include the receiver directory.
# The following code fixes this behavior so that a directory's
# count correctly reflects the actual number of entries in it.
# A side effect is that this code may ensure the downloads directory
# exists, causing the system to create the receiver directory and
# downloads directory if they do not exist.
if self.is_root or self == self.api.receiver_directory:
self._count += 1
if self.count <= count:
# count should never be greater than self.count
count = self.count
try:
entries = self._load_entries(func=self.api._req_files,
count=count, page=1, **kwargs)
# When natsort=1 and order='file_name', API access will fail
except RequestFailure as e:
if natsort is True and order == 'file_name':
entries = \
self._load_entries(func=self.api._req_aps_natsort_files,
count=count, page=1, **kwargs)
else:
raise e
res = []
for entry in entries:
if 'pid' in entry:
res.append(_instantiate_directory(self.api, entry))
else:
res.append(_instantiate_file(self.api, entry))
return res
|
python
|
{
"resource": ""
}
|
q8532
|
Task.status_human
|
train
|
def status_human(self):
"""
Human readable status
:return:
* `DOWNLOADING`: the task is downloading files
* `BEING TRANSFERRED`: the task is being transferred
* `TRANSFERRED`: the task has been transferred to downloads \
directory
* `SEARCHING RESOURCES`: the task is searching resources
* `FAILED`: the task is failed
* `DELETED`: the task is deleted
* `UNKNOWN STATUS`
:rtype: str
"""
res = None
if self._deleted:
return 'DELETED'
if self.status == 1:
res = 'DOWNLOADING'
elif self.status == 2:
if self.move == 0:
res = 'BEING TRANSFERRED'
elif self.move == 1:
res = 'TRANSFERRED'
elif self.move == 2:
res = 'PARTIALLY TRANSFERRED'
elif self.status == 4:
res = 'SEARCHING RESOURCES'
elif self.status == -1:
res = 'FAILED'
if res is not None:
return res
return 'UNKNOWN STATUS'
|
python
|
{
"resource": ""
}
|
q8533
|
Task.directory
|
train
|
def directory(self):
"""Associated directory, if any, with this task"""
if not self.is_directory:
msg = 'This task is a file task with no associated directory.'
raise TaskError(msg)
if self._directory is None:
if self.is_transferred:
self._directory = self.api._load_directory(self.cid)
if self._directory is None:
msg = 'No directory associated with this task: Task is %s.' % \
self.status_human.lower()
raise TaskError(msg)
return self._directory
|
python
|
{
"resource": ""
}
|
q8534
|
Task.list
|
train
|
def list(self, count=30, order='user_ptime', asc=False, show_dir=True,
natsort=True):
"""
List files of the directory associated with this task.
:param int count: number of entries to be listed
:param str order: originally named `o`
:param bool asc: whether in ascending order
:param bool show_dir: whether to show directories
"""
return self.directory.list(count, order, asc, show_dir, natsort)
|
python
|
{
"resource": ""
}
|
q8535
|
Torrent.submit
|
train
|
def submit(self):
"""Submit this torrent and create a new task"""
if self.api._req_lixian_add_task_bt(self):
self.submitted = True
return True
return False
|
python
|
{
"resource": ""
}
|
q8536
|
Command.get_needful_files
|
train
|
def get_needful_files(self):
"""
Returns currently used static files.
Assumes that manifest staticfiles.json is up-to-date.
"""
manifest = self.storage.load_manifest()
if self.keep_unhashed_files:
if PY3:
needful_files = set(manifest.keys() | manifest.values())
else:
needful_files = set(manifest.keys() + manifest.values())
needful_files = {self.storage.clean_name(file) for file in needful_files}
else:
needful_files = set(manifest.values())
return {self.process_file(file) for file in needful_files}
|
python
|
{
"resource": ""
}
|
q8537
|
Command.model_file_fields
|
train
|
def model_file_fields(self, model):
"""
Generator yielding all instances of FileField and its subclasses of a model.
"""
for field in model._meta.fields:
if isinstance(field, models.FileField):
yield field
|
python
|
{
"resource": ""
}
|
q8538
|
Command.get_resource_types
|
train
|
def get_resource_types(self):
"""
Returns set of resource types of FileFields of all registered models.
Needed by Cloudinary as resource type is needed to browse or delete specific files.
"""
resource_types = set()
for model in self.models():
for field in self.model_file_fields(model):
resource_type = field.storage.RESOURCE_TYPE
resource_types.add(resource_type)
return resource_types
|
python
|
{
"resource": ""
}
|
q8539
|
Command.get_needful_files
|
train
|
def get_needful_files(self):
"""
Returns set of media files associated with models.
Those files won't be deleted.
"""
needful_files = []
for model in self.models():
media_fields = []
for field in self.model_file_fields(model):
media_fields.append(field.name)
if media_fields:
exclude_options = {media_field: '' for media_field in media_fields}
model_uploaded_media = model.objects.exclude(**exclude_options).values_list(*media_fields)
needful_files.extend(model_uploaded_media)
return set(chain.from_iterable(needful_files))
|
python
|
{
"resource": ""
}
|
q8540
|
Command.get_files_to_remove
|
train
|
def get_files_to_remove(self):
"""
Returns orphaned media files to be removed grouped by resource type.
All files which paths start with any of exclude paths are ignored.
"""
files_to_remove = {}
needful_files = self.get_needful_files()
for resources_type, resources in self.get_uploaded_resources():
exclude_paths = self.get_exclude_paths()
resources = {resource for resource in resources if not resource.startswith(exclude_paths)}
files_to_remove[resources_type] = resources - needful_files
return files_to_remove
|
python
|
{
"resource": ""
}
|
q8541
|
StaticCloudinaryStorage._get_resource_type
|
train
|
def _get_resource_type(self, name):
"""
Implemented as static files can be of different resource types.
Because web developers are the people who control those files, we can distinguish them
simply by looking at their extensions; we don't need any content-based validation.
"""
extension = self._get_file_extension(name)
if extension is None:
return self.RESOURCE_TYPE
elif extension in app_settings.STATIC_IMAGES_EXTENSIONS:
return RESOURCE_TYPES['IMAGE']
elif extension in app_settings.STATIC_VIDEOS_EXTENSIONS:
return RESOURCE_TYPES['VIDEO']
else:
return self.RESOURCE_TYPE
|
python
|
{
"resource": ""
}
|
q8542
|
StaticCloudinaryStorage._remove_extension_for_non_raw_file
|
train
|
def _remove_extension_for_non_raw_file(self, name):
"""
Implemented because image and video files' Cloudinary public ids
shouldn't contain file extensions; otherwise the Cloudinary URL
would contain a doubled extension, since Cloudinary appends an extension
to the URL to allow conversion to an arbitrary format, like png to jpg.
"""
file_resource_type = self._get_resource_type(name)
if file_resource_type is None or file_resource_type == self.RESOURCE_TYPE:
return name
else:
extension = self._get_file_extension(name)
return name[:-len(extension) - 1]
|
python
|
{
"resource": ""
}
|
q8543
|
StaticCloudinaryStorage._exists_with_etag
|
train
|
def _exists_with_etag(self, name, content):
"""
Checks whether a file with a name and a content is already uploaded to Cloudinary.
Uses ETAG header and MD5 hash for the content comparison.
"""
url = self._get_url(name)
response = requests.head(url)
if response.status_code == 404:
return False
etag = response.headers['ETAG'].split('"')[1]
hash = self.file_hash(name, content)
return etag.startswith(hash)
|
python
|
{
"resource": ""
}
|
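The check above relies on the convention that, for simple (non-multipart) uploads, the ETag header carries the MD5 hex digest of the content. A self-contained sketch of that comparison, under that assumption:

# Sketch: compare local content's MD5 hash with the remote ETag header.
# Assumes the server returns a plain MD5-based ETag (no multipart suffix).
import hashlib
import requests

def exists_with_etag(url, content):
    response = requests.head(url)
    if response.status_code == 404:
        return False
    # ETag values are usually wrapped in double quotes, e.g. '"abc123"'
    etag = response.headers.get('ETag', '').strip('"')
    local_hash = hashlib.md5(content).hexdigest()
    return etag.startswith(local_hash)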
q8544
|
StaticCloudinaryStorage._save
|
train
|
def _save(self, name, content):
"""
Saves only when a file with a name and a content is not already uploaded to Cloudinary.
"""
name = self.clean_name(name) # to change to UNIX style path on windows if necessary
if not self._exists_with_etag(name, content):
content.seek(0)
super(StaticCloudinaryStorage, self)._save(name, content)
return self._prepend_prefix(name)
|
python
|
{
"resource": ""
}
|
q8545
|
BaseCart.add
|
train
|
def add(self, pk, quantity=1, **kwargs):
"""Add an item to the cart.
If the item is already in the cart, then its quantity will be
increased by `quantity` units.
Parameters
----------
pk : str or int
The primary key of the item.
quantity : int-convertible
The number of units to add.
**kwargs
Extra keyword arguments to pass to the item class
constructor.
Raises
------
ItemNotInDatabase
NegativeItemQuantity
NonConvertibleItemQuantity
TooLargeItemQuantity
ZeroItemQuantity
"""
pk = str(pk)
if pk in self.items:
existing_item = self.items[pk]
existing_item.quantity += _clean_quantity(quantity)
else:
queryset = self.get_queryset([pk])
try:
obj = queryset[0]
except IndexError:
raise ItemNotInDatabase(pk=pk)
obj = self.process_object(obj)
self.items[pk] = self.item_class(obj, quantity, **kwargs)
self.update()
|
python
|
{
"resource": ""
}
|
q8546
|
BaseCart.change_quantity
|
train
|
def change_quantity(self, pk, quantity):
"""Change the quantity of an item.
Parameters
----------
pk : str or int
The primary key of the item.
quantity : int-convertible
A new quantity.
Raises
------
ItemNotInCart
NegativeItemQuantity
NonConvertibleItemQuantity
TooLargeItemQuantity
ZeroItemQuantity
"""
pk = str(pk)
try:
item = self.items[pk]
except KeyError:
raise ItemNotInCart(pk=pk)
item.quantity = quantity
self.update()
|
python
|
{
"resource": ""
}
|
q8547
|
BaseCart.remove
|
train
|
def remove(self, pk):
"""Remove an item from the cart.
Parameters
----------
pk : str or int
The primary key of the item.
Raises
------
ItemNotInCart
"""
pk = str(pk)
try:
del self.items[pk]
except KeyError:
raise ItemNotInCart(pk=pk)
self.update()
|
python
|
{
"resource": ""
}
|
q8548
|
BaseCart.list_items
|
train
|
def list_items(self, sort_key=None, reverse=False):
"""Return a list of cart items.
Parameters
----------
sort_key : func
A function to customize the list order, same as the 'key'
argument to the built-in :func:`sorted`.
reverse: bool
If set to True, the sort order will be reversed.
Returns
-------
list
List of :attr:`item_class` instances.
Examples
--------
>>> cart = Cart(request)
>>> cart.list_items(lambda item: item.obj.name)
[<CartItem: obj=bar, quantity=3>,
<CartItem: obj=foo, quantity=1>,
<CartItem: obj=nox, quantity=5>]
>>> cart.list_items(lambda item: item.quantity, reverse=True)
[<CartItem: obj=nox, quantity=5>,
<CartItem: obj=bar, quantity=3>,
<CartItem: obj=foo, quantity=1>]
"""
items = list(self.items.values())
if sort_key:
items.sort(key=sort_key, reverse=reverse)
return items
|
python
|
{
"resource": ""
}
|
q8549
|
BaseCart.encode
|
train
|
def encode(self, formatter=None):
"""Return a representation of the cart as a JSON-response.
Parameters
----------
formatter : func, optional
A function that accepts the cart representation and returns
its formatted version.
Returns
-------
django.http.JsonResponse
Examples
--------
Assume that items with primary keys "1" and "4" are already in
the cart.
>>> cart = Cart(request)
>>> def format_total_price(cart_repr):
... return intcomma(cart_repr['totalPrice'])
...
>>> json_response = cart.encode(format_total_price)
>>> json_response.content
b'{
"items": {
'1': {"price": 100, "quantity": 10, "total": 1000},
'4': {"price": 50, "quantity": 20, "total": 1000},
},
"itemCount": 2,
"totalPrice": "2,000",
}'
"""
items = {}
# The prices are converted to strings, because they may have a
# type that can't be serialized to JSON (e.g. Decimal).
for item in self.items.values():
pk = str(item.obj.pk)
items[pk] = {
'price': str(item.price),
'quantity': item.quantity,
'total': item.total,
}
cart_repr = {
'items': items,
'itemCount': self.item_count,
'totalPrice': str(self.total_price),
}
if formatter:
cart_repr = formatter(cart_repr)
return JsonResponse(cart_repr)
|
python
|
{
"resource": ""
}
|
q8550
|
BaseCart.create_items
|
train
|
def create_items(self, session_items):
"""Instantiate cart items from session data.
The value returned by this method is used to populate the
cart's `items` attribute.
Parameters
----------
session_items : dict
A dictionary of pk-quantity mappings (each pk is a string).
For example: ``{'1': 5, '3': 2}``.
Returns
-------
dict
A map between the `session_items` keys and instances of
:attr:`item_class`. For example::
{'1': <CartItem: obj=foo, quantity=5>,
'3': <CartItem: obj=bar, quantity=2>}
"""
pks = list(session_items.keys())
items = {}
item_class = self.item_class
process_object = self.process_object
for obj in self.get_queryset(pks):
pk = str(obj.pk)
obj = process_object(obj)
items[pk] = item_class(obj, **session_items[pk])
if len(items) < len(session_items):
self._stale_pks = set(session_items).difference(items)
return items
|
python
|
{
"resource": ""
}
|
q8551
|
BaseCart.update
|
train
|
def update(self):
"""Update the cart.
First this method updates attributes dependent on the cart's
`items`, such as `total_price` or `item_count`.
After that, it saves the new cart state to the session.
Generally, you'll need to call this method by yourself, only
when implementing new methods that directly change the `items`
attribute.
"""
self.item_count = self.count_items()
self.total_price = self.count_total_price()
# Update the session
session = self.request.session
session_items = {}
for pk, item in self.items.items():
session_items[pk] = dict(quantity=item.quantity, **item._kwargs)
session_data = session[session_key]
session_data['items'] = session_items
session_data['itemCount'] = self.item_count
# The price can be of a type that can't be serialized to JSON
session_data['totalPrice'] = str(self.total_price)
session.modified = True
|
python
|
{
"resource": ""
}
|
q8552
|
BaseCart.count_items
|
train
|
def count_items(self, unique=True):
"""Count items in the cart.
Parameters
----------
unique : bool-convertible, optional
Returns
-------
int
If `unique` is truthy, then the result is the number of
items in the cart. Otherwise, it's the sum of all item
quantities.
"""
if unique:
return len(self.items)
return sum([item.quantity for item in self.items.values()])
|
python
|
{
"resource": ""
}
|
q8553
|
hill_climber
|
train
|
def hill_climber(objective_function,
initial_array,
lower_bound=-float('inf'),
acceptance_criteria=None,
max_iterations=10 ** 3):
"""
Implement a basic hill climbing algorithm.
Has two stopping conditions:
1. Maximum number of iterations;
2. A known lower bound; if None is passed then this is not used.
If acceptance_criteria (a callable) is not None then this is used to obtain
an upper bound on some other measure (different to the objective function).
In practice this is used when optimising the objective function to ensure
that we don't accept a solution that improves the objective function but that
adds more constraint violations.
"""
X = initial_array
if acceptance_criteria is not None:
acceptance_bound = acceptance_criteria(X)
iterations = 0
current_energy = objective_function(X)
while current_energy > lower_bound and iterations <= max_iterations:
iterations += 1
candidate = element_from_neighbourhood(X)
candidate_energy = objective_function(candidate)
if (candidate_energy < current_energy and
(acceptance_criteria is None or
acceptance_criteria(candidate) <= acceptance_bound)):
X = candidate
current_energy = candidate_energy
if lower_bound > -float('inf') and current_energy != lower_bound:
warnings.warn(f"Lower bound {lower_bound} not achieved after {max_iterations} iterations")
return X
|
python
|
{
"resource": ""
}
|
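As a minimal, self-contained illustration of the same hill-climbing idea (not the library's ``element_from_neighbourhood`` helper), here is a toy run that minimises the number of ones in a binary vector by flipping one random bit per iteration:

# Toy hill climber: flip one random bit and keep the change only if it lowers
# the objective (number of ones). Purely illustrative, not the library code.
import numpy as np

def toy_neighbour(x):
    candidate = x.copy()
    i = np.random.randint(len(candidate))
    candidate[i] = 1 - candidate[i]
    return candidate

def toy_hill_climber(objective, x, lower_bound=0, max_iterations=1000):
    energy = objective(x)
    for _ in range(max_iterations):
        if energy <= lower_bound:
            break
        candidate = toy_neighbour(x)
        candidate_energy = objective(candidate)
        if candidate_energy < energy:
            x, energy = candidate, candidate_energy
    return x

X = np.random.randint(0, 2, size=20)
print(toy_hill_climber(np.sum, X).sum())  # reaches 0 given enough iterations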
q8554
|
simulated_annealing
|
train
|
def simulated_annealing(objective_function,
initial_array,
initial_temperature=10 ** 4,
cooldown_rate=0.7,
acceptance_criteria=None,
lower_bound=-float('inf'),
max_iterations=10 ** 3):
"""
Implement a simulated annealing algorithm with exponential cooling
Has two stopping conditions:
1. Maximum number of iterations;
2. A known lower bound; if None is passed then this is not used.
Note that starting with an initial_temperature of 0 corresponds to a hill
climbing algorithm
"""
X = initial_array
if acceptance_criteria is not None:
acceptance_bound = acceptance_criteria(X)
best_X = X
iterations = 0
current_energy = objective_function(X)
best_energy = current_energy
temperature = initial_temperature
while current_energy > lower_bound and iterations <= max_iterations:
iterations += 1
candidate = element_from_neighbourhood(X)
candidate_energy = objective_function(candidate)
delta = candidate_energy - current_energy
if (candidate_energy < best_energy and
(acceptance_criteria is None or
acceptance_criteria(candidate) <= acceptance_bound)):
best_energy = candidate_energy
best_X = candidate
if delta < 0 or (temperature > 0 and
np.random.random() < np.exp(-delta / temperature)):
X = candidate
current_energy = candidate_energy
temperature *= (cooldown_rate) ** iterations
if lower_bound > -float('inf') and current_energy != lower_bound:
warnings.warn(f"Lower bound {lower_bound} not achieved after {max_iterations} iterations")
return best_X
|
python
|
{
"resource": ""
}
|
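The acceptance rule above is the standard Metropolis criterion: improving moves are always accepted, and a worsening move of size ``delta`` is accepted with probability ``exp(-delta / temperature)``. A small sketch of just that rule:

# Sketch of the Metropolis acceptance rule used above.
import numpy as np

def accept(delta, temperature):
    if delta < 0:
        return True
    return temperature > 0 and np.random.random() < np.exp(-delta / temperature)

print(accept(-1.0, 10.0))  # True: improving moves are always accepted
print(accept(5.0, 10.0))   # True about exp(-0.5) ~ 61% of the time
print(accept(5.0, 0.0))    # False: zero temperature reduces to hill climbing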
q8555
|
_events_available_in_scheduled_slot
|
train
|
def _events_available_in_scheduled_slot(events, slots, X, **kwargs):
"""
Constraint that ensures that an event is scheduled in slots for which it is
available
"""
slot_availability_array = lpu.slot_availability_array(slots=slots,
events=events)
label = 'Event scheduled when not available'
for row, event in enumerate(slot_availability_array):
for col, availability in enumerate(event):
if availability == 0:
yield Constraint(
f'{label} - event: {row}, slot: {col}',
X[row, col] <= availability
)
|
python
|
{
"resource": ""
}
|
q8556
|
_events_available_during_other_events
|
train
|
def _events_available_during_other_events(
events, slots, X, summation_type=None, **kwargs
):
"""
Constraint that ensures that an event is not scheduled at the same time as
another event for which it is unavailable. Unavailability of events is
either because it is explicitly defined or because they share a tag.
"""
summation = lpu.summation_functions[summation_type]
event_availability_array = lpu.event_availability_array(events)
label = 'Event clashes with another event'
for slot1, slot2 in lpu.concurrent_slots(slots):
for row, event in enumerate(event_availability_array):
if events[row].unavailability:
for col, availability in enumerate(event):
if availability == 0:
yield Constraint(
f'{label} - event: {row} and event: {col}',
summation(
(X[row, slot1], X[col, slot2])
) <= 1 + availability
)
|
python
|
{
"resource": ""
}
|
q8557
|
_upper_bound_on_event_overflow
|
train
|
def _upper_bound_on_event_overflow(
events, slots, X, beta, summation_type=None, **kwargs
):
"""
This is an artificial constraint that is used by the objective function
aiming to minimise the maximum overflow in a slot.
"""
label = 'Artificial upper bound constraint'
for row, event in enumerate(events):
for col, slot in enumerate(slots):
yield Constraint(
f'{label} - slot: {col} and event: {row}',
event.demand * X[row, col] - slot.capacity <= beta)
|
python
|
{
"resource": ""
}
|
q8558
|
heuristic
|
train
|
def heuristic(events,
slots,
objective_function=None,
algorithm=heu.hill_climber,
initial_solution=None,
initial_solution_algorithm_kwargs={},
objective_function_algorithm_kwargs={},
**kwargs):
"""
Compute a schedule using a heuristic
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
algorithm : callable
a heuristic algorithm from conference_scheduler.heuristics
initial_solution_algorithm_kwargs : dict
kwargs for the heuristic algorithm for the initial solution
objective_function_algorithm_kwargs : dict
kwargs for the heuristic algorithm for the objective function (if
necessary).
objective_function: callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of tuples giving the event and slot index (for the given
events and slots lists) for all scheduled items.
Example
-------
For a solution where
* event 0 is scheduled in slot 1
* event 1 is scheduled in slot 4
* event 2 is scheduled in slot 5
the resulting list would be::
[(0, 1), (1, 4), (2, 5)]
"""
def count_violations(array):
return len(list(val.array_violations(array, events, slots)))
if initial_solution is None:
X = heu.get_initial_array(events=events, slots=slots)
X = algorithm(initial_array=X,
objective_function=count_violations,
lower_bound=0,
**initial_solution_algorithm_kwargs)
else:
X = initial_solution
if objective_function is not None:
kwargs["beta"] = float('inf')
def func(array):
return objective_function(
events=events, slots=slots, X=array, **kwargs)
X = algorithm(initial_array=X,
objective_function=func,
acceptance_criteria=count_violations,
**objective_function_algorithm_kwargs)
return list(zip(*np.nonzero(X)))
|
python
|
{
"resource": ""
}
|
q8559
|
solution
|
train
|
def solution(events, slots, objective_function=None, solver=None, **kwargs):
"""Compute a schedule in solution form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
solver : pulp.solver
a pulp solver
objective_function: callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of tuples giving the event and slot index (for the given
events and slots lists) for all scheduled items.
Example
-------
For a solution where
* event 0 is scheduled in slot 1
* event 1 is scheduled in slot 4
* event 2 is scheduled in slot 5
the resulting list would be::
[(0, 1), (1, 4), (2, 5)]
"""
shape = Shape(len(events), len(slots))
problem = pulp.LpProblem()
X = lp.utils.variables(shape)
beta = pulp.LpVariable("upper_bound")
for constraint in lp.constraints.all_constraints(
events, slots, X, beta, 'lpsum'
):
problem += constraint.condition
if objective_function is not None:
problem += objective_function(events=events, slots=slots, X=X,
beta=beta,
**kwargs)
status = problem.solve(solver=solver)
if status == 1:
return [item for item, variable in X.items() if variable.value() > 0]
else:
raise ValueError('No valid solution found')
|
python
|
{
"resource": ""
}
|
q8560
|
array
|
train
|
def array(events, slots, objective_function=None, solver=None, **kwargs):
"""Compute a schedule in array form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
objective_function : callable
from lp_problem.objective_functions
Returns
-------
np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise
Example
-------
For 3 events, 7 slots and a solution where
* event 0 is scheduled in slot 1
* event 1 is scheduled in slot 4
* event 2 is scheduled in slot 5
the resulting array would be::
[[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0]]
"""
return conv.solution_to_array(
solution(events, slots, objective_function, solver=solver, **kwargs),
events, slots
)
|
python
|
{
"resource": ""
}
|
q8561
|
schedule
|
train
|
def schedule(events, slots, objective_function=None, solver=None, **kwargs):
"""Compute a schedule in schedule form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
solver : pulp.solver
a pulp solver
objective_function : callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of instances of :py:class:`resources.ScheduledItem`
"""
return conv.solution_to_schedule(
solution(events, slots, objective_function, solver=solver, **kwargs),
events, slots
)
|
python
|
{
"resource": ""
}
|
q8562
|
event_schedule_difference
|
train
|
def event_schedule_difference(old_schedule, new_schedule):
"""Compute the difference between two schedules from an event perspective
Parameters
----------
old_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
new_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
Returns
-------
list
A list of :py:class:`resources.ChangedEventScheduledItem` objects
Example
-------
>>> from conference_scheduler.resources import Event, Slot, ScheduledItem
>>> from conference_scheduler.scheduler import event_schedule_difference
>>> events = [Event(f'event_{i}', 30, 0) for i in range(5)]
>>> slots = [Slot(f'venue_{i}', '', 30, 100, None) for i in range(5)]
>>> old_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[1]),
... ScheduledItem(events[2], slots[2]))
>>> new_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[2]),
... ScheduledItem(events[2], slots[3]),
... ScheduledItem(events[3], slots[4]))
>>> diff = (event_schedule_difference(old_schedule, new_schedule))
>>> print([item.event.name for item in diff])
['event_1', 'event_2', 'event_3']
"""
old = {item.event.name: item for item in old_schedule}
new = {item.event.name: item for item in new_schedule}
common_events = set(old.keys()).intersection(new.keys())
added_events = new.keys() - old.keys()
removed_events = old.keys() - new.keys()
changed = [
ChangedEventScheduledItem(
old[event].event, old[event].slot, new[event].slot)
for event in common_events
if old[event].slot != new[event].slot
]
added = [
ChangedEventScheduledItem(new[event].event, None, new[event].slot)
for event in added_events
]
removed = [
ChangedEventScheduledItem(old[event].event, old[event].slot, None)
for event in removed_events
]
return sorted(changed + added + removed, key=lambda item: item.event.name)
|
python
|
{
"resource": ""
}
|
q8563
|
slot_schedule_difference
|
train
|
def slot_schedule_difference(old_schedule, new_schedule):
"""Compute the difference between two schedules from a slot perspective
Parameters
----------
old_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
new_schedule : list or tuple
of :py:class:`resources.ScheduledItem` objects
Returns
-------
list
A list of :py:class:`resources.ChangedSlotScheduledItem` objects
Example
-------
>>> from conference_scheduler.resources import Event, Slot, ScheduledItem
>>> from conference_scheduler.scheduler import slot_schedule_difference
>>> events = [Event(f'event_{i}', 30, 0) for i in range(5)]
>>> slots = [Slot(f'venue_{i}', '', 30, 100, None) for i in range(5)]
>>> old_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[1]),
... ScheduledItem(events[2], slots[2]))
>>> new_schedule = (
... ScheduledItem(events[0], slots[0]),
... ScheduledItem(events[1], slots[2]),
... ScheduledItem(events[2], slots[3]),
... ScheduledItem(events[3], slots[4]))
>>> diff = slot_schedule_difference(old_schedule, new_schedule)
>>> print([item.slot.venue for item in diff])
['venue_1', 'venue_2', 'venue_3', 'venue_4']
"""
old = {item.slot: item for item in old_schedule}
new = {item.slot: item for item in new_schedule}
common_slots = set(old.keys()).intersection(new.keys())
added_slots = new.keys() - old.keys()
removed_slots = old.keys() - new.keys()
changed = [
ChangedSlotScheduledItem(
old[slot].slot, old[slot].event, new[slot].event)
for slot in common_slots
if old[slot].event != new[slot].event
]
added = [
ChangedSlotScheduledItem(new[slot].slot, None, new[slot].event)
for slot in added_slots
]
removed = [
ChangedSlotScheduledItem(old[slot].slot, old[slot].event, None)
for slot in removed_slots
]
return sorted(
changed + added + removed,
key=lambda item: (item.slot.venue, item.slot.starts_at)
)
|
python
|
{
"resource": ""
}
|
q8564
|
array_violations
|
train
|
def array_violations(array, events, slots, beta=None):
"""Take a schedule in array form and return any violated constraints
Parameters
----------
array : np.array
a schedule in array form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
constraints : list or tuple
of generator functions which each produce instances of
resources.Constraint
Returns
-------
Generator
of strings indicating the nature of the violated
constraints
"""
return (
c.label
for c in constraints.all_constraints(events, slots, array, beta=beta)
if not c.condition
)
|
python
|
{
"resource": ""
}
|
q8565
|
is_valid_array
|
train
|
def is_valid_array(array, events, slots):
"""Take a schedule in array form and return whether it is a valid
solution for the given constraints
Parameters
----------
array : np.array
a schedule in array form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
bool
True if array represents a valid solution
"""
if len(array) == 0:
return False
violations = sum(1 for c in (array_violations(array, events, slots)))
return violations == 0
|
python
|
{
"resource": ""
}
|
q8566
|
is_valid_solution
|
train
|
def is_valid_solution(solution, events, slots):
"""Take a solution and return whether it is valid for the
given constraints
Parameters
----------
solution: list or tuple
a schedule in solution form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
bool
True if schedule is a valid solution
"""
if len(solution) == 0:
return False
array = converter.solution_to_array(solution, events, slots)
return is_valid_array(array, events, slots)
|
python
|
{
"resource": ""
}
|
q8567
|
solution_violations
|
train
|
def solution_violations(solution, events, slots):
"""Take a solution and return a list of violated constraints
Parameters
----------
solution: list or tuple
a schedule in solution form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
Generator
of strings indicating the nature of the violated
constraints
"""
array = converter.solution_to_array(solution, events, slots)
return array_violations(array, events, slots)
|
python
|
{
"resource": ""
}
|
q8568
|
is_valid_schedule
|
train
|
def is_valid_schedule(schedule, events, slots):
"""Take a schedule and return whether it is a valid solution for the
given constraints
Parameters
----------
schedule : list or tuple
a schedule in schedule form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
bool
True if schedule is a valid solution
"""
if len(schedule) == 0:
return False
array = converter.schedule_to_array(schedule, events, slots)
return is_valid_array(array, events, slots)
|
python
|
{
"resource": ""
}
|
q8569
|
schedule_violations
|
train
|
def schedule_violations(schedule, events, slots):
"""Take a schedule and return a list of violated constraints
Parameters
----------
schedule : list or tuple
a schedule in schedule form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
Generator
of strings indicating the nature of the violated
constraints
"""
array = converter.schedule_to_array(schedule, events, slots)
return array_violations(array, events, slots)
|
python
|
{
"resource": ""
}
|
q8570
|
tag_array
|
train
|
def tag_array(events):
"""
Return a numpy array mapping events to tags
- Rows correspond to events
- Columns correspond to tags
"""
all_tags = sorted(set(tag for event in events for tag in event.tags))
array = np.zeros((len(events), len(all_tags)))
for row, event in enumerate(events):
for tag in event.tags:
array[row, all_tags.index(tag)] = 1
return array
|
python
|
{
"resource": ""
}
|
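A quick, self-contained sketch of what ``tag_array`` produces, using a throwaway event type with only a ``tags`` attribute (the real ``resources.Event`` has more fields):

# Sketch of tag_array on two throwaway events; columns follow sorted tag order.
from collections import namedtuple
import numpy as np

Event = namedtuple('Event', ['tags'])  # stand-in for resources.Event
events = [Event(tags=['python', 'web']), Event(tags=['data'])]

all_tags = sorted(set(tag for event in events for tag in event.tags))
array = np.zeros((len(events), len(all_tags)))
for row, event in enumerate(events):
    for tag in event.tags:
        array[row, all_tags.index(tag)] = 1

print(all_tags)  # ['data', 'python', 'web']
print(array)     # [[0. 1. 1.]
                 #  [1. 0. 0.]]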
q8571
|
session_array
|
train
|
def session_array(slots):
"""
Return a numpy array mapping sessions to slots
- Rows correspond to sessions
- Columns correspond to slots
"""
# Flatten the list: this assumes that the sessions do not share slots
sessions = sorted(set([slot.session for slot in slots]))
array = np.zeros((len(sessions), len(slots)))
for col, slot in enumerate(slots):
array[sessions.index(slot.session), col] = 1
return array
|
python
|
{
"resource": ""
}
|
q8572
|
slot_availability_array
|
train
|
def slot_availability_array(events, slots):
"""
Return a numpy array mapping events to slots
- Rows correspond to events
- Columns correspond to slots
Array has value 0 if event cannot be scheduled in a given slot
(1 otherwise)
"""
array = np.ones((len(events), len(slots)))
for row, event in enumerate(events):
for col, slot in enumerate(slots):
if slot in event.unavailability or event.duration > slot.duration:
array[row, col] = 0
return array
|
python
|
{
"resource": ""
}
|
q8573
|
event_availability_array
|
train
|
def event_availability_array(events):
"""
Return a numpy array mapping events to events
- Rows correspond to events
- Columns correspond to events
Array has value 0 if event cannot be scheduled at same time as other event
(1 otherwise)
"""
array = np.ones((len(events), len(events)))
for row, event in enumerate(events):
for col, other_event in enumerate(events):
if row != col:
tags = set(event.tags)
events_share_tag = len(tags.intersection(other_event.tags)) > 0
if (other_event in event.unavailability) or events_share_tag:
array[row, col] = 0
array[col, row] = 0
return array
|
python
|
{
"resource": ""
}
|
q8574
|
concurrent_slots
|
train
|
def concurrent_slots(slots):
"""
Yields all concurrent slot indices.
"""
for i, slot in enumerate(slots):
for j, other_slot in enumerate(slots[i + 1:]):
if slots_overlap(slot, other_slot):
yield (i, j + i + 1)
|
python
|
{
"resource": ""
}
|
q8575
|
_events_with_diff_tag
|
train
|
def _events_with_diff_tag(talk, tag_array):
"""
Return the indices of the events that have no tag in common with the given talk
"""
event_categories = np.nonzero(tag_array[talk])[0]
return np.nonzero(sum(tag_array.transpose()[event_categories]) == 0)[0]
|
python
|
{
"resource": ""
}
|
q8576
|
solution_to_array
|
train
|
def solution_to_array(solution, events, slots):
"""Convert a schedule from solution to array form
Parameters
----------
solution : list or tuple
of tuples of event index and slot index for each scheduled item
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
Returns
-------
np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise
Example
-------
For 3 events, 7 slots and the solution::
[(0, 1), (1, 4), (2, 5)]
The resulting array would be::
[[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0]]
"""
array = np.zeros((len(events), len(slots)), dtype=np.int8)
for item in solution:
array[item[0], item[1]] = 1
return array
|
python
|
{
"resource": ""
}
|
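The docstring example above can be reproduced directly with plain numpy, which is all the conversion does:

# Reproducing the docstring example: 3 events, 7 slots, solution [(0, 1), (1, 4), (2, 5)].
import numpy as np

solution = [(0, 1), (1, 4), (2, 5)]
array = np.zeros((3, 7), dtype=np.int8)
for event_idx, slot_idx in solution:
    array[event_idx, slot_idx] = 1

print(array)
# [[0 1 0 0 0 0 0]
#  [0 0 0 0 1 0 0]
#  [0 0 0 0 0 1 0]]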
q8577
|
solution_to_schedule
|
train
|
def solution_to_schedule(solution, events, slots):
"""Convert a schedule from solution to schedule form
Parameters
----------
solution : list or tuple
of tuples of event index and slot index for each scheduled item
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
Returns
-------
list
A list of instances of :py:class:`resources.ScheduledItem`
"""
return [
ScheduledItem(
event=events[item[0]],
slot=slots[item[1]]
)
for item in solution
]
|
python
|
{
"resource": ""
}
|
q8578
|
schedule_to_array
|
train
|
def schedule_to_array(schedule, events, slots):
"""Convert a schedule from schedule to array form
Parameters
----------
schedule : list or tuple
of instances of :py:class:`resources.ScheduledItem`
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
Returns
-------
np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise
"""
array = np.zeros((len(events), len(slots)), dtype=np.int8)
for item in schedule:
array[events.index(item.event), slots.index(item.slot)] = 1
return array
|
python
|
{
"resource": ""
}
|
q8579
|
array_to_schedule
|
train
|
def array_to_schedule(array, events, slots):
"""Convert a schedule from array to schedule form
Parameters
----------
array : np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
Returns
-------
list
A list of instances of :py:class:`resources.ScheduledItem`
"""
scheduled = np.transpose(np.nonzero(array))
return [
ScheduledItem(event=events[item[0]], slot=slots[item[1]])
for item in scheduled
]
|
python
|
{
"resource": ""
}
|
q8580
|
add_line
|
train
|
def add_line(self, line, source, *lineno):
"""Append one line of generated reST to the output."""
if 'conference_scheduler.scheduler' in source:
module = 'scheduler'
else:
module = 'resources'
rst[module].append(line)
self.directive.result.append(self.indent + line, source, *lineno)
|
python
|
{
"resource": ""
}
|
q8581
|
efficiency_capacity_demand_difference
|
train
|
def efficiency_capacity_demand_difference(slots, events, X, **kwargs):
"""
A function that calculates the total difference between demand for an event
and the slot capacity it is scheduled in.
"""
overflow = 0
for row, event in enumerate(events):
for col, slot in enumerate(slots):
overflow += (event.demand - slot.capacity) * X[row, col]
return overflow
|
python
|
{
"resource": ""
}
|
q8582
|
get_initial_array
|
train
|
def get_initial_array(events, slots, seed=None):
"""
Obtain a random initial array.
"""
if seed is not None:
np.random.seed(seed)
m = len(events)
n = len(slots)
X = np.zeros((m, n))
for i, row in enumerate(X):
X[i, i] = 1
np.random.shuffle(X)
return X
|
python
|
{
"resource": ""
}
|
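The initial array is essentially an identity matrix with its rows shuffled, so each event occupies exactly one distinct slot (this assumes no more events than slots); a small check of that property:

# Sketch: the initial array is an identity-like matrix with shuffled rows.
import numpy as np

m, n = 4, 6  # 4 events, 6 slots (assumes m <= n)
X = np.zeros((m, n))
for i in range(m):
    X[i, i] = 1
np.random.shuffle(X)  # shuffles the rows in place

print(X.sum(axis=1))               # [1. 1. 1. 1.]  one slot per event
print((X.sum(axis=0) <= 1).all())  # True: no slot is used twice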
q8583
|
fcs
|
train
|
def fcs(bits):
'''
Append running bitwise FCS CRC checksum to end of generator
'''
fcs = FCS()
for bit in bits:
yield bit
fcs.update_bit(bit)
# test = bitarray()
# for byte in (digest & 0xff, digest >> 8):
# print byte
# for i in range(8):
# b = (byte >> i) & 1 == 1
# test.append(b)
# yield b
# append fcs digest to bit stream
# n.b. wire format is little-bit-endianness in addition to little-endian
digest = bitarray(endian="little")
digest.frombytes(fcs.digest())
for bit in digest:
yield bit
|
python
|
{
"resource": ""
}
|
q8584
|
modulate
|
train
|
def modulate(data):
'''
Generate Bell 202 AFSK samples for the given symbol generator
Consumes raw wire symbols and produces the corresponding AFSK samples.
'''
seconds_per_sample = 1.0 / audiogen.sampler.FRAME_RATE
phase, seconds, bits = 0, 0, 0
# construct generators
clock = (x / BAUD_RATE for x in itertools.count(1))
tones = (MARK_HZ if bit else SPACE_HZ for bit in data)
for boundary, frequency in itertools.izip(clock, tones):
# frequency of current symbol is determined by how much
# we advance the signal's phase in each audio frame
phase_change_per_sample = TWO_PI / (audiogen.sampler.FRAME_RATE / frequency)
# produce samples for the current symbol
# until we reach the next clock boundary
while seconds < boundary:
yield math.sin(phase)
seconds += seconds_per_sample
phase += phase_change_per_sample
if phase > TWO_PI:
phase -= TWO_PI
bits += 1
logger.debug("bits = %d, time = %.7f ms, expected time = %.7f ms, error = %.7f ms, baud rate = %.6f Hz" \
% (bits, 1000 * seconds, 1000 * bits / BAUD_RATE, 1000 * (seconds - bits / BAUD_RATE), bits / seconds))
|
python
|
{
"resource": ""
}
|
q8585
|
HackerNews._get_sync
|
train
|
def _get_sync(self, url):
"""Internal method used for GET requests
Args:
url (str): URL to fetch
Returns:
Individual URL request's response
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response.json()
else:
raise HTTPError
|
python
|
{
"resource": ""
}
|
q8586
|
HackerNews._get_async
|
train
|
async def _get_async(self, url, session):
"""Asynchronous internal method used for GET requests
Args:
url (str): URL to fetch
session (obj): aiohttp client session for async loop
Returns:
data (obj): Individual URL request's response coroutine
"""
data = None
async with session.get(url) as resp:
if resp.status == 200:
data = await resp.json()
return data
|
python
|
{
"resource": ""
}
|
q8587
|
HackerNews._async_loop
|
train
|
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses
|
python
|
{
"resource": ""
}
|
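The fan-out pattern above (one coroutine per URL, gathered at the end) can be sketched independently of this class with just ``aiohttp`` and ``asyncio``:

# Self-contained sketch of the fetch-many-URLs pattern used by _async_loop.
import asyncio
import aiohttp

async def fetch(session, url):
    async with session.get(url) as resp:
        if resp.status == 200:
            return await resp.json()
        return None

async def fetch_all(urls):
    async with aiohttp.ClientSession() as session:
        tasks = [fetch(session, url) for url in urls]
        return await asyncio.gather(*tasks)

urls = ["https://hacker-news.firebaseio.com/v0/item/1.json"]
print(asyncio.run(fetch_all(urls)))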
q8588
|
HackerNews._run_async
|
train
|
def _run_async(self, urls):
"""Asynchronous event loop execution
Args:
urls (list): URLs to fetch
Returns:
results (obj): All URL requests' responses
"""
loop = asyncio.get_event_loop()
results = loop.run_until_complete(self._async_loop(urls))
return results
|
python
|
{
"resource": ""
}
|
q8589
|
HackerNews.get_item
|
train
|
def get_item(self, item_id, expand=False):
"""Returns Hacker News `Item` object.
Fetches the data from url:
https://hacker-news.firebaseio.com/v0/item/<item_id>.json
e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
Args:
item_id (int or string): Unique item id of Hacker News story,
comment etc.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
url = urljoin(self.item_url, F"{item_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidItemID
item = Item(response)
if expand:
item.by = self.get_user(item.by)
item.kids = self.get_items_by_ids(item.kids) if item.kids else None
item.parent = self.get_item(item.parent) if item.parent else None
item.poll = self.get_item(item.poll) if item.poll else None
item.parts = (
self.get_items_by_ids(item.parts) if item.parts else None
)
return item
|
python
|
{
"resource": ""
}
|
q8590
|
HackerNews.get_items_by_ids
|
train
|
def get_items_by_ids(self, item_ids, item_type=None):
"""Given a list of item ids, return all the Item objects
Args:
item_ids (obj): List of item IDs to query
item_type (str): (optional) Item type to filter results with
Returns:
List of `Item` objects for given item IDs and given item type
"""
urls = [urljoin(self.item_url, F"{i}.json") for i in item_ids]
result = self._run_async(urls=urls)
items = [Item(r) for r in result if r]
if item_type:
return [item for item in items if item.item_type == item_type]
else:
return items
|
python
|
{
"resource": ""
}
|
q8591
|
HackerNews.get_user
|
train
|
def get_user(self, user_id, expand=False):
"""Returns Hacker News `User` object.
Fetches data from the url:
https://hacker-news.firebaseio.com/v0/user/<user_id>.json
e.g. https://hacker-news.firebaseio.com/v0/user/pg.json
Args:
user_id (string): unique user id of a Hacker News user.
expand (bool): Flag to indicate whether to
transform all IDs into objects.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
url = urljoin(self.user_url, F"{user_id}.json")
response = self._get_sync(url)
if not response:
raise InvalidUserID
user = User(response)
if expand and user.submitted:
items = self.get_items_by_ids(user.submitted)
user_opt = {
'stories': 'story',
'comments': 'comment',
'jobs': 'job',
'polls': 'poll',
'pollopts': 'pollopt'
}
for key, value in user_opt.items():
setattr(
user,
key,
[i for i in items if i.item_type == value]
)
return user
|
python
|
{
"resource": ""
}
|
q8592
|
HackerNews.get_users_by_ids
|
train
|
def get_users_by_ids(self, user_ids):
"""
Given a list of user ids, return all the User objects
"""
urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
result = self._run_async(urls=urls)
return [User(r) for r in result if r]
|
python
|
{
"resource": ""
}
|
q8593
|
HackerNews.top_stories
|
train
|
def top_stories(self, raw=False, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to represent all
objects in raw json.
Returns:
`list` object containing ids of top stories.
"""
top_stories = self._get_stories('topstories', limit)
if raw:
top_stories = [story.raw for story in top_stories]
return top_stories
|
python
|
{
"resource": ""
}
|
q8594
|
HackerNews.new_stories
|
train
|
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of new stories.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories
|
python
|
{
"resource": ""
}
|
q8595
|
HackerNews.ask_stories
|
train
|
def ask_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Ask HN stories.
"""
ask_stories = self._get_stories('askstories', limit)
if raw:
ask_stories = [story.raw for story in ask_stories]
return ask_stories
|
python
|
{
"resource": ""
}
|
q8596
|
HackerNews.show_stories
|
train
|
def show_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Show HN stories.
"""
show_stories = self._get_stories('showstories', limit)
if raw:
show_stories = [story.raw for story in show_stories]
return show_stories
|
python
|
{
"resource": ""
}
|
q8597
|
HackerNews.job_stories
|
train
|
def job_stories(self, raw=False, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
Returns:
`list` object containing ids of Job stories.
"""
job_stories = self._get_stories('jobstories', limit)
if raw:
job_stories = [story.raw for story in job_stories]
return job_stories
|
python
|
{
"resource": ""
}
|
q8598
|
HackerNews.get_max_item
|
train
|
def get_max_item(self, expand=False):
"""The current largest item id
Fetches data from URL:
https://hacker-news.firebaseio.com/v0/maxitem.json
Args:
expand (bool): Flag to indicate whether to transform all
IDs into objects.
Returns:
`int` if successful.
"""
url = urljoin(self.base_url, 'maxitem.json')
response = self._get_sync(url)
if expand:
return self.get_item(response)
else:
return response
|
python
|
{
"resource": ""
}
|
q8599
|
HackerNews.get_last
|
train
|
def get_last(self, num=10):
"""Returns last `num` of HN stories
Downloads all the HN articles and returns them as Item objects
Returns:
`list` object containing ids of HN stories.
"""
max_item = self.get_max_item()
urls = [urljoin(self.item_url, F"{i}.json") for i in range(
max_item - num + 1, max_item + 1)]
result = self._run_async(urls=urls)
return [Item(r) for r in result if r]
|
python
|
{
"resource": ""
}
|