code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
if self._receiver_directory is None: self._receiver_directory = self.downloads_directory.parent return self._receiver_directory
def receiver_directory(self)
Parent directory of the downloads directory
4.287873
3.219565
1.331817
filename = eval_path(filename) u = self.upload(filename, self.torrents_directory) t = self._load_torrent(u) if select: return t return t.submit()
def add_task_bt(self, filename, select=False)
Add a new BT task :param str filename: path to torrent file to upload :param bool select: whether to select files in the torrent. * True: it returns the opened torrent (:class:`.Torrent`) and can then iterate files in :attr:`.Torrent.files` and select/unselect them before calling :meth:`.Torrent.submit` * False: it will submit the torrent with default selected files
9.147405
9.171226
0.997403
res = self._req_get_storage_info() if human: res['total'] = humanize.naturalsize(res['total'], binary=True) res['used'] = humanize.naturalsize(res['used'], binary=True) return res
def get_storage_info(self, human=False)
Get storage info :param bool human: whether return human-readable size :return: total and used storage :rtype: dict
2.610328
2.593226
1.006595
filename = eval_path(filename) if directory is None: directory = self.downloads_directory # First request res1 = self._req_upload(filename, directory) data1 = res1['data'] file_id = data1['file_id'] # Second request res2 = self._req_file(file_id) data2 = res2['data'][0] data2.update(**data1) return _instantiate_uploaded_file(self, data2)
def upload(self, filename, directory=None)
Upload a file ``filename`` to ``directory`` :param str filename: path to the file to upload :param directory: destination :class:`.Directory`, defaults to :attr:`.API.downloads_directory` if None :return: the uploaded file :rtype: :class:`.File`
4.38177
4.159816
1.053357
url = obj.get_download_url(proapi) download(url, path=path, session=self.http.session, show_progress=show_progress, resume=resume, auto_retry=auto_retry)
def download(self, obj, path=None, show_progress=True, resume=True, auto_retry=True, proapi=False)
Download a file :param obj: :class:`.File` object :param str path: local path :param bool show_progress: whether to show download progress :param bool resume: whether to resume on unfinished downloads identified by filename :param bool auto_retry: whether to retry automatically upon closed transfer until the file's download is finished :param bool proapi: whether to use pro API
2.837272
3.878629
0.731514
kwargs = {} kwargs['search_value'] = keyword root = self.root_directory entries = root._load_entries(func=self._req_files_search, count=count, page=1, **kwargs) res = [] for entry in entries: if 'pid' in entry: res.append(_instantiate_directory(self, entry)) else: res.append(_instantiate_file(self, entry)) return res
def search(self, keyword, count=30)
Search files or directories :param str keyword: keyword :param int count: number of entries to be listed
5.548213
5.884436
0.942862
fcids = [] for entry in entries: if isinstance(entry, File): fcid = entry.fid elif isinstance(entry, Directory): fcid = entry.cid else: raise APIError('Invalid BaseFile instance for an entry.') fcids.append(fcid) if not isinstance(directory, Directory): raise APIError('Invalid destination directory.') if self._req_files_move(directory.cid, fcids): for entry in entries: if isinstance(entry, File): entry.cid = directory.cid entry.reload() return True else: raise APIError('Error moving entries.')
def move(self, entries, directory)
Move one or more entries (file or directory) to the destination directory :param list entries: a list of source entries (:class:`.BaseFile` object) :param directory: destination directory :return: whether the action is successful :raise: :class:`.APIError` if something bad happened
3.658937
3.135987
1.166758
fcid = None if isinstance(entry, File): fcid = entry.fid elif isinstance(entry, Directory): fcid = entry.cid else: raise APIError('Invalid BaseFile instance for an entry.') is_mark = 0 if mark is True: is_mark = 1 if self._req_files_edit(fcid, name, is_mark): entry.reload() return True else: raise APIError('Error editing the entry.')
def edit(self, entry, name, mark=False)
Edit an entry (file or directory) :param entry: :class:`.BaseFile` object :param str name: new name for the entry :param bool mark: whether to bookmark the entry
4.951583
4.943505
1.001634
pid = None cid = None if isinstance(parent, Directory): pid = parent.cid else: raise('Invalid Directory instance.') cid = self._req_files_add(pid, name)['cid'] return self._load_directory(cid)
def mkdir(self, parent, name)
Create a directory :param parent: the parent directory :param str name: the name of the new directory :return: the new directory :rtype: :class:`.Directory`
8.623545
9.4637
0.911223
url = 'http://115.com/' params = { 'ct': 'offline', 'ac': 'space', '_': get_timestamp(13) } _sign = os.environ.get('U115_BROWSER_SIGN') if _sign is not None: _time = os.environ.get('U115_BROWSER_TIME') if _time is None: msg = 'U115_BROWSER_TIME is required given U115_BROWSER_SIGN.' raise APIError(msg) params['sign'] = _sign params['time'] = _time params['uid'] = self.user_id req = Request(url=url, params=params) r = self.http.send(req) if r.state: self._signatures['offline_space'] = r.content['sign'] self._lixian_timestamp = r.content['time'] else: msg = 'Failed to retrieve signatures.' raise RequestFailure(msg)
def _req_offline_space(self)
Required before accessing lixian tasks
4.152709
4.015731
1.03411
url = 'http://115.com/lixian/' params = {'ct': 'lixian', 'ac': 'task_lists'} self._load_signatures() data = { 'page': page, 'uid': self.user_id, 'sign': self._signatures['offline_space'], 'time': self._lixian_timestamp, } req = Request(method='POST', url=url, params=params, data=data) res = self.http.send(req) if res.state: self._task_count = res.content['count'] self._task_quota = res.content['quota'] return res.content['tasks'] else: msg = 'Failed to get tasks.' raise RequestFailure(msg)
def _req_lixian_task_lists(self, page=1)
This request will cause the system to create a default downloads directory if it does not exist
4.509568
4.401131
1.024638
url = 'http://115.com/' params = { 'ct': 'lixian', 'ac': 'get_id', 'torrent': 1 if torrent else None, '_': get_timestamp(13) } req = Request(method='GET', url=url, params=params) res = self.http.send(req) return res.content
def _req_lixian_get_id(self, torrent=False)
Get `cid` of lixian space directory
3.982832
3.861095
1.031529
self._load_signatures() url = 'http://115.com/lixian/' params = { 'ct': 'lixian', 'ac': 'torrent', } data = { 'pickcode': u.pickcode, 'sha1': u.sha, 'uid': self.user_id, 'sign': self._signatures['offline_space'], 'time': self._lixian_timestamp, } req = Request(method='POST', url=url, params=params, data=data) res = self.http.send(req) if res.state: return res.content else: msg = res.content.get('error_msg') self.logger.error(msg) raise RequestFailure('Failed to open torrent.')
def _req_lixian_torrent(self, u)
:param u: uploaded torrent file
5.232212
4.961862
1.054485
params = locals() del params['self'] req = Request(method='GET', url=self.aps_natsort_url, params=params) res = self.http.send(req) if res.state: return res.content else: raise RequestFailure('Failed to access files API.')
def _req_aps_natsort_files(self, cid, offset, limit, o='file_name', asc=1, aid=1, show_dir=1, code=None, scid=None, snap=0, natsort=1, source=None, type=0, format='json', star=None, is_share=None)
When :meth:`.API._req_files` is called with `o='filename'` and `natsort=1`, API access will fail and :meth:`.API._req_aps_natsort_files` is subsequently called with the same kwargs. Refer to the implementation in :meth:`.Directory.list`
4.421305
4.901108
0.902103
url = self.web_api_url + '/edit' data = locals() del data['self'] req = Request(method='POST', url=url, data=data) res = self.http.send(req) if res.state: return True else: raise RequestFailure('Failed to access files API.')
def _req_files_edit(self, fid, file_name=None, is_mark=0)
Edit a file or directory
4.91737
4.810341
1.02225
url = self.web_api_url + '/add' data = locals() del data['self'] req = Request(method='POST', url=url, data=data) res = self.http.send(req) if res.state: return res.content else: raise RequestFailure('Failed to access files API.')
def _req_files_add(self, pid, cname)
Add a directory :param str pid: parent directory id :param str cname: directory name
4.874395
5.140031
0.94832
url = self.web_api_url + '/move' data = {} data['pid'] = pid for i, fid in enumerate(fids): data['fid[%d]' % i] = fid req = Request(method='POST', url=url, data=data) res = self.http.send(req) if res.state: return True else: raise RequestFailure('Failed to access files API.')
def _req_files_move(self, pid, fids)
Move files or directories :param str pid: destination directory id :param list fids: a list of ids of files or directories to be moved
3.778598
4.172993
0.905489
res = self._req_files(cid=cid, offset=0, limit=1, show_dir=1) path = res['path'] count = res['count'] for d in path: if str(d['cid']) == str(cid): res = { 'cid': d['cid'], 'name': d['name'], 'pid': d['pid'], 'count': count, } return res else: raise RequestFailure('No directory found.')
def _req_directory(self, cid)
Return name and pid of a directory identified by its cid
3.708932
3.507494
1.057431
self._upload_url = self._load_upload_url() self.http.get('http://upload.115.com/crossdomain.xml') b = os.path.basename(filename) target = 'U_1_' + str(directory.cid) files = { 'Filename': ('', quote(b), ''), 'target': ('', target, ''), 'Filedata': (quote(b), open(filename, 'rb'), ''), 'Upload': ('', 'Submit Query', ''), } req = Request(method='POST', url=self._upload_url, files=files) res = self.http.send(req) if res.state: return res.content else: msg = None if res.content['code'] == 990002: msg = 'Invalid parameter.' elif res.content['code'] == 1001: msg = 'Torrent upload failed. Please try again later.' raise RequestFailure(msg)
def _req_upload(self, filename, directory)
Raw request to upload a file ``filename``
4.798203
4.900242
0.979177
kwargs = self._req_directory(0) self._root_directory = Directory(api=self, **kwargs)
def _load_root_directory(self)
Load root directory, which has a cid of 0
14.741475
9.846272
1.497163
r = self._req_lixian_get_id(torrent=True) self._downloads_directory = self._load_directory(r['cid'])
def _load_torrents_directory(self)
Load torrents directory If it does not exist yet, this request will cause the system to create one
22.211872
21.36458
1.039659
r = self._req_lixian_get_id(torrent=False) self._downloads_directory = self._load_directory(r['cid'])
def _load_downloads_directory(self)
Load downloads directory If it does not exist yet, this request will cause the system to create one
21.281197
21.3962
0.994625
src_url = 'http://115.com' r = self.http.get(src_url) soup = BeautifulSoup(r.content) scripts = [script.text for script in soup.find_all('script')] text = '\n'.join(scripts) pattern = "%s\s*=\s*(.*);" % (variable.upper()) m = re.search(pattern, text) if not m: msg = 'Cannot parse source JavaScript for %s.' % variable raise APIError(msg) return json.loads(m.group(1).strip())
def _parse_src_js_var(self, variable)
Parse JavaScript variables in the source page
3.68474
3.494855
1.054333
fcid = None pid = None if isinstance(self, File): fcid = self.fid pid = self.cid elif isinstance(self, Directory): fcid = self.cid pid = self.pid else: raise APIError('Invalid BaseFile instance.') if not self._deleted: if self.api._req_rb_delete(fcid, pid): self._deleted = True return True else: raise APIError('This file or directory is already deleted.')
def delete(self)
Delete this file or directory :return: whether deletion is successful :raise: :class:`.APIError` if this file or directory is already deleted
4.939784
4.188141
1.179469
self.api.edit(self, name, mark)
def edit(self, name, mark=False)
Edit this file or directory :param str name: new name for this entry :param bool mark: whether to bookmark this entry
11.20172
14.480541
0.773571
if self._directory is None: self._directory = self.api._load_directory(self.cid) return self._directory
def directory(self)
Directory that holds this file
5.895621
5.470892
1.077634
if self._download_url is None: self._download_url = \ self.api._req_files_download_url(self.pickcode, proapi) return self._download_url
def get_download_url(self, proapi=False)
Get this file's download URL :param bool proapi: whether to use pro API
5.942665
6.58616
0.902296
self.api.download(self, path, show_progress, resume, auto_retry, proapi)
def download(self, path=None, show_progress=True, resume=True, auto_retry=True, proapi=False)
Download this file
3.436464
3.374786
1.018276
res = self.api._req_file(self.fid) data = res['data'][0] self.name = data['file_name'] self.sha = data['sha1'] self.pickcode = data['pick_code']
def reload(self)
Reload file info and metadata * name * sha * pickcode
7.022327
3.650017
1.923916
if self._parent is None: if self.pid is not None: self._parent = self.api._load_directory(self.pid) return self._parent
def parent(self)
Parent directory that holds this directory
4.705355
4.477338
1.050927
r = self.api._req_directory(self.cid) self.pid = r['pid'] self.name = r['name'] self._count = r['count']
def reload(self)
Reload directory info and metadata * `name` * `pid` * `count`
7.845079
5.520624
1.421049
if entries is None: entries = [] res = \ func(offset=(page - 1) * self.max_entries_per_load, limit=self.max_entries_per_load, **kwargs) loaded_entries = [ entry for entry in res['data'][:count] ] #total_count = res['count'] total_count = self.count # count should never be greater than total_count if count > total_count: count = total_count if count <= self.max_entries_per_load: return entries + loaded_entries else: cur_count = count - self.max_entries_per_load return self._load_entries( func=func, count=cur_count, page=page + 1, entries=entries + loaded_entries, **kwargs)
def _load_entries(self, func, count, page=1, entries=None, **kwargs)
Load entries :param function func: function (:meth:`.API._req_files` or :meth:`.API._req_search`) that returns entries :param int count: number of entries to load. This value should never be greater than self.count :param int page: page number (starting from 1)
2.528788
2.621478
0.964642
if self.cid is None: return False self.reload() kwargs = {} # `cid` is the only required argument kwargs['cid'] = self.cid kwargs['asc'] = 1 if asc is True else 0 kwargs['show_dir'] = 1 if show_dir is True else 0 kwargs['natsort'] = 1 if natsort is True else 0 kwargs['o'] = order # When the downloads directory exists along with its parent directory, # the receiver directory, its parent's count (receiver directory's # count) does not include the downloads directory. This behavior is # similar to its parent's parent (root), the count of which does not # include the receiver directory. # The following code fixed this behavior so that a directory's # count correctly reflects the actual number of entries in it # The side-effect that this code may ensure that downloads directory # exists, causing the system to create the receiver directory and # downloads directory, if they do not exist. if self.is_root or self == self.api.receiver_directory: self._count += 1 if self.count <= count: # count should never be greater than self.count count = self.count try: entries = self._load_entries(func=self.api._req_files, count=count, page=1, **kwargs) # When natsort=1 and order='file_name', API access will fail except RequestFailure as e: if natsort is True and order == 'file_name': entries = \ self._load_entries(func=self.api._req_aps_natsort_files, count=count, page=1, **kwargs) else: raise e res = [] for entry in entries: if 'pid' in entry: res.append(_instantiate_directory(self.api, entry)) else: res.append(_instantiate_file(self.api, entry)) return res
def list(self, count=30, order='user_ptime', asc=False, show_dir=True, natsort=True)
List directory contents :param int count: number of entries to be listed :param str order: order of entries, originally named `o`. This value may be one of `user_ptime` (default), `file_size` and `file_name` :param bool asc: whether in ascending order :param bool show_dir: whether to show directories :param bool natsort: whether to use natural sort Return a list of :class:`.File` or :class:`.Directory` objects
5.331063
5.186779
1.027818
if self.cid is None: msg = 'Cannot determine whether this task is a directory.' if not self.is_transferred: msg += ' This task has not been transferred.' raise TaskError(msg) return self.api.downloads_directory.cid != self.cid
def is_directory(self)
:return: whether this task is associated with a directory. :rtype: bool
8.21823
6.69316
1.227855
if not self._deleted: if self.api._req_lixian_task_del(self): self._deleted = True return True raise TaskError('This task is already deleted.')
def delete(self)
Delete task (does not influence its corresponding directory) :return: whether deletion is successful :raise: :class:`.TaskError` if the task is already deleted
10.424869
8.336575
1.250498
res = None if self._deleted: return 'DELETED' if self.status == 1: res = 'DOWNLOADING' elif self.status == 2: if self.move == 0: res = 'BEING TRANSFERRED' elif self.move == 1: res = 'TRANSFERRED' elif self.move == 2: res = 'PARTIALLY TRANSFERRED' elif self.status == 4: res = 'SEARCHING RESOURCES' elif self.status == -1: res = 'FAILED' if res is not None: return res return 'UNKNOWN STATUS'
def status_human(self)
Human readable status :return: * `DOWNLOADING`: the task is downloading files * `BEING TRANSFERRED`: the task is being transferred * `TRANSFERRED`: the task has been transferred to downloads \ directory * `SEARCHING RESOURCES`: the task is searching resources * `FAILED`: the task is failed * `DELETED`: the task is deleted * `UNKNOWN STATUS` :rtype: str
2.802597
2.134327
1.313106
if not self.is_directory: msg = 'This task is a file task with no associated directory.' raise TaskError(msg) if self._directory is None: if self.is_transferred: self._directory = self.api._load_directory(self.cid) if self._directory is None: msg = 'No directory assciated with this task: Task is %s.' % \ self.status_human.lower() raise TaskError(msg) return self._directory
def directory(self)
Associated directory, if any, with this task
5.004203
4.452402
1.123933
return self.directory.list(count, order, asc, show_dir, natsort)
def list(self, count=30, order='user_ptime', asc=False, show_dir=True, natsort=True)
List files of the associated directory to this task. :param int count: number of entries to be listed :param str order: originally named `o` :param bool asc: whether in ascending order :param bool show_dir: whether to show directories
3.608506
7.249257
0.497776
if self.api._req_lixian_add_task_bt(self): self.submitted = True return True return False
def submit(self)
Submit this torrent and create a new task
21.107029
17.467388
1.208368
manifest = self.storage.load_manifest() if self.keep_unhashed_files: if PY3: needful_files = set(manifest.keys() | manifest.values()) else: needful_files = set(manifest.keys() + manifest.values()) needful_files = {self.storage.clean_name(file) for file in needful_files} else: needful_files = set(manifest.values()) return {self.process_file(file) for file in needful_files}
def get_needful_files(self)
Returns currently used static files. Assumes that manifest staticfiles.json is up-to-date.
3.15071
3.13176
1.006051
for field in model._meta.fields: if isinstance(field, models.FileField): yield field
def model_file_fields(self, model)
Generator yielding all instances of FileField and its subclasses of a model.
3.033044
2.116152
1.433282
resource_types = set() for model in self.models(): for field in self.model_file_fields(model): resource_type = field.storage.RESOURCE_TYPE resource_types.add(resource_type) return resource_types
def get_resource_types(self)
Returns set of resource types of FileFields of all registered models. Needed by Cloudinary as resource type is needed to browse or delete specific files.
4.027657
2.527843
1.593318
needful_files = [] for model in self.models(): media_fields = [] for field in self.model_file_fields(model): media_fields.append(field.name) if media_fields: exclude_options = {media_field: '' for media_field in media_fields} model_uploaded_media = model.objects.exclude(**exclude_options).values_list(*media_fields) needful_files.extend(model_uploaded_media) return set(chain.from_iterable(needful_files))
def get_needful_files(self)
Returns set of media files associated with models. Those files won't be deleted.
3.408754
2.916847
1.168643
files_to_remove = {} needful_files = self.get_needful_files() for resources_type, resources in self.get_uploaded_resources(): exclude_paths = self.get_exclude_paths() resources = {resource for resource in resources if not resource.startswith(exclude_paths)} files_to_remove[resources_type] = resources - needful_files return files_to_remove
def get_files_to_remove(self)
Returns orphaned media files to be removed grouped by resource type. All files which paths start with any of exclude paths are ignored.
3.700424
3.057181
1.210404
extension = self._get_file_extension(name) if extension is None: return self.RESOURCE_TYPE elif extension in app_settings.STATIC_IMAGES_EXTENSIONS: return RESOURCE_TYPES['IMAGE'] elif extension in app_settings.STATIC_VIDEOS_EXTENSIONS: return RESOURCE_TYPES['VIDEO'] else: return self.RESOURCE_TYPE
def _get_resource_type(self, name)
Implemented as static files can be of different resource types. Because web developers are the people who control those files, we can distinguish them simply by looking at their extensions, we don't need any content based validation.
3.270233
2.732583
1.196755
file_resource_type = self._get_resource_type(name) if file_resource_type is None or file_resource_type == self.RESOURCE_TYPE: return name else: extension = self._get_file_extension(name) return name[:-len(extension) - 1]
def _remove_extension_for_non_raw_file(self, name)
Implemented as image and video files' Cloudinary public id shouldn't contain file extensions, otherwise Cloudinary url would contain doubled extension - Cloudinary adds extension to url to allow file conversion to arbitrary file, like png to jpg.
2.946368
2.770092
1.063635
url = self._get_url(name) response = requests.head(url) if response.status_code == 404: return False etag = response.headers['ETAG'].split('"')[1] hash = self.file_hash(name, content) return etag.startswith(hash)
def _exists_with_etag(self, name, content)
Checks whether a file with a name and a content is already uploaded to Cloudinary. Uses ETAG header and MD5 hash for the content comparison.
3.162062
2.668478
1.184969
name = self.clean_name(name) # to change to UNIX style path on windows if necessary if not self._exists_with_etag(name, content): content.seek(0) super(StaticCloudinaryStorage, self)._save(name, content) return self._prepend_prefix(name)
def _save(self, name, content)
Saves only when a file with a name and a content is not already uploaded to Cloudinary.
8.700646
7.162721
1.214712
pk = str(pk) if pk in self.items: existing_item = self.items[pk] existing_item.quantity += _clean_quantity(quantity) else: queryset = self.get_queryset([pk]) try: obj = queryset[0] except IndexError: raise ItemNotInDatabase(pk=pk) obj = self.process_object(obj) self.items[pk] = self.item_class(obj, quantity, **kwargs) self.update()
def add(self, pk, quantity=1, **kwargs)
Add an item to the cart. If the item is already in the cart, then its quantity will be increased by `quantity` units. Parameters ---------- pk : str or int The primary key of the item. quantity : int-convertible A number of units to add. **kwargs Extra keyword arguments to pass to the item class constructor. Raises ------ ItemNotInDatabase NegativeItemQuantity NonConvertibleItemQuantity TooLargeItemQuantity ZeroItemQuantity
3.265896
3.505682
0.931601
pk = str(pk) try: item = self.items[pk] except KeyError: raise ItemNotInCart(pk=pk) item.quantity = quantity self.update()
def change_quantity(self, pk, quantity)
Change the quantity of an item. Parameters ---------- pk : str or int The primary key of the item. quantity : int-convertible A new quantity. Raises ------ ItemNotInCart NegativeItemQuantity NonConvertibleItemQuantity TooLargeItemQuantity ZeroItemQuantity
4.093052
4.341912
0.942684
pk = str(pk) try: del self.items[pk] except KeyError: raise ItemNotInCart(pk=pk) self.update()
def remove(self, pk)
Remove an item from the cart. Parameters ---------- pk : str or int The primary key of the item. Raises ------ ItemNotInCart
4.802756
6.319798
0.759954
items = list(self.items.values()) if sort_key: items.sort(key=sort_key, reverse=reverse) return items
def list_items(self, sort_key=None, reverse=False)
Return a list of cart items. Parameters ---------- sort_key : func A function to customize the list order, same as the 'key' argument to the built-in :func:`sorted`. reverse: bool If set to True, the sort order will be reversed. Returns ------- list List of :attr:`item_class` instances. Examples -------- >>> cart = Cart(request) >>> cart.list_items(lambda item: item.obj.name) [<CartItem: obj=bar, quantity=3>, <CartItem: obj=foo, quantity=1>, <CartItem: obj=nox, quantity=5>] >>> cart.list_items(lambda item: item.quantity, reverse=True) [<CartItem: obj=nox, quantity=5>, <CartItem: obj=bar, quantity=3>, <CartItem: obj=foo, quantity=1>]
2.332011
4.782215
0.487642
items = {} # The prices are converted to strings, because they may have a # type that can't be serialized to JSON (e.g. Decimal). for item in self.items.values(): pk = str(item.obj.pk) items[pk] = { 'price': str(item.price), 'quantity': item.quantity, 'total': item.total, } cart_repr = { 'items': items, 'itemCount': self.item_count, 'totalPrice': str(self.total_price), } if formatter: cart_repr = formatter(cart_repr) return JsonResponse(cart_repr)
def encode(self, formatter=None)
Return a representation of the cart as a JSON-response. Parameters ---------- formatter : func, optional A function that accepts the cart representation and returns its formatted version. Returns ------- django.http.JsonResponse Examples -------- Assume that items with primary keys "1" and "4" are already in the cart. >>> cart = Cart(request) >>> def format_total_price(cart_repr): ... return intcomma(cart_repr['totalPrice']) ... >>> json_response = cart.encode(format_total_price) >>> json_response.content b'{ "items": { '1': {"price": 100, "quantity": 10, "total": 1000}, '4': {"price": 50, "quantity": 20, "total": 1000}, }, "itemCount": 2, "totalPrice": "2,000", }'
3.544817
2.62252
1.351683
pks = list(session_items.keys()) items = {} item_class = self.item_class process_object = self.process_object for obj in self.get_queryset(pks): pk = str(obj.pk) obj = process_object(obj) items[pk] = item_class(obj, **session_items[pk]) if len(items) < len(session_items): self._stale_pks = set(session_items).difference(items) return items
def create_items(self, session_items)
Instantiate cart items from session data. The value returned by this method is used to populate the cart's `items` attribute. Parameters ---------- session_items : dict A dictionary of pk-quantity mappings (each pk is a string). For example: ``{'1': 5, '3': 2}``. Returns ------- dict A map between the `session_items` keys and instances of :attr:`item_class`. For example:: {'1': <CartItem: obj=foo, quantity=5>, '3': <CartItem: obj=bar, quantity=2>}
3.440676
3.27126
1.051789
self.item_count = self.count_items() self.total_price = self.count_total_price() # Update the session session = self.request.session session_items = {} for pk, item in self.items.items(): session_items[pk] = dict(quantity=item.quantity, **item._kwargs) session_data = session[session_key] session_data['items'] = session_items session_data['itemCount'] = self.item_count # The price can be of a type that can't be serialized to JSON session_data['totalPrice'] = str(self.total_price) session.modified = True
def update(self)
Update the cart. First this method updates attributes dependent on the cart's `items`, such as `total_price` or `item_count`. After that, it saves the new cart state to the session. Generally, you'll need to call this method by yourself, only when implementing new methods that directly change the `items` attribute.
3.765268
3.382403
1.113193
if unique: return len(self.items) return sum([item.quantity for item in self.items.values()])
def count_items(self, unique=True)
Count items in the cart. Parameters ---------- unique : bool-convertible, optional Returns ------- int If `unique` is truthy, then the result is the number of items in the cart. Otherwise, it's the sum of all item quantities.
3.96998
3.575528
1.11032
if (settings.STATICFILES_STORAGE == 'cloudinary_storage.storage.StaticCloudinaryStorage' or self.upload_unhashed_files): super(Command, self).copy_file(path, prefixed_path, source_storage)
def copy_file(self, path, prefixed_path, source_storage)
Overwritten to execute only with --upload-unhashed-files param or StaticCloudinaryStorage. Otherwise only hashed files will be uploaded during postprocessing.
5.948948
2.652268
2.242967
X = initial_array if acceptance_criteria is not None: acceptance_bound = acceptance_criteria(X) iterations = 0 current_energy = objective_function(X) while current_energy > lower_bound and iterations <= max_iterations: iterations += 1 candidate = element_from_neighbourhood(X) candidate_energy = objective_function(candidate) if (candidate_energy < current_energy and (acceptance_criteria is None or acceptance_criteria(candidate) <= acceptance_bound)): X = candidate current_energy = candidate_energy if lower_bound > -float('inf') and current_energy != lower_bound: warnings.warn(f"Lower bound {lower_bound} not achieved after {max_iterations} iterations") return X
def hill_climber(objective_function, initial_array, lower_bound=-float('inf'), acceptance_criteria=None, max_iterations=10 ** 3)
Implement a basic hill climbing algorithm. Has two stopping conditions: 1. Maximum number of iterations; 2. A known lower bound; if none is passed then this is not used. If acceptance_criteria (a callable) is not None then this is used to obtain an upper bound on some other measure (different to the objective function). In practice this is used when optimising the objective function to ensure that we don't accept a solution that improves the objective function but that adds more constraint violations.
2.569217
2.633423
0.975619
def simulated_annealing(objective_function, initial_array,
                        initial_temperature=10 ** 4, cooldown_rate=0.7,
                        acceptance_criteria=None, lower_bound=-float('inf'),
                        max_iterations=10 ** 3):
    """Run a simulated annealing search with exponential cooling.

    Stops when the objective value reaches ``lower_bound`` or after
    ``max_iterations`` candidate evaluations.  Starting with a zero
    ``initial_temperature`` degenerates to hill climbing.  Returns the best
    array seen (subject to ``acceptance_criteria``, when given).
    """
    current = initial_array
    if acceptance_criteria is not None:
        acceptance_bound = acceptance_criteria(current)
    best = current
    step = 0
    current_energy = objective_function(current)
    best_energy = current_energy
    temperature = initial_temperature
    while current_energy > lower_bound and step <= max_iterations:
        step += 1
        candidate = element_from_neighbourhood(current)
        candidate_energy = objective_function(candidate)
        delta = candidate_energy - current_energy
        # Track the best acceptable solution separately from the walk.
        if (candidate_energy < best_energy
                and (acceptance_criteria is None
                     or acceptance_criteria(candidate) <= acceptance_bound)):
            best_energy = candidate_energy
            best = candidate
        # Metropolis step: always accept improvements; otherwise accept with
        # probability exp(-delta / T).  Note the short-circuit keeps the RNG
        # untouched when delta < 0.
        if delta < 0 or (temperature > 0
                         and np.random.random() < np.exp(-delta / temperature)):
            current = candidate
            current_energy = candidate_energy
        temperature *= (cooldown_rate) ** step
    if lower_bound > -float('inf') and current_energy != lower_bound:
        warnings.warn(f"Lower bound {lower_bound} not achieved after {max_iterations} iterations")
    return best
def _events_available_in_scheduled_slot(events, slots, X, **kwargs):
    """Yield constraints forcing each event into slots it is available for.

    A zero entry in the slot availability array forbids scheduling that
    (event, slot) pair.
    """
    availability = lpu.slot_availability_array(slots=slots, events=events)
    label = 'Event scheduled when not available'
    for event_idx, slot_flags in enumerate(availability):
        for slot_idx, flag in enumerate(slot_flags):
            if flag == 0:
                yield Constraint(
                    f'{label} - event: {event_idx}, slot: {slot_idx}',
                    X[event_idx, slot_idx] <= flag
                )
def _events_available_during_other_events(
        events, slots, X, summation_type=None, **kwargs):
    """Yield constraints forbidding two clashing events in concurrent slots.

    Events clash when one lists the other as unavailable or when they share
    a tag (both encoded as zeros in the event availability array).
    """
    summation = lpu.summation_functions[summation_type]
    availability = lpu.event_availability_array(events)
    label = 'Event clashes with another event'
    for first_slot, second_slot in lpu.concurrent_slots(slots):
        for i, clash_flags in enumerate(availability):
            # Only events with explicit unavailability generate constraints.
            if not events[i].unavailability:
                continue
            for j, flag in enumerate(clash_flags):
                if flag == 0:
                    yield Constraint(
                        f'{label} - event: {i} and event: {j}',
                        summation((X[i, first_slot], X[j, second_slot])) <= 1 + flag
                    )
def _upper_bound_on_event_overflow(
        events, slots, X, beta, summation_type=None, **kwargs):
    """Yield artificial constraints bounding per-(event, slot) overflow.

    Used by the objective that minimises the maximum demand overflow: for
    every pair, demand * X - capacity must not exceed ``beta``.
    """
    label = 'Artificial upper bound constraint'
    for event_idx, event in enumerate(events):
        for slot_idx, slot in enumerate(slots):
            yield Constraint(
                f'{label} - slot: {slot_idx} and event: {event_idx}',
                event.demand * X[event_idx, slot_idx] - slot.capacity <= beta)
def heuristic(events, slots,
              objective_function=None,
              algorithm=heu.hill_climber,
              initial_solution=None,
              initial_solution_algorithm_kwargs=None,
              objective_function_algorithm_kwargs=None,
              **kwargs):
    """Compute a schedule using a heuristic.

    Parameters
    ----------
    events : list or tuple of :py:class:`resources.Event` instances
    slots : list or tuple of :py:class:`resources.Slot` instances
    objective_function : callable
        from lp_problem.objective_functions
    algorithm : callable
        a heuristic algorithm from conference_scheduler.heuristics
    initial_solution : np.array, optional
        a starting array; when None one is generated and repaired
    initial_solution_algorithm_kwargs : dict, optional
        kwargs for the heuristic algorithm for the initial solution
    objective_function_algorithm_kwargs : dict, optional
        kwargs for the heuristic algorithm for the objective function
    kwargs : keyword arguments
        arguments for the objective function

    Returns
    -------
    list
        A list of (event index, slot index) tuples for all scheduled items.
    """
    # Fix for the mutable-default-argument pitfall: the previous `={}`
    # defaults were shared across calls; use None sentinels instead.
    if initial_solution_algorithm_kwargs is None:
        initial_solution_algorithm_kwargs = {}
    if objective_function_algorithm_kwargs is None:
        objective_function_algorithm_kwargs = {}

    def count_violations(array):
        # Number of violated constraints; 0 means a feasible schedule.
        return len(list(val.array_violations(array, events, slots)))

    if initial_solution is None:
        # Start from a random assignment and drive violations to zero.
        X = heu.get_initial_array(events=events, slots=slots)
        X = algorithm(initial_array=X,
                      objective_function=count_violations,
                      lower_bound=0,
                      **initial_solution_algorithm_kwargs)
    else:
        X = initial_solution

    if objective_function is not None:
        # beta is only meaningful for the LP formulation; relax it here.
        kwargs["beta"] = float('inf')

        def func(array):
            return objective_function(
                events=events, slots=slots, X=array, **kwargs)

        # Optimise the objective without increasing the violation count.
        X = algorithm(initial_array=X,
                      objective_function=func,
                      acceptance_criteria=count_violations,
                      **objective_function_algorithm_kwargs)

    return list(zip(*np.nonzero(X)))
def solution(events, slots, objective_function=None, solver=None, **kwargs):
    """Compute a schedule in solution form by solving an integer program.

    Parameters
    ----------
    events : list or tuple of :py:class:`resources.Event` instances
    slots : list or tuple of :py:class:`resources.Slot` instances
    objective_function : callable from lp_problem.objective_functions
    solver : pulp.solver
    kwargs : arguments for the objective function

    Returns
    -------
    list
        (event index, slot index) tuples for all scheduled items.

    Raises
    ------
    ValueError
        If the solver does not find an optimal solution.
    """
    shape = Shape(len(events), len(slots))
    lp_problem = pulp.LpProblem()
    X = lp.utils.variables(shape)
    beta = pulp.LpVariable("upper_bound")

    for constraint in lp.constraints.all_constraints(
            events, slots, X, beta, 'lpsum'):
        lp_problem += constraint.condition

    if objective_function is not None:
        lp_problem += objective_function(
            events=events, slots=slots, X=X, beta=beta, **kwargs)

    # pulp status 1 means "Optimal".
    if lp_problem.solve(solver=solver) != 1:
        raise ValueError('No valid solution found')
    return [item for item, variable in X.items() if variable.value() > 0]
def array(events, slots, objective_function=None, solver=None, **kwargs):
    """Compute a schedule in array form.

    Solves the scheduling problem and converts the result to an E x S
    0/1 numpy array where entry (i, j) is 1 iff event i is in slot j.
    """
    scheduled = solution(events, slots, objective_function,
                         solver=solver, **kwargs)
    return conv.solution_to_array(scheduled, events, slots)
def schedule(events, slots, objective_function=None, solver=None, **kwargs):
    """Compute a schedule in schedule form.

    Solves the scheduling problem and converts the result to a list of
    :py:class:`resources.ScheduledItem` instances.
    """
    scheduled = solution(events, slots, objective_function,
                         solver=solver, **kwargs)
    return conv.solution_to_schedule(scheduled, events, slots)
def event_schedule_difference(old_schedule, new_schedule):
    """Describe, per event, how ``new_schedule`` differs from ``old_schedule``.

    Returns a list of :py:class:`resources.ChangedEventScheduledItem`
    covering moved, newly added and removed events, sorted by event name.
    """
    old_by_name = {item.event.name: item for item in old_schedule}
    new_by_name = {item.event.name: item for item in new_schedule}

    diff = []
    # Events present in both schedules but in a different slot.
    for name in old_by_name.keys() & new_by_name.keys():
        before = old_by_name[name]
        after = new_by_name[name]
        if before.slot != after.slot:
            diff.append(ChangedEventScheduledItem(
                before.event, before.slot, after.slot))
    # Events only in the new schedule.
    for name in new_by_name.keys() - old_by_name.keys():
        item = new_by_name[name]
        diff.append(ChangedEventScheduledItem(item.event, None, item.slot))
    # Events only in the old schedule.
    for name in old_by_name.keys() - new_by_name.keys():
        item = old_by_name[name]
        diff.append(ChangedEventScheduledItem(item.event, item.slot, None))

    return sorted(diff, key=lambda item: item.event.name)
def slot_schedule_difference(old_schedule, new_schedule):
    """Describe, per slot, how ``new_schedule`` differs from ``old_schedule``.

    Returns a list of :py:class:`resources.ChangedSlotScheduledItem`
    covering changed, newly used and vacated slots, sorted by venue then
    start time.
    """
    old_by_slot = {item.slot: item for item in old_schedule}
    new_by_slot = {item.slot: item for item in new_schedule}

    diff = []
    # Slots used in both schedules but holding a different event.
    for slot in old_by_slot.keys() & new_by_slot.keys():
        before = old_by_slot[slot]
        after = new_by_slot[slot]
        if before.event != after.event:
            diff.append(ChangedSlotScheduledItem(
                slot, before.event, after.event))
    # Slots used only in the new schedule.
    for slot in new_by_slot.keys() - old_by_slot.keys():
        diff.append(ChangedSlotScheduledItem(
            slot, None, new_by_slot[slot].event))
    # Slots used only in the old schedule.
    for slot in old_by_slot.keys() - new_by_slot.keys():
        diff.append(ChangedSlotScheduledItem(
            slot, old_by_slot[slot].event, None))

    return sorted(
        diff, key=lambda item: (item.slot.venue, item.slot.starts_at))
def array_violations(array, events, slots, beta=None):
    """Return a generator of labels for constraints violated by ``array``.

    Parameters
    ----------
    array : np.array
        a schedule in array form
    events : list or tuple of resources.Event instances
    slots : list or tuple of resources.Slot instances
    beta : optional bound used by the artificial overflow constraints
    """
    violated = (
        constraint.label
        for constraint in constraints.all_constraints(
            events, slots, array, beta=beta)
        if not constraint.condition
    )
    return violated
def is_valid_array(array, events, slots):
    """Return whether an array-form schedule satisfies all constraints.

    An empty array is not a valid solution.
    """
    if len(array) == 0:
        return False
    violation_count = sum(1 for _ in array_violations(array, events, slots))
    return violation_count == 0
def is_valid_solution(solution, events, slots):
    """Return whether a solution-form schedule satisfies all constraints.

    An empty solution is not valid.
    """
    if len(solution) == 0:
        return False
    as_array = converter.solution_to_array(solution, events, slots)
    return is_valid_array(as_array, events, slots)
def solution_violations(solution, events, slots):
    """Return a generator of violated-constraint labels for a solution.

    Converts the solution to array form and delegates to
    ``array_violations``.
    """
    as_array = converter.solution_to_array(solution, events, slots)
    return array_violations(as_array, events, slots)
def is_valid_schedule(schedule, events, slots):
    """Return whether a schedule-form schedule satisfies all constraints.

    An empty schedule is not valid.
    """
    if len(schedule) == 0:
        return False
    as_array = converter.schedule_to_array(schedule, events, slots)
    return is_valid_array(as_array, events, slots)
def schedule_violations(schedule, events, slots):
    """Return a generator of violated-constraint labels for a schedule.

    Converts the schedule to array form and delegates to
    ``array_violations``.
    """
    as_array = converter.schedule_to_array(schedule, events, slots)
    return array_violations(as_array, events, slots)
def tag_array(events):
    """Return a numpy array mapping events to tags.

    Rows correspond to events, columns to the sorted set of all tags; an
    entry is 1 when the event carries that tag.
    """
    tags = sorted({tag for event in events for tag in event.tags})
    column_of = {tag: i for i, tag in enumerate(tags)}
    array = np.zeros((len(events), len(tags)))
    for row, event in enumerate(events):
        for tag in event.tags:
            array[row, column_of[tag]] = 1
    return array
def session_array(slots):
    """Return a numpy array mapping sessions to slots.

    Rows correspond to the sorted set of sessions, columns to slots; an
    entry is 1 when the slot belongs to that session.  Assumes sessions do
    not share slots.
    """
    sessions = sorted({slot.session for slot in slots})
    row_of = {session: i for i, session in enumerate(sessions)}
    array = np.zeros((len(sessions), len(slots)))
    for col, slot in enumerate(slots):
        array[row_of[slot.session], col] = 1
    return array
def slot_availability_array(events, slots):
    """Return a numpy array mapping events to slots.

    Rows correspond to events, columns to slots.  An entry is 0 when the
    event cannot be scheduled in that slot (either explicitly unavailable
    or too long for the slot), 1 otherwise.
    """
    array = np.ones((len(events), len(slots)))
    for row, event in enumerate(events):
        for col, slot in enumerate(slots):
            explicitly_unavailable = slot in event.unavailability
            too_long = event.duration > slot.duration
            if explicitly_unavailable or too_long:
                array[row, col] = 0
    return array
def event_availability_array(events):
    """Return a numpy array mapping events to events.

    Entry (i, j) is 0 when the two events cannot run at the same time
    (explicit unavailability or a shared tag), 1 otherwise; the matrix is
    kept symmetric.
    """
    array = np.ones((len(events), len(events)))
    for i, event in enumerate(events):
        for j, other in enumerate(events):
            if i == j:
                continue
            share_tag = bool(set(event.tags).intersection(other.tags))
            if (other in event.unavailability) or share_tag:
                array[i, j] = 0
                array[j, i] = 0
    return array
def slots_overlap(slot, other_slot):
    """Return whether one slot's time interval contains the other's."""
    first_end = ends_at(slot)
    second_end = ends_at(other_slot)
    first_inside_second = (slot.starts_at >= other_slot.starts_at
                           and first_end <= second_end)
    second_inside_first = (other_slot.starts_at >= slot.starts_at
                           and second_end <= first_end)
    return first_inside_second or second_inside_first
def concurrent_slots(slots):
    """Yield index pairs (i, j) with i < j of slots that overlap in time."""
    count = len(slots)
    for i in range(count):
        for j in range(i + 1, count):
            if slots_overlap(slots[i], slots[j]):
                yield (i, j)
event_categories = np.nonzero(tag_array[talk])[0] return np.nonzero(sum(tag_array.transpose()[event_categories]) == 0)[0]
def _events_with_diff_tag(talk, tag_array)
Return the indices of the events with no tag in common as tag
6.730926
5.540267
1.21491
def solution_to_array(solution, events, slots):
    """Convert a schedule from solution to array form.

    ``solution`` is a sequence of (event index, slot index) pairs; the
    result is an E x S 0/1 int8 array.
    """
    array = np.zeros((len(events), len(slots)), dtype=np.int8)
    for event_idx, slot_idx in solution:
        array[event_idx, slot_idx] = 1
    return array
def solution_to_schedule(solution, events, slots):
    """Convert a schedule from solution to schedule form.

    Each (event index, slot index) pair becomes a
    :py:class:`resources.ScheduledItem`.
    """
    schedule = []
    for event_idx, slot_idx in solution:
        schedule.append(
            ScheduledItem(event=events[event_idx], slot=slots[slot_idx]))
    return schedule
def schedule_to_array(schedule, events, slots):
    """Convert a schedule from schedule to array form.

    Looks up each scheduled item's event and slot by position in the given
    lists and marks the corresponding entry with 1.
    """
    array = np.zeros((len(events), len(slots)), dtype=np.int8)
    for item in schedule:
        row = events.index(item.event)
        col = slots.index(item.slot)
        array[row, col] = 1
    return array
def array_to_schedule(array, events, slots):
    """Convert a schedule from array to schedule form.

    Every nonzero (event, slot) entry becomes a
    :py:class:`resources.ScheduledItem`.
    """
    positions = np.transpose(np.nonzero(array))
    return [
        ScheduledItem(event=events[row], slot=slots[col])
        for row, col in positions
    ]
def add_line(self, line, source, *lineno):
    """Append one line of generated reST to the output."""
    # Route the line into a per-module bucket keyed on the autodoc source
    # module; 'rst' is presumably a module-level dict of lists collecting
    # the generated reST per module -- TODO confirm against the rest of
    # the file.
    if 'conference_scheduler.scheduler' in source:
        module = 'scheduler'
    else:
        module = 'resources'
    rst[module].append(line)
    # Also forward the line to the wrapped Sphinx directive's result,
    # preserving the current indentation level.
    self.directive.result.append(self.indent + line, source, *lineno)
def efficiency_capacity_demand_difference(slots, events, X, **kwargs):
    """Total difference between event demand and scheduled slot capacity.

    For every (event, slot) pair, (demand - capacity) is weighted by the
    assignment variable X[event, slot] and summed.
    """
    return sum(
        (event.demand - slot.capacity) * X[row, col]
        for row, event in enumerate(events)
        for col, slot in enumerate(slots)
    )
def number_of_changes(slots, events, original_schedule, X, **kwargs):
    """Count assignment changes between an original schedule and X.

    For cells where the original array is 0 the change is X[row, col];
    where it is 1 the change is 1 - X[row, col].  Works for both numpy
    arrays and LP variable arrays.
    """
    original = schedule_to_array(original_schedule, events=events, slots=slots)
    total = 0
    for row, assignments in enumerate(original):
        for col, assigned in enumerate(assignments):
            if assigned == 0:
                total += X[row, col]
            else:
                total += 1 - X[row, col]
    return total
def element_from_neighbourhood(X):
    """Return a random neighbour of schedule array ``X``.

    Picks one event and a different slot: if that slot is empty the event
    moves there; if it is occupied, the two events swap rows.  Assumes
    every row of X contains exactly one 1.
    """
    n_events, n_slots = X.shape
    result = np.copy(X)
    chosen_event = np.random.randint(n_events)
    current_slot = np.where(result[chosen_event, :] == 1)[0][0]
    # Draw from n_slots - 1 options and shift past the current slot so the
    # chosen target is always a *different* slot.
    target_slot = np.random.randint(n_slots - 1)
    if target_slot >= current_slot:
        target_slot += 1
    occupants = np.where(result[:, target_slot] == 1)[0]
    if len(occupants) == 0:
        # Target slot is free: relocate the event.
        result[chosen_event] = np.zeros(n_slots)
        result[chosen_event, target_slot] = 1
    else:
        # Target slot is taken: swap the two events' rows.
        pair = [chosen_event, occupants[0]]
        result[pair] = result[pair[::-1]]
    return result
def get_initial_array(events, slots, seed=None):
    """Return a random initial schedule array.

    Places event i in slot i and then shuffles the rows; ``seed`` makes
    the shuffle reproducible.
    """
    if seed is not None:
        np.random.seed(seed)
    n_events = len(events)
    n_slots = len(slots)
    array = np.zeros((n_events, n_slots))
    for i in range(n_events):
        array[i, i] = 1
    np.random.shuffle(array)
    return array
def fcs(bits):
    '''
    Append a running bitwise FCS CRC checksum to the end of a bit generator.

    Passes every input bit through unchanged while feeding it to the FCS
    accumulator, then yields the checksum bits.
    '''
    checksum = FCS()
    for bit in bits:
        yield bit
        checksum.update_bit(bit)
    # Append the FCS digest to the bit stream.
    # N.B. the wire format is little-bit-endian in addition to little-endian.
    tail = bitarray(endian="little")
    tail.frombytes(checksum.digest())
    for bit in tail:
        yield bit
def encode(binary_data):
    '''
    Encode binary data using Bell-202 AFSK.

    Expects a bitarray.bitarray() object of binary data as its argument.
    Returns a generator of sound samples suitable for use with the
    audiogen module.
    '''
    framed = frame(binary_data)
    # Half volume; surround the packet with ~1 s of silence to absorb
    # startup glitches on either end.
    pipeline = itertools.chain(
        audiogen.silence(1.05),
        multiply(modulate(framed), constant(0.5)),
        audiogen.silence(1.05),
    )
    for sample in pipeline:
        yield sample
def modulate(data):
    '''
    Generate Bell 202 AFSK samples for the given symbol generator.

    Consumes raw wire symbols and produces the corresponding AFSK samples.
    '''
    # NOTE(review): uses itertools.izip, so this is Python 2 code.
    seconds_per_sample = 1.0 / audiogen.sampler.FRAME_RATE
    phase, seconds, bits = 0, 0, 0

    # construct generators: clock boundaries (in seconds) for each symbol,
    # and the tone frequency for each bit (mark for 1, space for 0)
    clock = (x / BAUD_RATE for x in itertools.count(1))
    tones = (MARK_HZ if bit else SPACE_HZ for bit in data)

    for boundary, frequency in itertools.izip(clock, tones):
        # frequency of current symbol is determined by how much
        # we advance the signal's phase in each audio frame
        phase_change_per_sample = TWO_PI / (audiogen.sampler.FRAME_RATE / frequency)

        # produce samples for the current symbol
        # until we reach the next clock boundary; carrying `phase` across
        # symbols keeps the waveform phase-continuous at tone changes
        while seconds < boundary:
            yield math.sin(phase)
            seconds += seconds_per_sample
            phase += phase_change_per_sample
            # keep the phase accumulator bounded
            if phase > TWO_PI:
                phase -= TWO_PI

        bits += 1
        logger.debug("bits = %d, time = %.7f ms, expected time = %.7f ms, error = %.7f ms, baud rate = %.6f Hz" \
            % (bits, 1000 * seconds, 1000 * bits / BAUD_RATE, 1000 * (seconds - bits / BAUD_RATE), bits / seconds))
def nrzi(data):
    '''
    NRZI (non-return to zero inverted) encode a bit stream: a 0 is encoded
    as a change in tone, and a 1 is encoded as no change in tone.
    '''
    tone = True
    for bit in data:
        tone = tone if bit else not tone
        yield tone
def _get_sync(self, url):
    """Internal helper for synchronous GET requests.

    Args:
        url (str): URL to fetch

    Returns:
        The response body parsed as JSON.

    Raises:
        HTTPError: If the HTTP request did not return 200 OK.
    """
    response = self.session.get(url)
    if response.status_code != requests.codes.ok:
        raise HTTPError
    return response.json()
data = None async with session.get(url) as resp: if resp.status == 200: data = await resp.json() return data
async def _get_async(self, url, session)
Asynchronous internal method used for GET requests Args: url (str): URL to fetch session (obj): aiohttp client session for async loop Returns: data (obj): Individual URL request's response corountine
2.45381
3.117165
0.787193
async def _async_loop(self, urls):
    """Fetch multiple URLs concurrently.

    Args:
        urls (list): URLs to fetch

    Returns:
        list: responses of all URL requests, in input order.
    """
    async with aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(ssl=False)
    ) as session:
        tasks = [
            asyncio.ensure_future(self._get_async(url, session))
            for url in urls
        ]
        # gather preserves the order of the submitted tasks.
        return await asyncio.gather(*tasks)
loop = asyncio.get_event_loop() results = loop.run_until_complete(self._async_loop(urls)) return results
def _run_async(self, urls)
Asynchronous event loop execution Args: urls (list): URLs to fetch Returns: results (obj): All URL requests' responses
2.678299
3.413714
0.78457
url = urljoin(self.base_url, F"{page}.json") story_ids = self._get_sync(url)[:limit] return self.get_items_by_ids(item_ids=story_ids)
def _get_stories(self, page, limit)
Hacker News has different categories (i.e. stories) like 'topstories', 'newstories', 'askstories', 'showstories', 'jobstories'. This method, first fetches the relevant story ids of that category The URL is: https://hacker-news.firebaseio.com/v0/<story_name>.json e.g. https://hacker-news.firebaseio.com/v0/topstories.json Then, asynchronously it fetches each story and returns the Item objects The URL for individual story is: https://hacker-news.firebaseio.com/v0/item/<item_id>.json e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json
5.915907
5.414444
1.092616
def get_item(self, item_id, expand=False):
    """Return a Hacker News ``Item`` object.

    Fetches https://hacker-news.firebaseio.com/v0/item/<item_id>.json.

    Args:
        item_id (int or str): unique Hacker News item id.
        expand (bool): when True, replace the related-id fields (author,
            kids, parent, poll, parts) with fully fetched objects.

    Returns:
        Item: the fetched item.

    Raises:
        InvalidItemID: if no item exists for ``item_id``.
    """
    endpoint = urljoin(self.item_url, f"{item_id}.json")
    payload = self._get_sync(endpoint)
    if not payload:
        raise InvalidItemID
    item = Item(payload)
    if expand:
        item.by = self.get_user(item.by)
        item.kids = self.get_items_by_ids(item.kids) if item.kids else None
        item.parent = self.get_item(item.parent) if item.parent else None
        item.poll = self.get_item(item.poll) if item.poll else None
        item.parts = (
            self.get_items_by_ids(item.parts) if item.parts else None
        )
    return item