Search is not available for this dataset
text
stringlengths
75
104k
def media(self):
    """Media defined as a dynamic property instead of an inner class."""
    combined = super(JqueryMediaMixin, self).media
    scripts = []
    if JQUERY_URL:
        # Explicit jQuery URL configured by the project.
        scripts.append(JQUERY_URL)
    elif JQUERY_URL is not False:
        # Fall back to bundled jQuery; admin moved it under vendor/ in Django 1.9.
        vendor = 'vendor/jquery/' if django.VERSION >= (1, 9, 0) else ''
        suffix = '.min' if not settings.DEBUG else ''
        paths = [
            '{}jquery{}.js'.format(vendor, suffix),
            'jquery.init.js',
        ]
        if USE_DJANGO_JQUERY:
            paths = ['admin/js/{}'.format(path) for path in paths]
        scripts.extend(paths)
    combined += Media(js=scripts)
    return combined
def media(self):
    """Media defined as a dynamic property instead of an inner class."""
    scripts = [
        'smart-selects/admin/js/chainedfk.js',
        'smart-selects/admin/js/bindfields.js',
    ]
    return super(ChainedSelect, self).media + Media(js=scripts)
def _get_available_choices(self, queryset, value):
    """get possible choices for selection"""
    item = queryset.filter(pk=value).first()
    if not item:
        # invalid value for queryset
        return []
    try:
        # Common case: plain foreign key, use the cached "<field>_id" attribute.
        related_pk = getattr(item, self.chained_model_field + "_id")
        lookup = {self.chained_model_field: related_pk}
    except AttributeError:
        try:
            # maybe m2m?
            related_pks = getattr(item, self.chained_model_field).all().values_list('pk', flat=True)
            lookup = {self.chained_model_field + "__in": related_pks}
        except AttributeError:
            try:
                # maybe a set?
                related_pks = getattr(item, self.chained_model_field + "_set").all().values_list('pk', flat=True)
                lookup = {self.chained_model_field + "__in": related_pks}
            except AttributeError:
                # give up
                lookup = {}
    choices = list(get_model(self.to_app_name, self.to_model_name).objects.filter(**lookup).distinct())
    if self.sort:
        sort_results(choices)
    return choices
def media(self):
    """Media defined as a dynamic property instead of an inner class."""
    scripts = [
        'smart-selects/admin/js/chainedm2m.js',
        'smart-selects/admin/js/bindfields.js',
    ]
    if self.horizontal:
        # For horizontal mode add django filter horizontal javascript code
        scripts += ['admin/js/core.js', 'admin/js/SelectBox.js', 'admin/js/SelectFilter2.js']
    return super(ChainedSelectMultiple, self).media + Media(js=scripts)
def unicode_sorter(input):
    """This function implements sort keys for the german language according to DIN 5007."""
    # DIN 5007 variant 1: compare case-insensitively, fold umlauts to their
    # base vowel and eszett to "ss".
    replacements = {u"ä": u"a", u"ö": u"o", u"ü": u"u", u"ß": u"ss"}
    key = input.lower()
    for umlaut, plain in replacements.items():
        key = key.replace(umlaut, plain)
    # A secondary key (e.g. input.swapcase()) could break ties between words
    # that fold to the same primary key; the original implementation left
    # that idea commented out, so only the primary key is returned.
    return key
def get_raw_record(self, instance, update_fields=None):
    """
    Gets the raw record.

    If `update_fields` is set, the raw record will be build with only
    the objectID and the given fields. Also, `_geoloc` and `_tags` will
    not be included.

    :param instance: model instance to serialize.
    :param update_fields: optional field name or iterable of field names
        restricting which fields are serialized.
    :return: dict ready to be sent to Algolia (always contains 'objectID').
    """
    tmp = {'objectID': self.objectID(instance)}
    if update_fields:
        # Normalize a single field name to a one-element tuple.
        if isinstance(update_fields, str):
            update_fields = (update_fields,)
        for elt in update_fields:
            # Only fields known to the index are translated and serialized;
            # unknown names are silently skipped.
            key = self.__translate_fields.get(elt, None)
            if key:
                tmp[key] = self.__named_fields[key](instance)
    else:
        # Full record: evaluate every registered field getter.
        for key, value in self.__named_fields.items():
            tmp[key] = value(instance)
    if self.geo_field:
        # Accepts a (lat, lng) tuple, a single geoloc dict, or a list of
        # geoloc dicts (multi-location records).
        loc = self.geo_field(instance)
        if isinstance(loc, tuple):
            tmp['_geoloc'] = {'lat': loc[0], 'lng': loc[1]}
        elif isinstance(loc, dict):
            self._validate_geolocation(loc)
            tmp['_geoloc'] = loc
        elif isinstance(loc, list):
            [self._validate_geolocation(geo) for geo in loc]
            tmp['_geoloc'] = loc
    if self.tags:
        # NOTE(review): if self.tags is truthy but NOT callable, the
        # isinstance check below reads tmp['_tags'] before it is set and
        # would raise KeyError — presumably self.tags is always callable
        # by the time this runs; confirm against the class __init__.
        if callable(self.tags):
            tmp['_tags'] = self.tags(instance)
        if not isinstance(tmp['_tags'], list):
            tmp['_tags'] = list(tmp['_tags'])
    logger.debug('BUILD %s FROM %s', tmp['objectID'], self.model)
    return tmp
def _should_really_index(self, instance):
    """Return True if according to should_index the object should be indexed.

    `should_index` may be a (bound) method, a plain/deferred attribute, an
    attribute name string, or a property; methods are called, attributes are
    evaluated and must resolve to a bool.

    :raises AlgoliaIndexError: if should_index is of an unsupported type or
        evaluates to a non-boolean value.
    """
    if self._should_index_is_method:
        is_method = inspect.ismethod(self.should_index)
        try:
            count_args = len(inspect.signature(self.should_index).parameters)
        except AttributeError:
            # Python < 3.3 has no inspect.signature.
            # noinspection PyDeprecation
            count_args = len(inspect.getargspec(self.should_index).args)
        # BUG FIX: was `count_args is 1`, which compares ints by identity and
        # only works by accident of CPython small-int interning.
        if is_method or count_args == 1:
            # bound method, call with instance
            return self.should_index(instance)
        else:
            # unbound method, simply call without arguments
            return self.should_index()
    else:
        # property/attribute/Field, evaluate as bool
        attr_type = type(self.should_index)
        if attr_type is DeferredAttribute:
            attr_value = self.should_index.__get__(instance, None)
        elif attr_type is str:
            attr_value = getattr(instance, self.should_index)
        elif attr_type is property:
            attr_value = self.should_index.__get__(instance)
        else:
            raise AlgoliaIndexError('{} should be a boolean attribute or a method that returns a boolean.'.format(
                self.should_index))
        if type(attr_value) is not bool:
            raise AlgoliaIndexError("%s's should_index (%s) should be a boolean" % (
                instance.__class__.__name__, self.should_index))
        return attr_value
def save_record(self, instance, update_fields=None, **kwargs):
    """Saves the record.

    If `update_fields` is set, this method will use partial_update_object()
    and will update only the given fields (never `_geoloc` and `_tags`).

    For more information about partial_update_object:
    https://github.com/algolia/algoliasearch-client-python#update-an-existing-object-in-the-index
    """
    if not self._should_index(instance):
        # Should not index, but since we don't know the state of the
        # instance, we need to send a DELETE request to ensure that if
        # the instance was previously indexed, it will be removed.
        self.delete_record(instance)
        return
    try:
        if update_fields:
            obj = self.get_raw_record(instance, update_fields=update_fields)
            result = self.__index.partial_update_object(obj)
        else:
            obj = self.get_raw_record(instance)
            result = self.__index.save_object(obj)
        logger.info('SAVE %s FROM %s', obj['objectID'], self.model)
        return result
    except AlgoliaException as e:
        if DEBUG:
            raise e
        else:
            # BUG FIX: derive the id directly from the instance — `obj` is
            # unbound here when get_raw_record itself raised, which would
            # turn the warning into a NameError.
            logger.warning('%s FROM %s NOT SAVED: %s',
                           self.objectID(instance), self.model, e)
def delete_record(self, instance):
    """Deletes the record."""
    object_id = self.objectID(instance)
    try:
        self.__index.delete_object(object_id)
    except AlgoliaException as e:
        if DEBUG:
            raise e
        logger.warning('%s FROM %s NOT DELETED: %s', object_id, self.model, e)
    else:
        logger.info('DELETE %s FROM %s', object_id, self.model)
def update_records(self, qs, batch_size=1000, **kwargs):
    """
    Updates multiple records.

    This method is optimized for speed. It takes a QuerySet and the same
    arguments as QuerySet.update(). Optionally, you can specify the size
    of the batch send to Algolia with batch_size (default to 1000).

    >>> from algoliasearch_django import update_records
    >>> qs = MyModel.objects.filter(myField=False)
    >>> update_records(MyModel, qs, myField=True)
    >>> qs.update(myField=True)
    """
    # Translate model field names to index field names; unknown kwargs
    # are silently dropped.
    tmp = {}
    for key, value in kwargs.items():
        name = self.__translate_fields.get(key, None)
        if name:
            tmp[name] = value
    batch = []
    # Fetch only the objectID column — the update payload is the same for
    # every record except the objectID itself.
    objectsIDs = qs.only(self.custom_objectID).values_list(
        self.custom_objectID, flat=True)
    for elt in objectsIDs:
        tmp['objectID'] = elt
        # dict(tmp) copies the payload, since tmp is mutated each iteration.
        batch.append(dict(tmp))
        if len(batch) >= batch_size:
            self.__index.partial_update_objects(batch)
            batch = []
    # Flush the final partial batch.
    if len(batch) > 0:
        self.__index.partial_update_objects(batch)
def raw_search(self, query='', params=None):
    """Performs a search query and returns the parsed JSON."""
    search_params = {} if params is None else params
    try:
        return self.__index.search(query, search_params)
    except AlgoliaException as e:
        if DEBUG:
            raise e
        logger.warning('ERROR DURING SEARCH ON %s: %s', self.index_name, e)
def get_settings(self):
    """Returns the settings of the index."""
    logger.info('GET SETTINGS ON %s', self.index_name)
    try:
        return self.__index.get_settings()
    except AlgoliaException as e:
        if DEBUG:
            raise e
        logger.warning('ERROR DURING GET_SETTINGS ON %s: %s', self.model, e)
def set_settings(self):
    """Applies the settings to the index."""
    if not self.settings:
        # Nothing configured — nothing to push.
        return
    try:
        self.__index.set_settings(self.settings)
    except AlgoliaException as e:
        if DEBUG:
            raise e
        logger.warning('SETTINGS NOT APPLIED ON %s: %s', self.model, e)
    else:
        logger.info('APPLY SETTINGS ON %s', self.index_name)
def reindex_all(self, batch_size=1000):
    """
    Reindex all the records.

    By default, this method use Model.objects.all() but you can implement
    a method `get_queryset` in your subclass. This can be used to optimize
    the performance (for example with select_related or prefetch_related).

    Builds the new index in a temporary index, then atomically moves it
    over the live one; replicas/slaves, rules and synonyms are preserved
    across the move.

    :param batch_size: number of records per save_objects() call.
    :return: number of indexed records, or None on a swallowed error.
    """
    should_keep_synonyms = False
    should_keep_rules = False
    # Initialize so the restore phase is safe even when no settings exist.
    should_keep_replicas = False
    should_keep_slaves = False
    replicas = None
    slaves = None
    try:
        if not self.settings:
            self.settings = self.get_settings()
            logger.debug('Got settings for index %s: %s', self.index_name, self.settings)
        else:
            logger.debug("index %s already has settings: %s", self.index_name, self.settings)
    except AlgoliaException as e:
        if any("Index does not exist" in arg for arg in e.args):
            pass  # Expected, let's clear and recreate from scratch
        else:
            raise e  # Unexpected error while getting settings
    try:
        if self.settings:
            # Replicas/slaves must not be attached to the temporary index,
            # so strip them and remember them for after the move.
            replicas = self.settings.get('replicas', None)
            slaves = self.settings.get('slaves', None)
            should_keep_replicas = replicas is not None
            should_keep_slaves = slaves is not None
            if should_keep_replicas:
                self.settings['replicas'] = []
                logger.debug("REMOVE REPLICAS FROM SETTINGS")
            if should_keep_slaves:
                self.settings['slaves'] = []
                logger.debug("REMOVE SLAVES FROM SETTINGS")
            self.__tmp_index.wait_task(self.__tmp_index.set_settings(self.settings)['taskID'])
            logger.debug('APPLY SETTINGS ON %s_tmp', self.index_name)
        # Snapshot rules and synonyms: move_index replaces them.
        rules = list(self.__index.iter_rules())
        synonyms = list(self.__index.iter_synonyms())
        if rules:
            logger.debug('Got rules for index %s: %s', self.index_name, rules)
            should_keep_rules = True
        if synonyms:
            # BUG FIX: this log line previously printed `rules` instead of
            # the synonyms it claims to report.
            logger.debug('Got synonyms for index %s: %s', self.index_name, synonyms)
            should_keep_synonyms = True
        self.__tmp_index.clear_index()
        logger.debug('CLEAR INDEX %s_tmp', self.index_name)
        counts = 0
        batch = []
        if hasattr(self, 'get_queryset'):
            qs = self.get_queryset()
        else:
            qs = self.model.objects.all()
        for instance in qs:
            if not self._should_index(instance):
                continue  # should not index
            batch.append(self.get_raw_record(instance))
            if len(batch) >= batch_size:
                self.__tmp_index.save_objects(batch)
                logger.info('SAVE %d OBJECTS TO %s_tmp', len(batch), self.index_name)
                batch = []
            counts += 1
        if len(batch) > 0:
            self.__tmp_index.save_objects(batch)
            logger.info('SAVE %d OBJECTS TO %s_tmp', len(batch), self.index_name)
        self.__client.move_index(self.__tmp_index.index_name, self.__index.index_name)
        logger.info('MOVE INDEX %s_tmp TO %s', self.index_name, self.index_name)
        if self.settings:
            if should_keep_replicas:
                self.settings['replicas'] = replicas
                logger.debug("RESTORE REPLICAS")
            if should_keep_slaves:
                self.settings['slaves'] = slaves
                logger.debug("RESTORE SLAVES")
            if should_keep_replicas or should_keep_slaves:
                self.__index.set_settings(self.settings)
        if should_keep_rules:
            response = self.__index.batch_rules(rules, forward_to_replicas=True)
            self.__index.wait_task(response['taskID'])
            # FIX: use lazy %-style args consistently instead of mixing
            # eager str.format with logger interpolation.
            logger.info('Saved rules for index %s with response: %s', self.index_name, response)
        if should_keep_synonyms:
            response = self.__index.batch_synonyms(synonyms, forward_to_replicas=True)
            self.__index.wait_task(response['taskID'])
            logger.info('Saved synonyms for index %s with response: %s', self.index_name, response)
        return counts
    except AlgoliaException as e:
        if DEBUG:
            raise e
        else:
            logger.warning('ERROR DURING REINDEXING %s: %s', self.model, e)
def register(model):
    """
    Register the given model class and wrapped AlgoliaIndex class with the
    Algolia engine:

    @register(Author)
    class AuthorIndex(AlgoliaIndex):
        pass
    """
    # Imported lazily to avoid an import cycle at module load time.
    from algoliasearch_django import AlgoliaIndex, register

    def _wrap(index_class):
        # Reject anything that is not an AlgoliaIndex subclass.
        if not issubclass(index_class, AlgoliaIndex):
            raise ValueError('Wrapped class must subclass AlgoliaIndex.')
        register(model, index_class)
        return index_class

    return _wrap
def handle(self, *args, **options):
    """Run the management command."""
    self.stdout.write('Apply settings to index:')
    wanted = options.get('model', None)
    for model in get_registered_model():
        # Honour an optional --model filter.
        if wanted and model.__name__ not in wanted:
            continue
        get_adapter(model).set_settings()
        self.stdout.write('\t* {}'.format(model.__name__))
def register(self, model, index_cls=AlgoliaIndex, auto_indexing=None):
    """
    Registers the given model with Algolia engine.

    If the given model is already registered with Algolia engine, a
    RegistrationError will be raised.
    """
    # Refuse duplicate registrations.
    if self.is_registered(model):
        raise RegistrationError(
            '{} is already registered with Algolia engine'.format(model))
    # Refuse anything that is not an AlgoliaIndex subclass.
    if not issubclass(index_cls, AlgoliaIndex):
        raise RegistrationError(
            '{} should be a subclass of AlgoliaIndex'.format(index_cls))
    self.__registered_models[model] = index_cls(model, self.client, self.__settings)
    # Per-model auto_indexing=True forces signals on; otherwise fall back
    # to the engine-wide default.
    if (isinstance(auto_indexing, bool) and auto_indexing) or self.__auto_indexing:
        post_save.connect(self.__post_save_receiver, model)
        pre_delete.connect(self.__pre_delete_receiver, model)
    logger.info('REGISTER %s', model)
def unregister(self, model):
    """
    Unregisters the given model with Algolia engine.

    If the given model is not registered with Algolia engine, a
    RegistrationError will be raised.
    """
    if not self.is_registered(model):
        raise RegistrationError(
            '{} is not registered with Algolia engine'.format(model))
    # Disconnect from the signalling framework.
    post_save.disconnect(self.__post_save_receiver, model)
    pre_delete.disconnect(self.__pre_delete_receiver, model)
    # Perform the unregistration.
    del self.__registered_models[model]
    logger.info('UNREGISTER %s', model)
def get_adapter(self, model):
    """Returns the adapter associated with the given model."""
    if self.is_registered(model):
        return self.__registered_models[model]
    raise RegistrationError(
        '{} is not registered with Algolia engine'.format(model))
def save_record(self, instance, **kwargs):
    """Saves the record.

    If `update_fields` is set, this method will use partial_update_object()
    and will update only the given fields (never `_geoloc` and `_tags`).

    For more information about partial_update_object:
    https://github.com/algolia/algoliasearch-client-python#update-an-existing-object-in-the-index
    """
    # Delegate to the adapter registered for this instance's model.
    self.get_adapter_from_instance(instance).save_record(instance, **kwargs)
def delete_record(self, instance):
    """Deletes the record."""
    # Delegate to the adapter registered for this instance's model.
    self.get_adapter_from_instance(instance).delete_record(instance)
def update_records(self, model, qs, batch_size=1000, **kwargs):
    """
    Updates multiple records.

    This method is optimized for speed. It takes a QuerySet and the same
    arguments as QuerySet.update(). Optionally, you can specify the size
    of the batch send to Algolia with batch_size (default to 1000).

    >>> from algoliasearch_django import update_records
    >>> qs = MyModel.objects.filter(myField=False)
    >>> update_records(MyModel, qs, myField=True)
    >>> qs.update(myField=True)
    """
    # Delegate to the adapter registered for this model.
    self.get_adapter(model).update_records(qs, batch_size=batch_size, **kwargs)
def raw_search(self, model, query='', params=None):
    """Performs a search query and returns the parsed JSON."""
    search_params = {} if params is None else params
    return self.get_adapter(model).raw_search(query, search_params)
def reindex_all(self, model, batch_size=1000):
    """
    Reindex all the records.

    By default, this method use Model.objects.all() but you can implement
    a method `get_queryset` in your subclass. This can be used to optimize
    the performance (for example with select_related or prefetch_related).
    """
    # Delegate to the adapter registered for this model.
    return self.get_adapter(model).reindex_all(batch_size)
def __post_save_receiver(self, instance, **kwargs):
    """Signal handler for when a registered model has been saved."""
    logger.debug('RECEIVE post_save FOR %s', instance.__class__)
    # Push the updated record to Algolia.
    self.save_record(instance, **kwargs)
def __pre_delete_receiver(self, instance, **kwargs):
    """Signal handler for when a registered model has been deleted."""
    logger.debug('RECEIVE pre_delete FOR %s', instance.__class__)
    # Remove the record from Algolia before the row disappears.
    self.delete_record(instance)
def handle(self, *args, **options):
    """Run the management command."""
    # py34-django18: batchsize is set to None when the user doesn't pass a
    # value, instead of being absent from the dict — so fall back explicitly.
    batch_size = options.get('batchsize', None) or 1000
    self.stdout.write('The following models were reindexed:')
    wanted = options.get('model', None)
    for model in get_registered_model():
        # Honour an optional --model filter.
        if wanted and model.__name__ not in wanted:
            continue
        counts = reindex_all(model, batch_size=batch_size)
        self.stdout.write('\t* {} --> {}'.format(model.__name__, counts))
def handle(self, *args, **options):
    """Run the management command."""
    self.stdout.write('Clear index:')
    wanted = options.get('model', None)
    for model in get_registered_model():
        # Honour an optional --model filter.
        if wanted and model.__name__ not in wanted:
            continue
        clear_index(model)
        self.stdout.write('\t* {}'.format(model.__name__))
def decode_exactly(geohash):
    """
    Decode the geohash to its exact values, including the error
    margins of the result.  Returns four float values: latitude,
    longitude, the plus/minus error for latitude (as a positive number)
    and the plus/minus error for longitude (as a positive number).
    """
    lat_lo, lat_hi = -90.0, 90.0
    lon_lo, lon_hi = -180.0, 180.0
    lat_err, lon_err = 90.0, 180.0
    # Bits alternate between longitude (even positions) and latitude.
    even_bit = True
    for ch in geohash:
        bits = __decodemap[ch]
        for mask in (16, 8, 4, 2, 1):
            if even_bit:
                # adds longitude info: halve the interval toward the set bit
                lon_err /= 2
                mid = (lon_lo + lon_hi) / 2
                if bits & mask:
                    lon_lo = mid
                else:
                    lon_hi = mid
            else:
                # adds latitude info
                lat_err /= 2
                mid = (lat_lo + lat_hi) / 2
                if bits & mask:
                    lat_lo = mid
                else:
                    lat_hi = mid
            even_bit = not even_bit
    return ((lat_lo + lat_hi) / 2,
            (lon_lo + lon_hi) / 2,
            lat_err,
            lon_err)
def decode(geohash):
    """
    Decode geohash, returning two strings with latitude and longitude
    containing only relevant digits and with trailing zeroes removed.
    """
    lat, lon, lat_err, lon_err = decode_exactly(geohash)
    # Number of significant decimal places implied by each error margin.
    lat_digits = max(1, int(round(-log10(lat_err)))) - 1
    lon_digits = max(1, int(round(-log10(lon_err)))) - 1
    lats = "%.*f" % (lat_digits, lat)
    lons = "%.*f" % (lon_digits, lon)
    # Strip trailing zeroes, but only when a decimal point is present.
    if '.' in lats:
        lats = lats.rstrip('0')
    if '.' in lons:
        lons = lons.rstrip('0')
    return lats, lons
def encode(latitude, longitude, precision=12):
    """
    Encode a position given in float arguments latitude, longitude to
    a geohash which will have the character count precision.
    """
    lat_lo, lat_hi = -90.0, 90.0
    lon_lo, lon_hi = -180.0, 180.0
    chars = []
    bit_values = (16, 8, 4, 2, 1)
    bit_index = 0
    current = 0
    # Bits alternate between longitude (first) and latitude.
    lon_turn = True
    while len(chars) < precision:
        if lon_turn:
            mid = (lon_lo + lon_hi) / 2
            if longitude > mid:
                current |= bit_values[bit_index]
                lon_lo = mid
            else:
                lon_hi = mid
        else:
            mid = (lat_lo + lat_hi) / 2
            if latitude > mid:
                current |= bit_values[bit_index]
                lat_lo = mid
            else:
                lat_hi = mid
        lon_turn = not lon_turn
        if bit_index < 4:
            bit_index += 1
        else:
            # Five bits collected: emit one base-32 character.
            chars.append(__base32[current])
            bit_index = 0
            current = 0
    return ''.join(chars)
def pad_to(unpadded, target_len):
    """
    Pad a string to the target length in characters, or return the original
    string if it's longer than the target length.
    """
    # str.ljust implements exactly this contract: space-pad on the right,
    # and return the string unchanged when it is already long enough.
    return unpadded.ljust(target_len)
def normalize_cols(table):
    """
    Pad short rows to the length of the longest row to help render
    "jagged" CSV files

    Rows are extended in place; the (same) table object is returned.
    """
    if not table:
        # ROBUSTNESS FIX: max() over an empty table raised ValueError.
        return table
    longest_row_len = max(len(row) for row in table)
    for row in table:
        # Extend in place, preserving the original mutate-and-return contract.
        row.extend([''] * (longest_row_len - len(row)))
    return table
def pad_cells(table):
    """Pad each cell to the size of the largest cell in its column."""
    # zip(*table) transposes rows into columns.
    col_sizes = [max(len(cell) for cell in col) for col in zip(*table)]
    for row in table:
        # Replace contents in place so the same row objects are kept.
        row[:] = [pad_to(cell, width) for cell, width in zip(row, col_sizes)]
    return table
def horiz_div(col_widths, horiz, vert, padding):
    """
    Create the column dividers for a table with given column widths.

    col_widths: list of column widths
    horiz: the character to use for a horizontal divider
    vert: the character to use for a vertical divider
    padding: amount of padding to add to each side of a column
    """
    pad_seg = horiz * padding
    joiner = pad_seg + vert + pad_seg
    return joiner.join(horiz * width for width in col_widths)
def add_dividers(row, divider, padding):
    """Add dividers and padding to a row of cells and return a string."""
    pad = ' ' * padding
    return (pad + divider + pad).join(row)
def md_table(table, *, padding=DEFAULT_PADDING, divider='|', header_div='-'):
    """
    Convert a 2D array of items into a Markdown table.

    padding: the number of padding spaces on either side of each divider
    divider: the vertical divider to place between columns
    header_div: the horizontal divider to place between the header row and
                body cells
    """
    # Normalize jagged rows first, then pad every cell to its column width.
    cells = pad_cells(normalize_cols(table))
    header, body = cells[0], cells[1:]
    col_widths = [len(cell) for cell in header]
    rows = [
        add_dividers(header, divider, padding),
        horiz_div(col_widths, header_div, divider, padding),
    ]
    rows.extend(add_dividers(row, divider, padding) for row in body)
    # Trailing whitespace is stripped from every rendered line.
    return '\n'.join(row.rstrip() for row in rows)
def baseId(resource_id, return_version=False):
    """Calculate base id and version from a resource id.

    Iteratively subtracts version-offset constants from the resource id
    until it falls at or below 0x01000000; what remains is the base id.

    :params resource_id: Resource id.
    :params return_version: (optional) True if You need version, returns (resource_id, version).
    """
    version = 0
    # Bias added so the subtraction loop below terminates on the same
    # thresholds for all inputs; acknowledged to skew the version counter.
    resource_id = resource_id + 0xC4000000  # 3288334336
    # TODO: version is broken due ^^, needs refactoring
    while resource_id > 0x01000000:  # 16777216
        version += 1
        # Each version tier subtracts a different magic offset.
        if version == 1:
            resource_id -= 0x80000000  # 2147483648  # 0x50000000 # 1342177280 ? || 0x2000000 # 33554432
        elif version == 2:
            resource_id -= 0x03000000  # 50331648
        else:
            resource_id -= 0x01000000  # 16777216
    if return_version:
        return resource_id, version - 67  # just correct "magic number"
    return resource_id
def itemParse(item_data, full=True):
    """Parser for item data. Returns nice dictionary.

    :params item_data: Item data received from ea servers.
    :params full: (optional) False if you're sniping and don't need extended info. Anyone really use this?
    """
    # TODO: object
    # TODO: dynamically parse all data
    # TODO: make it less ugly
    # ItemRareType={NONE:0,RARE:1,LOCK:2,TOTW:3,PURPLE:4,TOTY:5,RB:6,GREEN:7,ORANGE:8,PINK:9,TEAL:10,TOTS:11,LEGEND:12,WC:13,UNICEF:14,OLDIMOTM:15,FUTTY:16,STORYMODE:17,CHAMPION:18,CMOTM:19,IMOTM:20,OTW:21,HALLOWEEN:22,MOVEMBER:23,SBC:24,SBCP:25,PROMOA:26,PROMOB:27,AWARD:28,BDAY:30,UNITED:31,FUTMAS:32,RTRC:33,PTGS:34,FOF:35,MARQUEE:36,CHAMPIONSHIP:37,EUMOTM:38,TOTT:39,RRC:40,RRR:41}
    parsed = {
        'tradeId': item_data.get('tradeId'),
        'buyNowPrice': item_data.get('buyNowPrice'),
        'tradeState': item_data.get('tradeState'),
        'bidState': item_data.get('bidState'),
        'startingBid': item_data.get('startingBid'),
        'id': item_data.get('itemData', {'id': None})['id'] or item_data.get('item', {'id': None})['id'],
        'offers': item_data.get('offers'),
        'currentBid': item_data.get('currentBid'),
        'expires': item_data.get('expires'),  # seconds left
        'sellerEstablished': item_data.get('sellerEstablished'),
        'sellerId': item_data.get('sellerId'),
        'sellerName': item_data.get('sellerName'),
        'watched': item_data.get('watched'),
        'resourceId': item_data.get('resourceId'),  # consumables only?
        'discardValue': item_data.get('discardValue'),  # consumables only?
    }
    if full:
        if 'itemData' in item_data:
            nested = item_data['itemData']
            # Keys copied verbatim from the nested payload.
            plain_keys = (
                'timestamp',  # auction start
                'rating', 'assetId', 'resourceId', 'itemState', 'rareflag',
                'formation', 'leagueId', 'injuryType', 'injuryGames',
                'lastSalePrice', 'fitness', 'training', 'suspension',
                'contract',
                'playStyle',  # used only for players
                'discardValue', 'itemType',
                'cardsubtypeid',  # used only for cards
                'owners', 'untradeable', 'morale',
                'statsList',  # what is this?
                'lifetimeStats', 'attributeList', 'teamid', 'assists',
                'lifetimeAssists', 'loyaltyBonus', 'pile',
                'nation',  # nation_id?
                'resourceGameYear', 'marketDataMinPrice',
                'marketDataMaxPrice',
            )
            parsed.update((key, nested.get(key)) for key in plain_keys)
            # Renamed / aliased keys.
            parsed['position'] = nested.get('preferredPosition')
            parsed['cardType'] = nested.get('cardsubtypeid')  # alias
            parsed['year'] = nested.get('resourceGameYear')  # alias
            parsed['loans'] = item_data.get('loans')
        elif 'item' in item_data:  # consumables only (?)
            nested = item_data['item']
            consumable_keys = (
                'cardassetid', 'weightrare', 'gold', 'silver', 'bronze',
                'consumablesContractPlayer', 'consumablesContractManager',
                'consumablesFormationPlayer', 'consumablesFormationManager',
                'consumablesPosition', 'consumablesTraining',
                'consumablesTrainingPlayer', 'consumablesTrainingManager',
                'consumablesTrainingGk', 'consumablesTrainingPlayerPlayStyle',
                'consumablesTrainingGkPlayStyle',
                'consumablesTrainingManagerLeagueModifier',
                'consumablesHealing', 'consumablesTeamTalksPlayer',
                'consumablesTeamTalksTeam', 'consumablesFitnessPlayer',
                'consumablesFitnessTeam', 'consumables',
            )
            parsed.update((key, nested.get(key)) for key in consumable_keys)
            parsed['count'] = item_data.get('count')  # consumables only (?)
            parsed['untradeableCount'] = item_data.get('untradeableCount')  # consumables only (?)
    return parsed
def nations(timeout=timeout):
    """Return all nations in dict {id0: nation0, id1: nation1}.

    Scrapes the localized messages file for nation-name entries.

    :params timeout: (optional) Request timeout in seconds.
    """
    rc = requests.get(messages_url, timeout=timeout)
    rc.encoding = 'utf-8'  # guessing takes huge amount of cpu time
    rc = rc.text
    data = re.findall('"search.nationName.nation([0-9]+)": "(.+)"', rc)
    nations = {}
    for i in data:
        nations[int(i[0])] = i[1]
    return nations
def players(timeout=timeout):
    """Return all players in dict {id: c, f, l, n, r}.

    id, rank, nationality(?), first name, last name.
    """
    url = '{0}{1}.json'.format(card_info_url, 'players')
    payload = requests.get(url, timeout=timeout).json()
    result = {}
    # Regular players and legends live in two separate arrays.
    for entry in payload['Players'] + payload['LegendsPlayers']:
        result[entry['id']] = {
            'id': entry['id'],
            'firstname': entry['f'],
            'lastname': entry['l'],
            'surname': entry.get('c'),
            'rating': entry['r'],
        }
    return result
def playstyles(year=2019, timeout=timeout):
    """Return all playstyles in dict {id0: playstyle0, id1: playstyle1}.

    :params year: Year.
    """
    rc = requests.get(messages_url, timeout=timeout)
    rc.encoding = 'utf-8'  # guessing takes huge amount of cpu time
    pattern = '"playstyles.%s.playstyle([0-9]+)": "(.+)"' % year
    return {int(num): name for num, name in re.findall(pattern, rc.text)}
def logout(self, save=True):
    """Log out nicely (like clicking on logout button).

    :params save: False if You don't want to save cookies.
    """
    # Only the ut/auth DELETE is required; the various easports.com /
    # origin.com signout endpoints previously listed here are not needed.
    self.r.delete('https://%s/ut/auth' % self.fut_host, timeout=self.timeout)
    if save:
        self.saveSession()
    return True
def playstyles(self, year=2019):
    """Return all playstyles in dict {id0: playstyle0, id1: playstyle1}.

    :params year: Year.
    """
    if not self._playstyles:
        # BUG FIX: `year` was accepted but silently ignored — the module-level
        # playstyles() was always called with its default year. Forward it.
        # NOTE: the cache is a single slot (unlike leagues/teams), so the
        # year of the first call is pinned for subsequent calls.
        self._playstyles = playstyles(year)
    return self._playstyles
def leagues(self, year=2019):
    """Return all leagues in dict {id0: league0, id1: league1}.

    :params year: Year.
    """
    cache = self._leagues
    # Fetch lazily, once per requested year.
    if year not in cache:
        cache[year] = leagues(year)
    return cache[year]
def teams(self, year=2019):
    """Return all teams in dict {id0: team0, id1: team1}.

    :params year: Year.
    """
    cache = self._teams
    # Fetch lazily, once per requested year.
    if year not in cache:
        cache[year] = teams(year)
    return cache[year]
def saveSession(self):
    """Save cookies/session."""
    if self.cookies_file:
        self.r.cookies.save(ignore_discard=True)
        # Persist the OAuth token alongside the cookie jar.
        token_line = '%s %s' % (self.token_type, self.access_token)
        with open(self.token_file, 'w') as f:
            f.write(token_line)
def cardInfo(self, resource_id):
    """Return card info.

    :params resource_id: Resource id.
    """
    # TODO: add referer to headers (futweb)
    base_id = baseId(resource_id)
    if base_id in self.players:
        return self.players[base_id]
    # not a player? fall back to the remote card-info endpoint
    url = '{0}{1}.json'.format(card_info_url, base_id)
    return requests.get(url, timeout=self.timeout).json()
def searchDefinition(self, asset_id, start=0, page_size=itemsPerPage['transferMarket'], count=None):
    """Return variations of the given asset id, e.g. IF cards.

    :param asset_id: Asset id / Definition id.
    :param start: (optional) Start page.
    :param count: (optional) Number of definitions you want to request.
    """
    if count:  # backward compatibility, will be removed in future
        page_size = count
    base_id = baseId(asset_id)
    if base_id not in self.players:
        raise FutError(reason='Invalid player asset/definition id.')
    params = {
        'defId': base_id,
        'start': start,
        'type': 'player',
        'count': page_size,
    }
    rc = self.__request__('GET', 'defid', params=params)
    return [itemParse({'itemData': i}) for i in rc['itemData']]
def search(self, ctype, level=None, category=None, assetId=None,
           defId=None, min_price=None, max_price=None, min_buy=None,
           max_buy=None, league=None, club=None, position=None, zone=None,
           nationality=None, rare=False, playStyle=None, start=0,
           page_size=itemsPerPage['transferMarket'], fast=False):
    """Prepare a transfer-market search request, send it and return the
    parsed auctions.

    :param ctype: [development / ? / ?] Card type.
    :param level: (optional) [?/?/gold] Card level.
    :param category: (optional) [fitness/?/?] Card category.
    :param assetId: (optional) Asset id.
    :param defId: (optional) Definition id.
    :param min_price: (optional) Minimal price.
    :param max_price: (optional) Maximum price.
    :param min_buy: (optional) Minimal buy now price.
    :param max_buy: (optional) Maximum buy now price.
    :param league: (optional) League id.
    :param club: (optional) Club id.
    :param position: (optional) Position.
    :param zone: (optional) Zone.
    :param nationality: (optional) Nation id.
    :param rare: (optional) [boolean] True for searching special cards.
    :param playStyle: (optional) Play style.
    :param start: (optional) Start page sent to server so it is supposed
        to be 12/15, 24/30 etc. (default platform page_size*n).
    :param page_size: (optional) Page size (items per page).
    :param fast: (optional) Passed through to the pin-event sender and
        the request layer.
    """
    # TODO: add "search" alias
    # TODO: generator
    # pinEvents - report the hub/search views before the first page only.
    if start == 0:
        self.pin.send([self.pin.event('page_view', 'Hub - Transfers'),
                       self.pin.event('page_view', 'Transfer Market Search')],
                      fast=fast)
    params = {
        'start': start,
        'num': page_size,
        'type': ctype,  # "type" namespace is reserved in python
    }
    # Optional filters: only truthy values are forwarded to the server.
    for key, value in (('lev', level), ('cat', category),
                       ('maskedDefId', assetId), ('definitionId', defId),
                       ('micr', min_price), ('macr', max_price),
                       ('minb', min_buy), ('maxb', max_buy),
                       ('leag', league), ('team', club),
                       ('pos', position), ('zone', zone),
                       ('nat', nationality)):
        if value:
            params[key] = value
    if rare:
        params['rare'] = 'SP'
    if playStyle:
        params['playStyle'] = playStyle
    rc = self.__request__('GET', 'transfermarket', params=params, fast=fast)
    # pinEvents - report the result views after the first page only.
    if start == 0:
        self.pin.send(
            [self.pin.event('page_view', 'Transfer Market Results - List View'),
             self.pin.event('page_view', 'Item - Detail View')],
            fast=fast)
    return [itemParse(i) for i in rc.get('auctionInfo', ())]
def bid(self, trade_id, bid, fast=False):
    """Place a bid on an auction.

    :params trade_id: Trade id.
    :params bid: Amount of credits You want to spend.
    :params fast: True for fastest bidding (skips trade status & credits check).
    """
    if not fast:
        status = self.tradeStatus(trade_id)[0]
        # Refuse when we are already outbid or cannot afford the bid.
        if status['currentBid'] >= bid or self.credits < bid:
            return False  # TODO: add exceptions
    payload = json.dumps({'bid': bid})
    try:
        auction = self.__request__('PUT', 'trade/%s/bid' % trade_id,
                                   data=payload,
                                   params={'sku_b': self.sku_b},
                                   fast=fast)['auctionInfo'][0]
    except PermissionDenied:
        # Too slow, somebody took it already :-(
        return False
    if auction['bidState'] == 'highest':
        return True
    # Buy-now wins close the trade immediately.
    # (checking 'tradeState' is required?)
    return auction['tradeState'] == 'closed' and auction['bidState'] == 'buyNow'
def club(self, sort='desc', ctype='player', defId='', start=0, count=None,
         page_size=itemsPerPage['club'], level=None, category=None,
         assetId=None, league=None, club=None, position=None, zone=None,
         nationality=None, rare=False, playStyle=None):
    """Return items in your club, excluding consumables.

    :param sort: (optional) Sort order.
    :param ctype: [player / staff / item / kit / ball / badge / stadium]
        Card type.
    :param level: (optional) [?/?/gold] Card level.
    :param category: (optional) [fitness/?/?] Card category.
    :param assetId: (optional) Asset id.
    :param defId: (optional) Definition id.
    :param league: (optional) League id.
    :param club: (optional) Club id.
    :param position: (optional) Position.
    :param zone: (optional) Zone.
    :param nationality: (optional) Nation id.
    :param rare: (optional) [boolean] True for searching special cards.
    :param playStyle: (optional) Play style.
    :param start: (optional) Start page sent to server so it is supposed
        to be 12/15, 24/30 etc. (default platform page_size*n).
    :param count: (optional) Deprecated alias for page_size.
    :param page_size: (optional) Page size (items per page).
    """
    method = 'GET'
    url = 'club'
    if count:  # backward compatibility, will be removed in future
        page_size = count
    params = {'sort': sort, 'type': ctype, 'defId': defId, 'start': start,
              'count': page_size}
    if level:
        params['level'] = level
    if category:
        params['cat'] = category
    if assetId:
        params['maskedDefId'] = assetId
    if league:
        params['leag'] = league
    if club:
        params['team'] = club
    if position:
        params['pos'] = position
    if zone:
        params['zone'] = zone
    if nationality:
        params['nat'] = nationality
    if rare:
        params['rare'] = 'SP'
    if playStyle:
        params['playStyle'] = playStyle
    rc = self.__request__(method, url, params=params)
    # pinEvent
    if start == 0:
        if ctype == 'player':
            pgid = 'Club - Players - List View'
        elif ctype == 'staff':
            pgid = 'Club - Staff - List View'
        else:
            # 'item', 'kit', 'ball', 'badge', 'stadium' and any future
            # ctype map to the generic club-items view.  BUG FIX: an
            # unrecognized ctype previously left ``pgid`` unbound and
            # raised UnboundLocalError below.
            # TODO: detect all ctypes explicitly.
            pgid = 'Club - Club Items - List View'
        events = [self.pin.event('page_view', 'Hub - Club'),
                  self.pin.event('page_view', pgid)]
        if rc['itemData']:
            events.append(self.pin.event('page_view', 'Item - Detail View'))
        self.pin.send(events)
    return [itemParse({'itemData': i}) for i in rc['itemData']]
def clubStaff(self):
    """Return staff in your club."""
    return self.__request__('GET', 'club/stats/staff')
def clubConsumables(self, fast=False):
    """Return all consumables from club."""
    rc = self.__request__('GET', 'club/consumables/development')
    # The page views are reported one at a time, mirroring the webapp.
    for page in ('Hub - Club',
                 'Club - Consumables',
                 'Club - Consumables - List View'):
        self.pin.send([self.pin.event('page_view', page)], fast=fast)
    return [itemParse(entry) for entry in rc.get('itemData', ())]
def squad(self, squad_id=0, persona_id=None):
    """Return a squad.

    :params squad_id: Squad id.
    :params persona_id: (optional) Persona id; defaults to the logged-in
        persona.
    """
    owner = persona_id or self.persona_id
    # pinEvents
    self.pin.send([self.pin.event('page_view', 'Hub - Squads')])
    # TODO: ability to return other info than players only
    rc = self.__request__('GET', 'squad/%s/user/%s' % (squad_id, owner))
    # pinEvents
    self.pin.send([self.pin.event('page_view', 'Squad Details'),
                   self.pin.event('page_view', 'Squads - Squad Overview')])
    return [itemParse(player) for player in rc.get('players', ())]
def tradeStatus(self, trade_id):
    """Return trade status.

    :params trade_id: Trade id, or a list/tuple of trade ids.
    """
    if not isinstance(trade_id, (list, tuple)):
        trade_id = (trade_id,)
    joined = ','.join(str(tid) for tid in trade_id)
    # multiple trade_ids not tested
    rc = self.__request__('GET', 'trade/status',
                          params={'tradeIds': joined})
    return [itemParse(info, full=False) for info in rc['auctionInfo']]
def tradepile(self):
    """Return items in tradepile."""
    rc = self.__request__('GET', 'tradepile')
    # pinEvents
    views = [self.pin.event('page_view', 'Hub - Transfers'),
             self.pin.event('page_view', 'Transfer List - List View')]
    if rc.get('auctionInfo'):
        views.append(self.pin.event('page_view', 'Item - Detail View'))
    self.pin.send(views)
    return [itemParse(entry) for entry in rc.get('auctionInfo', ())]
def sell(self, item_id, bid, buy_now, duration=3600, fast=False):
    """Start an auction and return its trade_id.

    :params item_id: Item id.
    :params bid: Starting bid.
    :params buy_now: Buy now price.
    :params duration: Auction duration in seconds (Default: 3600).
    :params fast: True to skip the post-listing tradeStatus check.
    """
    # TODO: auto send to tradepile
    payload = {'buyNowPrice': buy_now,
               'startingBid': bid,
               'duration': duration,
               'itemData': {'id': item_id}}
    rc = self.__request__('POST', 'auctionhouse',
                          data=json.dumps(payload),
                          params={'sku_b': self.sku_b})
    if not fast:
        # tradeStatus check like the webapp does
        self.tradeStatus(rc['id'])
    return rc['id']
def quickSell(self, item_id):
    """Quick sell.

    :params item_id: Item id, or a list/tuple of item ids.
    """
    if not isinstance(item_id, (list, tuple)):
        item_id = (item_id,)
    joined = ','.join(str(i) for i in item_id)
    # Response looks like:
    # {"items":[{"id":280607437106}],"totalCredits":18136}
    self.__request__('DELETE', 'item', params={'itemIds': joined})
    return True
def watchlistDelete(self, trade_id):
    """Remove cards from watchlist.

    :params trade_id: Trade id, or a list/tuple of trade ids.
    """
    if not isinstance(trade_id, (list, tuple)):
        trade_id = (trade_id,)
    joined = ','.join(str(tid) for tid in trade_id)
    # Server returns an empty body.
    self.__request__('DELETE', 'watchlist', params={'tradeId': joined})
    return True
def tradepileDelete(self, trade_id):  # item_id instead of trade_id?
    """Remove card from tradepile.

    :params trade_id: Trade id.
    """
    # Server returns an empty body.  TODO: validate status code
    self.__request__('DELETE', 'trade/%s' % trade_id)
    return True
def sendToTradepile(self, item_id, safe=True):
    """Send to tradepile (alias for __sendToPile__).

    :params item_id: Item id.
    :params safe: (optional) False to disable tradepile free space check.
    """
    if safe:
        # TODO?: optimization (don't parse items in tradepile)
        if len(self.tradepile()) >= self.tradepile_size:
            return False
    return self.__sendToPile__('trade', item_id=item_id)
def sendToWatchlist(self, trade_id):
    """Send to watchlist.

    :params trade_id: Trade id.
    """
    payload = json.dumps({'auctionInfo': [{'id': trade_id}]})
    return self.__request__('PUT', 'watchlist', data=payload)
def sendToSbs(self, challenge_id, item_id):
    """Send card FROM CLUB to the first free slot in an sbs squad.

    Returns False when the item is already in the squad or no free slot
    exists; True after the updated squad has been submitted.
    """
    # TODO?: multiple item_ids
    squad = self.sbsSquad(challenge_id)
    players = []
    placed = False
    for slot, entry in enumerate(squad['squad']['players']):
        current_id = entry['itemData']['id']
        if current_id == item_id:
            # Item already in sbs.  TODO?: report reason
            return False
        if current_id == 0 and not placed:
            # First empty slot - drop our item here.
            current_id = item_id
            placed = True
        players.append({"index": slot,
                        "itemData": {"id": current_id, "dream": False}})
    if not placed:
        return False
    self.__request__('PUT', 'sbs/challenge/%s/squad' % challenge_id,
                     data=json.dumps({'players': players}))
    return True
def applyConsumable(self, item_id, resource_id):
    """Apply consumable on player.

    :params item_id: Item id of player.
    :params resource_id: Resource id of consumable.
    """
    # TODO: catch exception when consumable is not found etc.
    # TODO: multiple players like in quickSell
    payload = json.dumps({'apply': [{'id': item_id}]})
    self.__request__('POST', 'item/resource/%s' % resource_id, data=payload)
def messages(self):
    """Return active messages."""
    rc = self.__request__('GET', 'activeMessage')
    return rc['activeMessage']
def packs(self):
    """List all (currently?) available packs."""
    return self.__request__('GET', 'store/purchaseGroup/cardpack',
                            params={'ppInfo': True})
def num2hex(self, num):
    """Convert a 32-bit number to an 8-character hex string, emitting the
    least significant byte first."""
    pairs = []
    for byte_index in range(4):
        shift = byte_index * 8
        high = self.hexChars[(num >> (shift + 4)) & 0x0F]
        low = self.hexChars[(num >> shift) & 0x0F]
        pairs.append(high + low)
    return ''.join(pairs)
def logger(name=None, save=False):
    """Init and configure a logger.

    :params name: Logger name (``None`` for the root logger).
    :params save: When True, log DEBUG+ to ``fut.log`` (any previous log
        is truncated); otherwise attach a no-op handler.
    """
    log = logging.getLogger(name)
    if save:
        logformat = '%(asctime)s [%(levelname)s] [%(name)s] %(funcName)s: %(message)s (line %(lineno)d)'
        log_file_path = 'fut.log'  # TODO: define logpath
        log.setLevel(logging.DEBUG)
        # FIX: mode='w' truncates the old log on handler creation,
        # replacing the previous open(path, 'w').write('') idiom which
        # never closed its file handle.
        logger_handler = logging.FileHandler(log_file_path, mode='w')
        logger_handler.setFormatter(logging.Formatter(logformat))
    else:
        logger_handler = NullHandler()
    log.addHandler(logger_handler)
    return log
def get_bits_per_pixel(data_format):
    """
    Returns the number of (used) bits per pixel, i.e. excluding padding.

    Returns None if the format is not known.
    """
    depth_groups = (
        (component_8bit_formats, 8),
        (component_10bit_formats, 10),
        (component_12bit_formats, 12),
        (component_14bit_formats, 14),
        (component_16bit_formats, 16),
    )
    for formats, bits in depth_groups:
        if data_format in formats:
            return bits
    # format not known
    return None
def run(self):
    """
    Invoke the worker callback in a loop, sleeping between iterations,
    until the owner's ``is_running`` flag turns False.
    """
    while self._base.is_running:
        worker = self._worker
        if worker:
            worker()
        time.sleep(self._sleep_duration)
def represent_pixel_location(self):
    """
    Returns a NumPy array that represents the 2D pixel location,
    which is defined by PFNC, of the original image data.

    You may use the returned NumPy array for a calculation to map the
    original image to another format.

    :return: A NumPy array that represents the 2D pixel location, or
        None when no data is held.
    """
    if self.data is None:
        return None
    # NOTE(review): the guard checks ``self.data`` but the reshape uses
    # ``self._data`` -- confirm both always refer to the same buffer.
    rows = self.height + self.y_padding
    cols = int(self.width * self._num_components_per_pixel + self.x_padding)
    return self._data.reshape(rows, cols)
def width(self):
    """
    :return: The width of the data component in the buffer in number of
        pixels.
    """
    try:
        if self._part:
            return self._part.width
        return self._buffer.width
    except InvalidParameterException:
        # The GenTL module does not expose the value; fall back to the
        # device's node map.
        return self._node_map.Width.value
def height(self):
    """
    :return: The height of the data component in the buffer in number of
        pixels.
    """
    try:
        if self._part:
            return self._part.height
        return self._buffer.height
    except InvalidParameterException:
        # The GenTL module does not expose the value; fall back to the
        # device's node map.
        return self._node_map.Height.value
def data_format_value(self):
    """
    :return: The data type of the data component as integer value.
    """
    try:
        # Note the asymmetry: parts expose ``data_format`` while whole
        # buffers expose ``pixel_format``.
        if self._part:
            return self._part.data_format
        return self._buffer.pixel_format
    except InvalidParameterException:
        return self._node_map.PixelFormat.value
def delivered_image_height(self):
    """
    :return: The image height of the data component, or 0 when the
        GenTL module does not provide it.
    """
    try:
        if self._part:
            return self._part.delivered_image_height
        return self._buffer.delivered_image_height
    except InvalidParameterException:
        return 0
def x_offset(self):  # TODO: Check the naming convention.
    """
    :return: The X offset of the data in the buffer in number of pixels
        from the image origin to handle areas of interest.
    """
    try:
        if self._part:
            return self._part.x_offset
        return self._buffer.offset_x
    except InvalidParameterException:
        # Fall back to the device's node map.
        return self._node_map.OffsetX.value
def y_offset(self):
    """
    :return: The Y offset of the data in the buffer in number of pixels
        from the image origin to handle areas of interest.
    """
    try:
        if self._part:
            return self._part.y_offset
        return self._buffer.offset_y
    except InvalidParameterException:
        # Fall back to the device's node map.
        return self._node_map.OffsetY.value
def x_padding(self):
    """
    :return: The X padding of the data component in the buffer in number
        of pixels, or 0 when the GenTL module does not provide it.
    """
    try:
        if self._part:
            return self._part.x_padding
        return self._buffer.padding_x
    except InvalidParameterException:
        return 0
def queue(self):
    """
    Queues the buffer to prepare for the upcoming image acquisition. Once
    the buffer is queued, the :class:`Buffer` object will be obsolete.
    You'll have nothing to do with it.

    Note that you have to return the ownership of the fetched buffers to
    the :class:`ImageAcquirer` object before stopping image acquisition
    calling this method because the :class:`ImageAcquirer` object tries
    to clear the self-allocated buffers when it stops image acquisition.
    """
    raw = self._buffer
    if _is_logging_buffer_manipulation:
        self._logger.debug(
            'Queued Buffer module #{0} containing frame #{1}'
            ' to DataStream module {2} of Device module {3}.'.format(
                raw.context,
                raw.frame_id,
                raw.parent.id_,
                raw.parent.parent.id_
            )
        )
    # Hand the raw GenTL buffer back to its owning data stream.
    raw.parent.queue_buffer(raw)
def start_image_acquisition(self):
    """
    Starts image acquisition.

    Sets up data streams when they were not created at connection time,
    announces and queues buffers on every data stream, derives the number
    of images to acquire from the device's AcquisitionMode, locks the
    transport-layer parameters where supported, then starts the streams,
    the acquisition thread and finally the device itself.

    :return: None.
    """
    if not self._create_ds_at_connection:
        self._setup_data_streams()
    #
    num_required_buffers = self._num_buffers
    for data_stream in self._data_streams:
        try:
            # Honour the stream's minimum, but never announce fewer
            # buffers than the user requested.
            num_buffers = data_stream.buffer_announce_min
            if num_buffers < num_required_buffers:
                num_buffers = num_required_buffers
        except InvalidParameterException as e:
            # The stream does not report a minimum; use the requested count.
            num_buffers = num_required_buffers
            self._logger.debug(e, exc_info=True)
        if data_stream.defines_payload_size():
            buffer_size = data_stream.payload_size
        else:
            # The stream cannot tell the payload size; ask the device.
            buffer_size = self.device.node_map.PayloadSize.value
        raw_buffers = self._create_raw_buffers(
            num_buffers, buffer_size
        )
        buffer_tokens = self._create_buffer_tokens(
            raw_buffers
        )
        self._announced_buffers = self._announce_buffers(
            data_stream=data_stream, _buffer_tokens=buffer_tokens
        )
        self._queue_announced_buffers(
            data_stream=data_stream, buffers=self._announced_buffers
        )
    # Reset the number of images to acquire.
    try:
        acq_mode = self.device.node_map.AcquisitionMode.value
        if acq_mode == 'Continuous':
            # -1 means "acquire until stopped".
            num_images_to_acquire = -1
        elif acq_mode == 'SingleFrame':
            num_images_to_acquire = 1
        elif acq_mode == 'MultiFrame':
            num_images_to_acquire = self.device.node_map.AcquisitionFrameCount.value
        else:
            num_images_to_acquire = -1
    except LogicalErrorException as e:
        # The node doesn't exist.
        num_images_to_acquire = -1
        self._logger.debug(e, exc_info=True)
    self._num_images_to_acquire = num_images_to_acquire
    try:
        # We're ready to start image acquisition. Lock the device's
        # transport layer related features:
        self.device.node_map.TLParamsLocked.value = 1
    except LogicalErrorException:
        # SFNC < 2.0
        pass
    # Start image acquisition.
    self._is_acquiring_images = True
    for data_stream in self._data_streams:
        data_stream.start_acquisition(
            ACQ_START_FLAGS_LIST.ACQ_START_FLAGS_DEFAULT,
            self._num_images_to_acquire
        )
    #
    if self.thread_image_acquisition:
        self.thread_image_acquisition.start()
    #
    self.device.node_map.AcquisitionStart.execute()
    self._logger.info(
        '{0} started image acquisition.'.format(self._device.id_)
    )
    if self._profiler:
        self._profiler.print_diff()
def fetch_buffer(self, *, timeout=0, is_raw=False):
    """
    Fetches the latest :class:`Buffer` object and returns it.

    :param timeout: Set timeout value in second; 0 waits forever.
    :param is_raw: Set :const:`True` if you need a raw GenTL Buffer
        module.

    :return: A :class:`Buffer` object (or a raw GenTL buffer when
        *is_raw* is True).
    :raises TimeoutException: When acquisition is not running, or no
        buffer arrived within *timeout* seconds.
    """
    if not self.is_acquiring_images:
        raise TimeoutException
    watch_timeout = True if timeout > 0 else False
    buffer = None
    base = time.time()
    while buffer is None:
        if watch_timeout and (time.time() - base) > timeout:
            raise TimeoutException
        else:
            with MutexLocker(self.thread_image_acquisition):
                if len(self._holding_filled_buffers) > 0:
                    if is_raw:
                        buffer = self._holding_filled_buffers.pop(0)
                    else:
                        # Update the chunk data:
                        _buffer = self._holding_filled_buffers.pop(0)
                        self._update_chunk_data(buffer=_buffer)
                        #
                        buffer = Buffer(
                            buffer=_buffer,
                            node_map=self.device.node_map,
                            logger=self._logger
                        )
    if _is_logging_buffer_manipulation:
        self._logger.debug(
            'Fetched Buffer module #{0}'
            ' containing frame #{1}'
            ' of DataStream module {2}'
            ' of Device module {3}'  # BUG FIX: was '{2}', which printed
            # the DataStream id twice and never rendered the Device id.
            '.'.format(
                buffer._buffer.context,
                buffer._buffer.frame_id,
                buffer._buffer.parent.id_,
                buffer._buffer.parent.parent.id_
            )
        )
    return buffer
def stop_image_acquisition(self):
    """
    Stops image acquisition.

    Stops the acquisition thread, the device and every data stream,
    discards queued buffers, flushes pending new-buffer events and
    releases either the buffers or the data streams depending on how
    they were created.

    :return: None.
    """
    if self.is_acquiring_images:
        #
        self._is_acquiring_images = False
        #
        if self.thread_image_acquisition.is_running:  # TODO
            self.thread_image_acquisition.stop()
        with MutexLocker(self.thread_image_acquisition):
            #
            self.device.node_map.AcquisitionStop.execute()
            try:
                # Unlock TLParamsLocked in order to allow full device
                # configuration:
                self.device.node_map.TLParamsLocked.value = 0
            except LogicalErrorException:
                # SFNC < 2.0
                pass
            for data_stream in self._data_streams:
                # Stop image acquisition.
                try:
                    data_stream.stop_acquisition(
                        ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL
                    )
                except (ResourceInUseException, TimeoutException) as e:
                    # Best effort: log and continue with the remaining
                    # teardown steps.
                    self._logger.error(e, exc_info=True)
                # Flush the queue for the image acquisition process.
                data_stream.flush_buffer_queue(
                    ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD
                )
            for event_manager in self._event_new_buffer_managers:
                event_manager.flush_event_queue()
            if self._create_ds_at_connection:
                self._release_buffers()
            else:
                self._release_data_streams()
        #
        self._has_acquired_1st_image = False
        #
        self._chunk_adapter.detach_buffer()
        #
        self._logger.info(
            '{0} stopped image acquisition.'.format(self._device.id_)
        )
    if self._profiler:
        self._profiler.print_diff()
def add_cti_file(self, file_path: str):
    """
    Adds a CTI file to work with to the CTI file list.

    Nonexistent paths are rejected with a warning; duplicates are
    silently ignored.

    :param file_path: Set a file path to the target CTI file.

    :return: None.
    """
    if not os.path.exists(file_path):
        self._logger.warning(
            'Attempted to add {0} which does not exist.'.format(file_path)
        )
        # BUG FIX: previously the missing path was appended to the list
        # anyway, contradicting the warning above.
        return
    if file_path not in self._cti_files:
        self._cti_files.append(file_path)
        self._logger.info(
            'Added {0} to the CTI file list.'.format(file_path)
        )
def remove_cti_file(self, file_path: str):
    """
    Removes the specified CTI file from the CTI file list.

    Unknown paths are ignored.

    :param file_path: Set a file path to the target CTI file.

    :return: None.
    """
    if file_path not in self._cti_files:
        return
    self._cti_files.remove(file_path)
    self._logger.info(
        'Removed {0} from the CTI file list.'.format(file_path)
    )
def _reset(self):
    """
    Initializes the :class:`Harvester` object. Once you reset the
    :class:`Harvester` object, all allocated resources, including buffers
    and remote device, will be released.

    :return: None.
    """
    # Tear down every outstanding image acquirer first.
    for acquirer in self._ias:
        acquirer._destroy()
    self._ias.clear()
    #
    self._logger.info('Started resetting the Harvester object.')
    self.remove_cti_files()
    self._release_gentl_producers()
    if self._profiler:
        self._profiler.print_diff()
    #
    self._logger.info('Completed resetting the Harvester object.')
def update_device_info_list(self):
    """
    Updates the device information list. You'll have to call this method
    every time you added CTI files or plugged/unplugged devices.

    Re-opens the GenTL producers and systems, enumerates every interface,
    and collects the device info entries of each interface that could be
    opened.

    :return: None.
    """
    #
    self._release_gentl_producers()
    try:
        self._open_gentl_producers()
        self._open_systems()
        #
        for system in self._systems:
            #
            system.update_interface_info_list(self.timeout_for_update)
            #
            for i_info in system.interface_info_list:
                iface = i_info.create_interface()
                try:
                    iface.open()
                except (
                        NotInitializedException,
                        ResourceInUseException,
                        InvalidHandleException,
                        InvalidParameterException,
                        AccessDeniedException,
                ) as e:
                    # FIX: InvalidHandleException was listed twice in
                    # this tuple.  An unopenable interface is skipped.
                    self._logger.debug(e, exc_info=True)
                else:
                    self._logger.info(
                        'Opened Interface module, {0}.'.format(iface.id_)
                    )
                    iface.update_device_info_list(self.timeout_for_update)
                    self._interfaces.append(iface)
                    for d_info in iface.device_info_list:
                        self.device_info_list.append(d_info)
    except LoadLibraryException as e:
        self._logger.error(e, exc_info=True)
        self._has_revised_device_list = False
    else:
        self._has_revised_device_list = True
    #
    self._logger.info('Updated the device information list.')
def _destroy_image_acquirer(self, ia):
    """
    Releases all external resources including the controlling device.

    Stops acquisition, releases the data streams, detaches the chunk
    adapter, disconnects the node map, closes the device and finally
    removes *ia* from the list of tracked acquirers.
    """
    id_ = None
    if ia.device:
        #
        ia.stop_image_acquisition()
        #
        ia._release_data_streams()
        #
        id_ = ia._device.id_
        #
        if ia.device.node_map:
            #
            if ia._chunk_adapter:
                ia._chunk_adapter.detach_buffer()
                ia._chunk_adapter = None
                self._logger.info(
                    'Detached a buffer from the chunk adapter of {0}.'.format(
                        id_
                    )
                )
            ia.device.node_map.disconnect()
            self._logger.info(
                'Disconnected the port from the NodeMap of {0}.'.format(
                    id_
                )
            )
        #
        if ia._device.is_open():
            ia._device.close()
            self._logger.info(
                'Closed Device module, {0}.'.format(id_)
            )
        ia._device = None
    #
    # id_ stays None when the acquirer had no device attached.
    if id_:
        self._logger.info(
            'Destroyed the ImageAcquirer object which {0} '
            'had belonged to.'.format(id_)
        )
    else:
        self._logger.info(
            'Destroyed an ImageAcquirer.'
        )
    if self._profiler:
        self._profiler.print_diff()
    self._ias.remove(ia)
def is_running_on_macos():
    """
    Returns a truth value for a proposition: "the program is running on
    a macOS machine".

    :rtype: bool
    """
    # A case-insensitive substring check replaces the previous
    # `False if not re.search(...) else True` construct; same result,
    # no regex machinery needed.
    return 'darwin' in platform.platform().lower()
def get_sha1_signature(token, timestamp, nonce, encrypt):
    """
    Generate a security signature with the SHA1 algorithm.

    @param token: ticket
    @param timestamp: timestamp
    @param encrypt: ciphertext
    @param nonce: random string
    @return: security signature (hex digest)
    @raises CryptoComputeSignatureError: on any failure while computing
        the signature.
    """
    try:
        parts = [token, timestamp, nonce, to_binary(encrypt)]
        parts.sort()
        digest = hashlib.sha1()
        digest.update(to_binary("").join(parts))
        return digest.hexdigest()
    except Exception as e:
        # Surface every failure as a crypto-specific error.
        raise CryptoComputeSignatureError(e)
def login(self, verify_code=''):
    """
    Log in to the WeChat Official Account admin platform.

    Note that when ``WechatExt`` is instantiated without a ``token`` and
    ``cookies``, this method is called automatically and does not need
    to be invoked by hand.  Call it again only after catching a
    ``NeedLoginError`` to retry the login.

    :param verify_code: Verify code; pass an empty string when no verify
        code is required.
    :raises LoginVerifyCodeError: A verify code is required or the given
        one is wrong (subclass of ``LoginError``).
    :raises LoginError: Login failed; the exception carries the server's
        response and can be logged.
    """
    url = 'https://mp.weixin.qq.com/cgi-bin/login'
    payload = {
        'username': self.__username,
        'pwd': self.__password,
        'imgcode': verify_code,
        'f': 'json',
    }
    headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/loginpage?t=wxm2-login&lang=zh_CN',
        'Cookie': self.__cookies,
    }
    r = requests.post(url, data=payload, headers=headers)
    # A successful login response embeds the admin token in a URL.
    s = re.search(r'token=(\d+)', r.text)
    if not s:
        try:
            error_code = json.loads(r.text)['base_resp']['ret']
        except (KeyError, ValueError):
            # Response is not the expected JSON shape.
            raise LoginError(r.text)
        # Error codes -8/-27 indicate a verify-code problem.
        if error_code in [-8, -27]:
            raise LoginVerifyCodeError(r.text)
        elif re.search(r'readtemplate', r.text):
            raise LoginError('You need to turn off the safety protection of wechat.')
        else:
            raise LoginError(r.text)
    self.__token = int(s.group(1))
    # Rebuild the cookie string from the response's cookie jar.
    self.__cookies = ''
    for cookie in r.cookies:
        self.__cookies += cookie.name + '=' + cookie.value + ';'
def get_verify_code(self, file_path):
    """
    Fetch the login verify-code image and save it to disk.

    :param file_path: File path where the verify-code image is stored.
    """
    url = 'https://mp.weixin.qq.com/cgi-bin/verifycode'
    payload = {
        'username': self.__username,
        'r': int(random.random() * 10000000000000),
    }
    headers = {
        'referer': 'https://mp.weixin.qq.com/',
    }
    # NOTE(review): the payload is sent as a request body on a GET
    # request (``data=``, not ``params=``); confirm the endpoint really
    # expects that before changing it.
    response = requests.get(url, data=payload, headers=headers, stream=True)
    self.__cookies = ''.join(
        '%s=%s;' % (cookie.name, cookie.value) for cookie in response.cookies
    )
    with open(file_path, 'wb') as fd:
        for chunk in response.iter_content(1024):
            fd.write(chunk)
def send_message(self, fakeid, content):
    """
    Actively send a text message.

    :param fakeid: The user's UID (i.e. fakeid).
    :param content: The text content to send.
    :raises NeedLoginError: The operation failed and another login
        attempt is required; the exception carries the server response.
    :raises ValueError: Bad argument; possible message:
        ``fake id not exist``.
    """
    url = 'https://mp.weixin.qq.com/cgi-bin/singlesend?t=ajax-response'
    payload = {
        'tofakeid': fakeid,
        'type': 1,
        'token': self.__token,
        'content': content,
        'ajax': 1,
    }
    headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/singlesendpage?t=message/send&action=index&tofakeid={fakeid}&token={token}&lang=zh_CN'.format(
            fakeid=fakeid,
            token=self.__token,
        ),
        'cookie': self.__cookies,
    }
    response = requests.post(url, data=payload, headers=headers)
    try:
        ret = json.loads(response.text)['base_resp']['ret']
    except (KeyError, ValueError):
        # Non-JSON or unexpected shape -> session is no longer valid.
        raise NeedLoginError(response.text)
    if ret == -21:
        raise ValueError('fake id not exist')
    if ret != 0:
        raise NeedLoginError(response.text)
def get_user_list(self, page=0, pagesize=10, groupid=0):
    """
    Get the user (follower) list.

    Sample JSON response::

        {
            "contacts": [
                {
                    "id": 2431798261,
                    "nick_name": "Doraemonext",
                    "remark_name": "",
                    "group_id": 0
                },
                {
                    "id": 896229760,
                    "nick_name": "WeChat nickname",
                    "remark_name": "",
                    "group_id": 0
                }
            ]
        }

    :param page: Page index (starts from 0).
    :param pagesize: Page size.
    :param groupid: Group ID.
    :return: The decoded JSON data.
    :raises NeedLoginError: The operation failed and another login
        attempt is required; the exception carries the server response.
    """
    url = 'https://mp.weixin.qq.com/cgi-bin/contactmanage?t=user/index&pagesize={pagesize}&pageidx={page}&type=0&groupid={groupid}&lang=zh_CN&f=json&token={token}'.format(
        pagesize=pagesize,
        page=page,
        groupid=groupid,
        token=self.__token,
    )
    headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/contactmanage?t=user/index&pagesize={pagesize}&pageidx={page}&type=0&groupid=0&lang=zh_CN&token={token}'.format(
            pagesize=pagesize,
            page=page,
            token=self.__token,
        ),
        'cookie': self.__cookies,
    }
    response = requests.get(url, headers=headers)
    try:
        contacts = json.loads(response.text)['contact_list']
    except (KeyError, ValueError):
        # Non-JSON or unexpected shape -> session is no longer valid.
        raise NeedLoginError(response.text)
    return contacts
def stat_article_detail_list(self, page=1, start_date=None, end_date=None):
    """Fetch article (graphic message) analytics data.

    The returned JSON (as a string) contains:

    * ``hasMore`` — whether incrementing ``page`` yields more data
    * ``data`` — a list of per-article entries, each with:

      * ``index`` — a list of stat strings, in order: delivered count,
        article-page read users, article-page read count, page conversion
        (read users / delivered), original-page read users, original-page
        read count, original conversion, share users, share count,
        WeChat-favourite users
      * ``time`` — the stat date, e.g. ``"2015-01-21"``
      * ``table_data`` — a JSON string with the detailed table layout
      * ``id`` / ``title`` — the article's message id and title

    :param page: page number (Tencent's interface starts at 1,
        with 3 records per page)
    :param start_date: start date, e.g. ``"2015-01-15"``; defaults to
        30 days before today, computed at call time
    :param end_date: end date, e.g. ``"2015-02-01"``; defaults to
        today, computed at call time
    :return: the returned JSON data (string); see the field list above
    :raises NeedLoginError: the operation did not succeed and a fresh
        login is required; the exception carries the server's raw response
    """
    # BUGFIX: the defaults used to be evaluated once at import time
    # (``start_date=str(date.today()+timedelta(days=-30))``), so a
    # long-running process kept querying a stale, frozen date window.
    # Compute them at call time instead.
    if start_date is None:
        start_date = str(date.today() + timedelta(days=-30))
    if end_date is None:
        end_date = str(date.today())
    self._init_plugin_token_appid()
    url = 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format(
        page=page,
        appid=self.__appid,
        token=self.__plugin_token,
        rnd=int(time.time()),
        start_date=start_date,
        end_date=end_date,
    )
    headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format(
            page=page,
            appid=self.__appid,
            token=self.__plugin_token,
            rnd=int(time.time()),
            start_date=start_date,
            end_date=end_date,
        ),
        'cookie': self.__cookies,
    }
    r = requests.get(url, headers=headers)
    # The MTA endpoint may set extra cookies (e.g. wechat_token) on first
    # use; append them so subsequent stat calls stay authenticated.
    if not re.search(r'wechat_token', self.__cookies):
        for cookie in r.cookies:
            self.__cookies += cookie.name + '=' + cookie.value + ';'
    try:
        data = json.loads(r.text)
        if data.get('is_session_expire'):
            raise NeedLoginError(r.text)
        message = json.dumps(data, ensure_ascii=False)
    except (KeyError, ValueError):
        raise NeedLoginError(r.text)
    return message
def get_group_list(self):
    """Fetch the follower group list.

    Sample of the returned JSON::

        {
            "groups": [
                {"cnt": 8, "id": 0, "name": "未分组"},
                {"cnt": 0, "id": 1, "name": "黑名单"},
                {"cnt": 0, "id": 2, "name": "星标组"}
            ]
        }

    :return: the ``group_list`` JSON data returned by the server
    :raises NeedLoginError: the operation did not succeed and a fresh
        login is required; the exception carries the server's raw response
    """
    url = 'https://mp.weixin.qq.com/cgi-bin/contactmanage?t=user/index&pagesize=10&pageidx=0&type=0&groupid=0&lang=zh_CN&f=json&token={token}'.format(
        token=self.__token,
    )
    headers = {
        'x-requested-with': 'XMLHttpRequest',
        # BUGFIX: the referer template used to end in a bare "token="
        # with no "{token}" placeholder, so .format() silently dropped
        # the token from the referer (unlike every sibling method).
        'referer': 'https://mp.weixin.qq.com/cgi-bin/contactmanage?t=user/index&pagesize=10&pageidx=0&type=0&groupid=0&lang=zh_CN&token={token}'.format(
            token=self.__token,
        ),
        'cookie': self.__cookies,
    }
    r = requests.get(url, headers=headers)
    try:
        message = json.loads(r.text)['group_list']
    except (KeyError, ValueError):
        raise NeedLoginError(r.text)
    return message
def get_news_list(self, page, pagesize=10):
    """Fetch the list of graphic (news) messages in the material library.

    The result is a JSON string: a list of news entries, each carrying
    ``title``, ``author``, ``digest``, ``cover``/``img_url``,
    ``content_url``, ``source_url``, ``file_id``, ``app_id``,
    ``create_time``, ``show_cover_pic``, ``seq`` and a ``multi_item``
    list with the same fields for every article bundled in the message.

    :param page: page index (starting from 0)
    :param pagesize: number of entries per page
    :return: the returned JSON data (string)
    :raises NeedLoginError: the operation did not succeed and a fresh
        login is required; the exception carries the server's raw response
    """
    offset = page * pagesize
    endpoint = "https://mp.weixin.qq.com/cgi-bin/appmsg?token={token}&lang=zh_CN&type=10&action=list&begin={begin}&count={pagesize}&f=json&random={random}".format(
        token=self.__token,
        begin=offset,
        pagesize=pagesize,
        random=round(random.random(), 3),
    )
    request_headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/masssendpage?t=mass/send&token={token}&lang=zh_CN'.format(
            token=self.__token,
        ),
        'cookie': self.__cookies,
    }
    response = requests.get(endpoint, headers=request_headers)
    try:
        items = json.loads(response.text)['app_msg_info']['item']
    except (KeyError, ValueError):
        raise NeedLoginError(response.text)
    return json.dumps(items, ensure_ascii=False)
def get_dialog_message(self, fakeid, last_msgid=0, create_time=0):
    """Fetch the dialog (conversation) with a given user.

    The content is filtered by ``last_msgid`` (the ID of the earliest
    message **sent by the account to the user** that should anchor the
    dialog) and ``create_time`` (the earliest message timestamp wanted).

    Filtering rules:

    1. ``last_msgid`` first (pass 0 / omit to skip this filter):

       a. ``fakeid`` identifies the user;
       b. ``last_msgid`` is matched against messages the account
          previously sent to that user;
       c. on a match, every message involving that user from that
          message onward (sent and received) is returned;
       d. on no match, every message involving that user is returned.

    2. The result of rule 1 is then filtered by ``create_time``:
       only messages at or after that timestamp are kept
       (pass 0 / omit to skip this filter).

    The result is a JSON string with ``to_nick_name`` and a
    ``msg_items.msg_item`` list, each entry carrying fields such as
    ``id``, ``type``, ``date_time``, ``fakeid``, ``nick_name``,
    ``content`` and, for rich messages, ``multi_item`` details.

    :param fakeid: the user's UID (i.e. ``fakeid``)
    :param last_msgid: ID of a message previously sent to the user;
        0 means all messages
    :param create_time: keep messages at or after this timestamp;
        0 means all messages
    :return: the returned JSON data (string)
    :raises NeedLoginError: the operation did not succeed and a fresh
        login is required; the exception carries the server's raw response
    """
    self._init_fakeid()
    endpoint = 'https://mp.weixin.qq.com/cgi-bin/singlesendpage?tofakeid={fakeid}&action=sync&lastmsgfromfakeid={fromfakeid}&lastmsgid={last_msgid}&createtime={create_time}&token={token}&lang=zh_CN&f=json&ajax=1'.format(
        fakeid=fakeid,
        fromfakeid=self.__fakeid,
        last_msgid=last_msgid,
        create_time=create_time,
        token=self.__token,
    )
    request_headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/message?t=message/list&count=20&day=7&token={token}&lang=zh_CN'.format(token=self.__token),
        'cookie': self.__cookies,
    }
    response = requests.get(endpoint, headers=request_headers)
    try:
        page_info = json.loads(response.text)['page_info']
    except (KeyError, ValueError):
        raise NeedLoginError(response.text)
    return json.dumps(page_info, ensure_ascii=False)
def send_news(self, fakeid, msgid):
    """Send a graphic (news) message to a user.

    The message must be selected from the material library by its ID.

    :param fakeid: the user's UID (i.e. ``fakeid``)
    :param msgid: the graphic message ID
    :raises NeedLoginError: the operation did not succeed and a fresh
        login is required; the exception carries the server's raw response
    :raises ValueError: invalid argument — message is
        ``fake id not exist`` or ``message id not exist``
    """
    endpoint = 'https://mp.weixin.qq.com/cgi-bin/singlesend?t=ajax-response'
    form = {
        'lang': 'zh_CN',
        'f': 'json',
        'tofakeid': fakeid,
        'type': 10,
        'token': self.__token,
        'appmsgid': msgid,
        'app_id': msgid,
        'error': 'false',
        'ajax': 1,
        'random': random.random(),
    }
    request_headers = {
        'x-requested-with': 'XMLHttpRequest',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/singlemsgpage?fromfakeid={fakeid}&msgid=&source=&count=20&t=wxm-singlechat&lang=zh_CN'.format(
            fakeid=fakeid,
        ),
        'cookie': self.__cookies,
    }
    response = requests.post(endpoint, data=form, headers=request_headers)
    try:
        result = json.loads(response.text)
    except ValueError:
        raise NeedLoginError(response.text)
    try:
        ret_code = result['base_resp']['ret']
    except KeyError:
        raise NeedLoginError(response.text)
    # Map the server's ret codes onto the documented exceptions.
    if ret_code in (10700, -21):
        raise ValueError('fake id not exist')
    if ret_code == 10705:
        raise ValueError('message id not exist')
    if ret_code != 0:
        raise NeedLoginError(response.text)
def add_news(self, news):
    """Create a graphic (news) message in the material library.

    :param news: a list of dicts, one per article. Recognised keys are
        ``title``, ``author``, ``summary``, ``content``, ``picture_id``
        (a library image ID, obtainable via ``upload_file``) and
        ``from_url`` (the source link). ``title`` and ``content`` are
        mandatory for every article.

        Example::

            [
                {
                    'title': 'article title',
                    'author': 'article author',
                    'summary': 'article summary',
                    'content': 'article content',
                    'picture_id': '23412341',
                    'from_url': 'http://www.baidu.com',
                },
                {
                    'title': 'minimal article title',
                    'content': 'article content',
                }
            ]

    :raises ValueError: the arguments are invalid
    :raises NeedLoginError: the operation did not succeed and a fresh
        login is required; the exception carries the server's raw response
    """
    # Validate before touching the network.
    if not news:
        raise ValueError('The news cannot be empty')
    for article in news:
        if 'title' not in article or 'content' not in article:
            raise ValueError('The news item needs to provide at least two arguments: title, content')
    endpoint = 'https://mp.weixin.qq.com/cgi-bin/operate_appmsg?lang=zh_CN&t=ajax-response&sub=create&token={token}'.format(
        token=self.__token,
    )
    form = {
        'token': self.__token,
        'type': 10,
        'lang': 'zh_CN',
        'sub': 'create',
        'ajax': 1,
        'AppMsgId': '',
        'error': 'false',
    }
    request_headers = {
        'referer': 'https://mp.weixin.qq.com/cgi-bin/operate_appmsg?lang=zh_CN&sub=edit&t=wxm-appmsgs-edit-new&type=10&subtype=3&token={token}'.format(
            token=self.__token
        ),
        'cookie': self.__cookies,
    }
    # The endpoint expects numbered field names: title0, author0, ...
    for index, article in enumerate(news):
        form['title' + str(index)] = article.get('title')
        form['author' + str(index)] = article.get('author')
        form['digest' + str(index)] = article.get('summary')
        form['content' + str(index)] = article.get('content')
        form['fileid' + str(index)] = article.get('picture_id')
        form['sourceurl' + str(index)] = article.get('from_url')
    form['count'] = len(news)
    response = requests.post(endpoint, data=form, headers=request_headers)
    try:
        result = json.loads(response.text)
    except ValueError:
        raise NeedLoginError(response.text)
    try:
        if result['ret'] != '0':
            raise ValueError(response.text)
    except KeyError:
        raise NeedLoginError(response.text)