Dataset schema:
  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 classes)
  text              string (75-19.8k chars)
  language          string (1 class)
  meta_information  dict
q24200
Map._draw_lines_internal
train
def _draw_lines_internal(self, coords, colour, bg):
    """Helper to draw lines connecting a set of nodes that are scaled for the Screen."""
    for i, (x, y) in enumerate(coords):
        if i == 0:
            self._screen.move(x, y)
        else:
            self._screen.draw(x, y, colour=colour, bg=bg, thin=True)
python
{ "resource": "" }
q24201
Map._draw_polygons
train
def _draw_polygons(self, feature, bg, colour, extent, polygons, xo, yo):
    """Draw a set of polygons from a vector tile."""
    coords = []
    for polygon in polygons:
        coords.append([self._scale_coords(x, y, extent, xo, yo) for x, y in polygon])

    # Polygons are expensive to draw and the buildings layer is huge - so we convert to
    # lines in order to process updates fast enough to animate.
    if "type" in feature["properties"] and "building" in feature["properties"]["type"]:
        for line in coords:
            self._draw_lines_internal(line, colour, bg)
    else:
        self._screen.fill_polygon(coords, colour=colour, bg=bg)
python
{ "resource": "" }
q24202
Map._draw_lines
train
def _draw_lines(self, bg, colour, extent, line, xo, yo):
    """Draw a set of lines from a vector tile."""
    coords = [self._scale_coords(x, y, extent, xo, yo) for x, y in line]
    self._draw_lines_internal(coords, colour, bg)
python
{ "resource": "" }
q24203
Map._draw_feature
train
def _draw_feature(self, feature, extent, colour, bg, xo, yo):
    """Draw a single feature from a layer in a vector tile."""
    geometry = feature["geometry"]
    if geometry["type"] == "Polygon":
        self._draw_polygons(feature, bg, colour, extent, geometry["coordinates"], xo, yo)
    elif geometry["type"] == "MultiPolygon":
        for multi_polygon in geometry["coordinates"]:
            self._draw_polygons(feature, bg, colour, extent, multi_polygon, xo, yo)
    elif geometry["type"] == "LineString":
        self._draw_lines(bg, colour, extent, geometry["coordinates"], xo, yo)
    elif geometry["type"] == "MultiLineString":
        for line in geometry["coordinates"]:
            self._draw_lines(bg, colour, extent, line, xo, yo)
    elif geometry["type"] == "Point":
        x, y = self._scale_coords(
            geometry["coordinates"][0], geometry["coordinates"][1], extent, xo, yo)
        text = u" {} ".format(feature["properties"]["name_en"])
        self._screen.print_at(text, int(x - len(text) / 2), int(y), colour=colour, bg=bg)
python
{ "resource": "" }
q24204
Map._draw_tile_layer
train
def _draw_tile_layer(self, tile, layer_name, c_filters, colour, t_filters, x, y, bg):
    """Draw the visible geometry in the specified map tile."""
    # Don't bother rendering if the tile is not visible
    left = (x + self._screen.width // 4) * 2
    top = y + self._screen.height // 2
    if (left > self._screen.width or left + self._size * 2 < 0 or
            top > self._screen.height or top + self._size < 0):
        return 0

    # Not all layers are available in every tile.
    try:
        _layer = tile[layer_name]
        _extent = float(_layer["extent"])
    except KeyError:
        return 0

    for _feature in _layer["features"]:
        try:
            if c_filters and _feature["properties"]["class"] not in c_filters:
                continue
            if (t_filters and _feature["type"] not in t_filters and
                    _feature["properties"]["type"] not in t_filters):
                continue
            self._draw_feature(
                _feature, _extent, colour, bg,
                (x + self._screen.width // 4) * 2, y + self._screen.height // 2)
        except KeyError:
            pass
    return 1
python
{ "resource": "" }
q24205
Map._draw_satellite_tile
train
def _draw_satellite_tile(self, tile, x, y):
    """Draw a satellite image tile to screen."""
    image, colours = tile.rendered_text
    for (i, line) in enumerate(image):
        self._screen.paint(line, x, y + i, colour_map=colours[i])
    return 1
python
{ "resource": "" }
q24206
Map._draw_tiles
train
def _draw_tiles(self, x_offset, y_offset, bg):
    """Render all visible tiles a layer at a time."""
    count = 0
    for layer_name, c_filters, t_filters in self._get_features():
        colour = (self._256_PALETTE[layer_name]
                  if self._screen.colours >= 256 else self._16_PALETTE[layer_name])
        for x, y, z, tile, satellite in sorted(self._tiles.values(), key=lambda k: k[0]):
            # Don't draw the wrong type or zoom of tile.
            if satellite != self._satellite or z != self._zoom:
                continue

            # Convert tile location into pixels and draw the tile.
            x *= self._size
            y *= self._size
            if satellite:
                count += self._draw_satellite_tile(
                    tile,
                    int((x - x_offset + self._screen.width // 4) * 2),
                    int(y - y_offset + self._screen.height // 2))
            else:
                count += self._draw_tile_layer(tile, layer_name, c_filters, colour,
                                               t_filters, x - x_offset, y - y_offset, bg)
    return count
python
{ "resource": "" }
q24207
Map._move_to_desired_location
train
def _move_to_desired_location(self):
    """Animate movement to desired location on map."""
    self._next_update = 100000
    x_start = self._convert_longitude(self._longitude)
    y_start = self._convert_latitude(self._latitude)
    x_end = self._convert_longitude(self._desired_longitude)
    y_end = self._convert_latitude(self._desired_latitude)
    if sqrt((x_end - x_start) ** 2 + (y_end - y_start) ** 2) > _START_SIZE // 4:
        self._zoom_map(True)
    elif self._zoom != self._desired_zoom:
        self._zoom_map(self._desired_zoom < self._zoom)
    if self._longitude != self._desired_longitude:
        self._next_update = 1
        if self._desired_longitude < self._longitude:
            self._longitude = max(self._longitude - 360 / 2 ** self._zoom / self._size * 2,
                                  self._desired_longitude)
        else:
            self._longitude = min(self._longitude + 360 / 2 ** self._zoom / self._size * 2,
                                  self._desired_longitude)
    if self._latitude != self._desired_latitude:
        self._next_update = 1
        if self._desired_latitude < self._latitude:
            self._latitude = max(self._inc_lat(self._latitude, 2), self._desired_latitude)
        else:
            self._latitude = min(self._inc_lat(self._latitude, -2), self._desired_latitude)
    if self._next_update == 1:
        self._updated.set()
python
{ "resource": "" }
q24208
Map.process_event
train
def process_event(self, event):
    """User input for the main map view."""
    if isinstance(event, KeyboardEvent):
        if event.key_code in [Screen.ctrl("m"), Screen.ctrl("j")]:
            self._scene.add_effect(
                EnterLocation(
                    self._screen, self._longitude, self._latitude, self._on_new_location))
        elif event.key_code in [ord('q'), ord('Q'), Screen.ctrl("c")]:
            raise StopApplication("User quit")
        elif event.key_code in [ord('t'), ord('T')]:
            self._satellite = not self._satellite
            if self._satellite:
                self._size = _START_SIZE
        elif event.key_code == ord("?"):
            self._scene.add_effect(PopUpDialog(self._screen, _HELP, ["OK"]))
        elif event.key_code == ord("+") and self._zoom <= 20:
            if self._desired_zoom < 20:
                self._desired_zoom += 1
        elif event.key_code == ord("-") and self._zoom >= 0:
            if self._desired_zoom > 0:
                self._desired_zoom -= 1
        elif event.key_code == ord("0"):
            self._desired_zoom = 0
        elif event.key_code == ord("9"):
            self._desired_zoom = 20
        elif event.key_code == Screen.KEY_LEFT:
            self._desired_longitude -= 360 / 2 ** self._zoom / self._size * 10
        elif event.key_code == Screen.KEY_RIGHT:
            self._desired_longitude += 360 / 2 ** self._zoom / self._size * 10
        elif event.key_code == Screen.KEY_UP:
            self._desired_latitude = self._inc_lat(self._desired_latitude, -self._size / 10)
        elif event.key_code == Screen.KEY_DOWN:
            self._desired_latitude = self._inc_lat(self._desired_latitude, self._size / 10)
        else:
            return

        # Trigger a reload of the tiles and redraw map
        self._updated.set()
        self._screen.force_update()
python
{ "resource": "" }
q24209
Map._on_new_location
train
def _on_new_location(self, form):
    """Set a new desired location entered in the pop-up form."""
    self._desired_longitude = float(form.data["long"])
    self._desired_latitude = float(form.data["lat"])
    self._desired_zoom = 13
    self._screen.force_update()
python
{ "resource": "" }
q24210
cli
train
def cli(location, **kwargs):
    """Geocode an arbitrary number of strings from the command line."""
    locations = []

    # Read standard input
    # $ cat foo.txt | geocode
    try:
        for line in fileinput.input():
            locations.append(line.strip())
    except:
        pass

    # Read multiple files & user input location
    for item in location:
        if os.path.exists(item):
            with open(item, 'rb') as f:
                locations += f.read().splitlines()
        else:
            locations.append(item)

    # Distance calculation
    if kwargs['distance']:
        d = geocoder.distance(locations, **kwargs)
        click.echo(d)
        return

    # Geocode results from user input
    for location in locations:
        g = geocoder.get(location.strip(), **kwargs)
        try:
            click.echo(json.dumps(getattr(g, kwargs['output'])))
        except IOError:
            # When an invalid command is entered, a broken pipe error occurs
            return
python
{ "resource": "" }
q24211
GeonamesQuery._build_params
train
def _build_params(self, location, provider_key, **kwargs):
    """Will be overridden according to the targeted web service"""
    base_kwargs = {
        'q': location,
        'fuzzy': kwargs.get('fuzzy', 1.0),
        'username': provider_key,
        'maxRows': kwargs.get('maxRows', 1),
    }
    # check for bbox in kwargs
    bbox = kwargs.pop('proximity', None)
    if bbox is not None:
        bbox = BBox.factory(bbox)
        base_kwargs.update(
            {'east': bbox.east, 'west': bbox.west,
             'north': bbox.north, 'south': bbox.south})

    # look out for valid extra kwargs
    supported_kwargs = set((
        'name', 'name_equals', 'name_startsWith', 'startRow',
        'country', 'countryBias', 'continentCode',
        'adminCode1', 'adminCode2', 'adminCode3',
        'cities', 'featureClass', 'featureCode',
        'lang', 'type', 'style',
        'isNameRequired', 'tag', 'operator', 'charset',
        'east', 'west', 'north', 'south',
        'orderby', 'inclBbox',
    ))
    found_kwargs = supported_kwargs & set(kwargs.keys())
    LOGGER.debug("Adding extra kwargs %s", found_kwargs)

    # update base kwargs with extra ones
    base_kwargs.update(dict(
        [(extra, kwargs[extra]) for extra in found_kwargs]
    ))
    return base_kwargs
python
{ "resource": "" }
q24212
BaiduQuery._sign_url
train
def _sign_url(self, base_url, params, security_key):
    """Signs a request url with a security key."""
    import hashlib
    if six.PY3:
        from urllib.parse import urlencode, quote, quote_plus
    else:
        from urllib import urlencode, quote, quote_plus

    if not base_url or not self.security_key:
        return None

    params = params.copy()
    address = params.pop('address')
    url = base_url + '?address=' + address + '&' + urlencode(params)
    encoded_url = quote(url, safe="/:=&?#+!$,;'@()*[]")
    signature = quote_plus(encoded_url + self.security_key).encode('utf-8')
    encoded_signature = hashlib.md5(signature).hexdigest()
    return encoded_signature
python
{ "resource": "" }
q24213
MultipleResultsQuery._is_valid_url
train
def _is_valid_url(url):
    """
    Helper function to validate that URLs are well formed, i.e. that they
    contain a valid protocol and a valid domain. It does not actually
    check whether the URL exists.
    """
    try:
        parsed = urlparse(url)
        mandatory_parts = [parsed.scheme, parsed.netloc]
        return all(mandatory_parts)
    except:
        return False
python
{ "resource": "" }
q24214
OsmQuery._before_initialize
train
def _before_initialize(self, location, **kwargs):
    """Use a specific URL if one has been provided; otherwise fall back to cls._URL."""
    url = kwargs.get('url', '')
    if url.lower() == 'localhost':
        self.url = 'http://localhost/nominatim/search'
    elif url:
        self.url = url
python
{ "resource": "" }
q24215
haversine
train
def haversine(point1, point2, **kwargs):
    """
    Calculate the great-circle distance between two points on the Earth's surface.

    :input: two 2-tuples, containing the latitude and longitude of each point
            in decimal degrees.

    Example: haversine((45.7597, 4.8422), (48.8567, 2.3508))

    :output: Returns the distance between the two points. The default unit is
             kilometers. Miles can be returned if the ``miles`` parameter is
             set to True.
    """
    lookup_units = {
        'miles': 'miles', 'mile': 'miles', 'mi': 'miles', 'ml': 'miles',
        'kilometers': 'kilometers', 'kilometres': 'kilometers',
        'kilometer': 'kilometers', 'kilometre': 'kilometers', 'km': 'kilometers',
        'meters': 'meters', 'metres': 'meters',
        'meter': 'meters', 'metre': 'meters', 'm': 'meters',
        'feet': 'feet', 'f': 'feet', 'ft': 'feet',
    }
    if point1.ok and point2.ok:
        # convert all latitudes/longitudes from decimal degrees to radians
        lat1, lng1, lat2, lng2 = list(map(radians, point1.latlng + point2.latlng))

        # calculate haversine
        lat = lat2 - lat1
        lng = lng2 - lng1
        d = sin(lat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(lng / 2) ** 2
        h = 2 * AVG_EARTH_RADIUS * asin(sqrt(d))

        # Measurements
        units = kwargs.get('units', 'kilometers').lower()
        units_calculation = {
            'miles': h * 0.621371,
            'feet': h * 0.621371 * 5280,
            'meters': h * 1000,
            'kilometers': h,
        }
        if units in lookup_units:
            return units_calculation[lookup_units[units]]
        else:
            raise ValueError("Unknown units of measurement")
    else:
        print(u'[WARNING] Error calculating the following two locations.\n'
              'Points: {0} to {1}'.format(point1.location, point2.location))
python
{ "resource": "" }
q24216
get_gif
train
def get_gif(api_key, gif_id):
    '''Returns a dict with gif information from the API.'''
    url = 'http://api.giphy.com/v1/gifs/{}?api_key={}'.format(gif_id, api_key)
    r = urlopen(url)
    return json.loads(r.read().decode('utf-8'))
python
{ "resource": "" }
q24217
create_html
train
def create_html(api_key, attrs):
    '''Returns complete html tag string.'''
    gif = get_gif(api_key, attrs['gif_id'])
    if 'alt' not in attrs.keys():
        attrs['alt'] = 'source: {}'.format(gif['data']['source'])
    html_out = '<a href="{}">'.format(gif['data']['url'])
    html_out += '<img src="{}" alt="{}">'.format(
        gif['data']['images']['original']['url'], attrs['alt'])
    html_out += '</a>'
    return html_out
python
{ "resource": "" }
q24218
main
train
def main(api_key, markup):
    '''Does the regex parsing and runs the create_html function.'''
    match = GIPHY.search(markup)
    attrs = None
    if match:
        attrs = dict(
            [(key, value.strip())
             for (key, value) in match.groupdict().items() if value])
    else:
        raise ValueError('Error processing input. '
                         'Expected syntax: {}'.format(SYNTAX))
    return create_html(api_key, attrs)
python
{ "resource": "" }
q24219
filetime_from_git
train
def filetime_from_git(content, git_content):
    '''Update modification and creation times from git'''
    if not content.settings['GIT_FILETIME_FROM_GIT']:
        # Disabled for everything
        return
    if not string_to_bool(content.metadata.get('gittime', 'yes')):
        # Disabled for this content
        return

    path = content.source_path
    fs_creation_time = datetime_from_timestamp(os.stat(path).st_ctime, content)
    fs_modified_time = datetime_from_timestamp(os.stat(path).st_mtime, content)

    # 1. file is not managed by git
    #    date: fs time
    # 2. file is staged, but has no commits
    #    date: fs time
    # 3. file is managed, and clean
    #    date: first commit time, update: last commit time or None
    # 4. file is managed, but dirty
    #    date: first commit time, update: fs time
    if git_content.is_managed_by_git():
        if git_content.is_committed():
            content.date = git_content.get_oldest_commit_date()
            if git_content.is_modified():
                content.modified = fs_modified_time
            else:
                content.modified = git_content.get_newest_commit_date()
        else:
            # File isn't committed
            content.date = fs_creation_time
    else:
        # file is not managed by git
        content.date = fs_creation_time

    # Clean up content attributes
    if not hasattr(content, 'modified'):
        content.modified = content.date
    if hasattr(content, 'date'):
        content.locale_date = strftime(content.date, content.date_format)
    if hasattr(content, 'modified'):
        content.locale_modified = strftime(
            content.modified, content.date_format)
python
{ "resource": "" }
q24220
git_sha_metadata
train
def git_sha_metadata(content, git_content):
    '''Add sha metadata to content'''
    if not content.settings['GIT_SHA_METADATA']:
        return
    if not git_content.is_committed():
        return
    content.metadata['gitsha_newest'] = str(git_content.get_newest_commit())
    content.metadata['gitsha_oldest'] = str(git_content.get_oldest_commit())
python
{ "resource": "" }
q24221
update_hash_from_str
train
def update_hash_from_str(hsh, str_input):
    """
    Convert a str to an object supporting the buffer API and update a hash with it.
    """
    byte_input = str(str_input).encode("UTF-8")
    hsh.update(byte_input)
python
{ "resource": "" }
q24222
git_permalink
train
def git_permalink(content, git_content):
    '''Add git based permalink id to content metadata'''
    if not content.settings['GIT_GENERATE_PERMALINK']:
        return
    if not string_to_bool(content.metadata.get('git_permalink', 'yes')):
        # Disabled for this content
        return
    if not git_content.is_committed():
        return

    permalink_hash = hashlib.sha1()
    update_hash_from_str(permalink_hash, git_content.get_oldest_commit())
    update_hash_from_str(permalink_hash, git_content.get_oldest_filename())
    git_permalink_id_raw = base64.urlsafe_b64encode(permalink_hash.digest())
    git_permalink_id = git_permalink_id_raw.decode("UTF-8")

    permalink_id_metadata_key = content.settings['PERMALINK_ID_METADATA_KEY']
    if permalink_id_metadata_key in content.metadata:
        content.metadata[permalink_id_metadata_key] = (
            ','.join((
                content.metadata[permalink_id_metadata_key],
                git_permalink_id))
        )
    else:
        content.metadata[permalink_id_metadata_key] = git_permalink_id
python
{ "resource": "" }
q24223
process_summary
train
def process_summary(article):
    """Ensures summaries are not cut off. Also inserts mathjax script so
    that math will be rendered"""
    summary = article.summary
    summary_parsed = BeautifulSoup(summary, 'html.parser')
    math = summary_parsed.find_all(class_='math')

    if len(math) > 0:
        last_math_text = math[-1].get_text()
        if len(last_math_text) > 3 and last_math_text[-3:] == '...':
            content_parsed = BeautifulSoup(article._content, 'html.parser')
            full_text = content_parsed.find_all(class_='math')[len(math) - 1].get_text()
            math[-1].string = "%s ..." % full_text
            summary = summary_parsed.decode()

        # clear memoization cache
        import functools
        if isinstance(article.get_summary, functools.partial):
            memoize_instance = article.get_summary.func.__self__
            memoize_instance.cache.clear()

    article._summary = "%s<script type='text/javascript'>%s</script>" % (
        summary, process_summary.mathjax_script)
python
{ "resource": "" }
q24224
configure_typogrify
train
def configure_typogrify(pelicanobj, mathjax_settings):
    """Instructs Typogrify to ignore math tags - which allows Typogrify
    to play nicely with math related content"""

    # If Typogrify is not being used, then just exit
    if not pelicanobj.settings.get('TYPOGRIFY', False):
        return

    try:
        import typogrify
        from distutils.version import LooseVersion

        if LooseVersion(typogrify.__version__) < LooseVersion('2.0.7'):
            raise TypeError('Incorrect version of Typogrify')

        from typogrify.filters import typogrify

        # At this point, we are happy to use Typogrify, meaning
        # it is installed and it is a recent enough version
        # that can be used to ignore all math

        # Instantiate markdown extension and append it to the current extensions
        pelicanobj.settings['TYPOGRIFY_IGNORE_TAGS'].extend(
            ['.math', 'script'])  # ignore math class and script
    except (ImportError, TypeError) as e:
        pelicanobj.settings['TYPOGRIFY'] = False  # disable Typogrify

        if isinstance(e, ImportError):
            print("\nTypogrify is not installed, so it is being ignored.\n"
                  "If you want to use it, please install via: pip install typogrify\n")

        if isinstance(e, TypeError):
            print("\nA more recent version of Typogrify is needed for the render_math module.\n"
                  "Please upgrade Typogrify to the latest version (anything equal or above "
                  "version 2.0.7 is okay).\nTypogrify will be turned off due to this reason.\n")
python
{ "resource": "" }
q24225
process_mathjax_script
train
def process_mathjax_script(mathjax_settings):
    """Load the mathjax script template from file, and render with the settings"""

    # Read the mathjax javascript template from file
    with open(os.path.dirname(os.path.realpath(__file__))
              + '/mathjax_script_template', 'r') as mathjax_script_template:
        mathjax_template = mathjax_script_template.read()

    return mathjax_template.format(**mathjax_settings)
python
{ "resource": "" }
q24226
mathjax_for_markdown
train
def mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings):
    """Instantiates a customized markdown extension for handling mathjax
    related content"""

    # Create the configuration for the markdown template
    config = {}
    config['mathjax_script'] = mathjax_script
    config['math_tag_class'] = 'math'
    config['auto_insert'] = mathjax_settings['auto_insert']

    # Instantiate markdown extension and append it to the current extensions
    try:
        # pelican 3.6.3 and earlier
        if isinstance(pelicanobj.settings.get('MD_EXTENSIONS'), list):
            pelicanobj.settings['MD_EXTENSIONS'].append(PelicanMathJaxExtension(config))
        else:
            pelicanobj.settings['MARKDOWN'].setdefault(
                'extensions', []).append(PelicanMathJaxExtension(config))
    except:
        sys.excepthook(*sys.exc_info())
        sys.stderr.write("\nError - the pelican mathjax markdown extension "
                         "failed to configure. MathJax is non-functional.\n")
        sys.stderr.flush()
python
{ "resource": "" }
q24227
mathjax_for_rst
train
def mathjax_for_rst(pelicanobj, mathjax_script, mathjax_settings):
    """Setup math for RST"""
    docutils_settings = pelicanobj.settings.get('DOCUTILS_SETTINGS', {})
    docutils_settings.setdefault('math_output',
                                 'MathJax %s' % mathjax_settings['source'])
    pelicanobj.settings['DOCUTILS_SETTINGS'] = docutils_settings
    rst_add_mathjax.mathjax_script = mathjax_script
python
{ "resource": "" }
q24228
pelican_init
train
def pelican_init(pelicanobj):
    """
    Loads the mathjax script according to the settings.
    Instantiates the Python markdown extension, passing in the mathjax
    script as a config parameter.
    """

    # Process settings, and set global var
    mathjax_settings = process_settings(pelicanobj)

    # Generate mathjax script
    mathjax_script = process_mathjax_script(mathjax_settings)

    # Configure Typogrify
    configure_typogrify(pelicanobj, mathjax_settings)

    # Configure Mathjax For Markdown
    if PelicanMathJaxExtension:
        mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings)

    # Configure Mathjax For RST
    mathjax_for_rst(pelicanobj, mathjax_script, mathjax_settings)

    # Set process_summary's mathjax_script variable
    process_summary.mathjax_script = None
    if mathjax_settings['process_summary']:
        process_summary.mathjax_script = mathjax_script
python
{ "resource": "" }
q24229
rst_add_mathjax
train
def rst_add_mathjax(content):
    """Adds mathjax script for reStructuredText"""

    # .rst is the only valid extension for reStructuredText files
    _, ext = os.path.splitext(os.path.basename(content.source_path))
    if ext != '.rst':
        return

    # If math class is present in text, add the javascript;
    # note that RST hardwires mathjax to be class "math"
    if 'class="math"' in content._content:
        content._content += ("<script type='text/javascript'>%s</script>"
                             % rst_add_mathjax.mathjax_script)
python
{ "resource": "" }
q24230
process_rst_and_summaries
train
def process_rst_and_summaries(content_generators):
    """
    Ensure mathjax script is applied to RST and summaries are
    corrected if specified in user settings.

    Handles content attached to ArticleGenerator and PageGenerator objects,
    since the plugin doesn't know how to handle other Generator types.

    For reStructuredText content, examine both articles and pages.
    If an article or page is reStructuredText and there is math present,
    append the mathjax script.

    Also process summaries if present (only applies to articles)
    and the user wants summaries processed (via user settings)
    """
    for generator in content_generators:
        if isinstance(generator, generators.ArticlesGenerator):
            for article in (
                    generator.articles +
                    generator.translations +
                    generator.drafts):
                rst_add_mathjax(article)
                # optionally fix truncated formulae in summaries.
                if process_summary.mathjax_script is not None:
                    process_summary(article)
        elif isinstance(generator, generators.PagesGenerator):
            for page in generator.pages:
                rst_add_mathjax(page)
            for page in generator.hidden_pages:
                rst_add_mathjax(page)
python
{ "resource": "" }
q24231
get_permalink_ids_iter
train
def get_permalink_ids_iter(self):
    '''
    Method to get permalink ids from content.
    To be bound to the class last thing.
    '''
    permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY']
    permalink_ids = self.metadata.get(permalink_id_key, '')

    for permalink_id in permalink_ids.split(','):
        if permalink_id:
            yield permalink_id.strip()
python
{ "resource": "" }
q24232
get_permalink_path
train
def get_permalink_path(self):
    """Get just path component of permalink."""
    try:
        first_permalink_id = next(self.get_permalink_ids_iter())
    except StopIteration:
        return None

    return '/{settings[PERMALINK_PATH]}/{first_permalink}.html'.format(
        settings=self.settings, first_permalink=first_permalink_id)
python
{ "resource": "" }
q24233
add_permalink_methods
train
def add_permalink_methods(content_inst):
    '''Add permalink methods to object'''
    for permalink_method in PERMALINK_METHODS:
        setattr(
            content_inst,
            permalink_method.__name__,
            permalink_method.__get__(content_inst, content_inst.__class__))
python
{ "resource": "" }
q24234
PermalinkGenerator.generate_output
train
def generate_output(self, writer=None):
    '''Generate redirect files'''
    logger.info(
        'Generating permalink files in %r', self.permalink_output_path)
    clean_output_dir(self.permalink_output_path, [])
    mkdir_p(self.permalink_output_path)
    for content in itertools.chain(
            self.context['articles'], self.context['pages']):
        for permalink_id in content.get_permalink_ids_iter():
            permalink_path = os.path.join(
                self.permalink_output_path, permalink_id) + '.html'
            redirect_string = REDIRECT_STRING.format(
                url=article_url(content),
                title=content.title)
            open(permalink_path, 'w').write(redirect_string)
python
{ "resource": "" }
q24235
getText
train
def getText(node, recursive=False):
    """Get all the text associated with this node.
    With recursive == True, all text from child nodes is retrieved."""
    L = [u'']
    for n in node.childNodes:
        if n.nodeType in (node.TEXT_NODE, node.CDATA_SECTION_NODE):
            L.append(n.data)
        else:
            if not recursive:
                return None
            L.append(getText(n))
    return u''.join(L)
python
{ "resource": "" }
q24236
datetime_from_timestamp
train
def datetime_from_timestamp(timestamp, content):
    """
    Helper function to add timezone information to datetime,
    so that datetime is comparable to other datetime objects in recent
    versions that now also have timezone information.
    """
    return set_date_tzinfo(
        datetime.fromtimestamp(timestamp),
        tz_name=content.settings.get('TIMEZONE', None))
python
{ "resource": "" }
q24237
get_info
train
def get_info(photo_id, api_key):
    '''Get photo information from the flickr api.'''
    query = urlencode({
        'method': 'flickr.photos.getInfo',
        'api_key': api_key,
        'photo_id': photo_id,
        'format': 'json',
        'nojsoncallback': '1'
    })
    r = urlopen('https://api.flickr.com/services/rest/?' + query)
    info = json.loads(r.read().decode('utf-8'))
    if info['stat'] == 'fail':
        raise ValueError(info['message'])
    return info
python
{ "resource": "" }
q24238
source_url
train
def source_url(farm, server, id, secret, size):
    '''Url for direct jpg use.'''
    if size == 'small':
        img_size = 'n'
    elif size == 'medium':
        img_size = 'c'
    elif size == 'large':
        img_size = 'b'
    return 'https://farm{}.staticflickr.com/{}/{}_{}_{}.jpg'.format(
        farm, server, id, secret, img_size)
python
{ "resource": "" }
q24239
generate_html
train
def generate_html(attrs, api_key):
    '''Returns html code.'''
    # getting flickr api data
    flickr_data = get_info(attrs['photo_id'], api_key)

    # if size is not defined it will use large as image size
    if 'size' not in attrs.keys():
        attrs['size'] = 'large'

    # if no alt is defined it will use the flickr image title
    if 'alt' not in attrs.keys():
        attrs['alt'] = flickr_data['photo']['title']['_content']

    # return final html code
    return '<a href="{}"><img src="{}" alt="{}"></a>'.format(
        flickr_data['photo']['urls']['url'][0]['_content'],
        source_url(flickr_data['photo']['farm'],
                   flickr_data['photo']['server'],
                   attrs['photo_id'],
                   flickr_data['photo']['secret'],
                   attrs['size']),
        attrs['alt'])
python
{ "resource": "" }
q24240
create_gzip_file
train
def create_gzip_file(filepath, overwrite):
    '''Create a gzipped file in the same directory with a filepath.gz name.

    :param filepath: A file to compress
    :param overwrite: Whether the original file should be overwritten
    '''
    compressed_path = filepath + '.gz'

    with open(filepath, 'rb') as uncompressed:
        gzip_compress_obj = zlib.compressobj(COMPRESSION_LEVEL,
                                             zlib.DEFLATED, WBITS)

        uncompressed_data = uncompressed.read()
        gzipped_data = gzip_compress_obj.compress(uncompressed_data)
        gzipped_data += gzip_compress_obj.flush()

        if len(gzipped_data) >= len(uncompressed_data):
            logger.debug('No improvement: %s' % filepath)
            return

        with open(compressed_path, 'wb') as compressed:
            logger.debug('Compressing: %s' % filepath)
            try:
                compressed.write(gzipped_data)
            except Exception as ex:
                logger.critical('Gzip compression failed: %s' % ex)

        if overwrite:
            logger.debug('Overwriting: %s with %s' % (filepath, compressed_path))
            os.remove(filepath)
            os.rename(compressed_path, filepath)
python
{ "resource": "" }
q24241
GitContentAdapter.get_oldest_filename
train
def get_oldest_filename(self):
    '''Get the original filename of this content. Implies follow'''
    commit_and_name_iter = self.git.get_commits_and_names_iter(
        self.content.source_path)
    _commit, name = next(commit_and_name_iter)
    return name
python
{ "resource": "" }
q24242
GitContentAdapter.get_oldest_commit_date
train
def get_oldest_commit_date(self):
    '''
    Get datetime of oldest commit involving this file

    :returns: Datetime of oldest commit
    '''
    oldest_commit = self.get_oldest_commit()
    return self.git.get_commit_date(oldest_commit, self.tz_name)
python
{ "resource": "" }
q24243
GitContentAdapter.get_newest_commit_date
train
def get_newest_commit_date(self):
    '''
    Get datetime of newest commit involving this file

    :returns: Datetime of newest commit
    '''
    newest_commit = self.get_newest_commit()
    return self.git.get_commit_date(newest_commit, self.tz_name)
python
{ "resource": "" }
q24244
keyboard_role
train
def keyboard_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    This function creates an inline console input block as defined in the
    twitter bootstrap documentation. It overrides the default behaviour of
    the kbd role.

    *Usage:*

        :kbd:`<your code>`

    *Example:*

        :kbd:`<section>`

    This code is not highlighted.
    """
    new_element = nodes.literal(rawtext, text)
    new_element.set_class('kbd')

    return [new_element], []
python
{ "resource": "" }
q24245
glyph_role
train
def glyph_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    This function defines a glyph inline role that shows a glyph icon from
    the twitter bootstrap framework.

    *Usage:*

        :glyph:`<glyph_name>`

    *Example:*

        Love this music :glyph:`music` :)

    Can be subclassed to include a target.

    *Example:*

        .. role:: story_time_glyph(glyph)
            :target: http://www.youtube.com/watch?v=5g8ykQLYnX0
            :class: small text-info

        Love this music :story_time_glyph:`music` :)
    """
    target = options.get('target', None)
    glyph_name = 'glyphicon-{}'.format(text)

    if target:
        target = utils.unescape(target)
        new_element = nodes.reference(rawtext, ' ', refuri=target)
    else:
        new_element = nodes.container()
    classes = options.setdefault('class', [])
    classes += ['glyphicon', glyph_name]
    for custom_class in classes:
        new_element.set_class(custom_class)
    return [new_element], []
python
{ "resource": "" }
q24246
TextileReader.read
train
def read(self, source_path):
    """Parse content and metadata of textile files."""
    with pelican_open(source_path) as text:
        parts = text.split('----', 1)
        if len(parts) == 2:
            headerlines = parts[0].splitlines()
            headerpairs = map(lambda l: l.split(':', 1), headerlines)
            headerdict = {pair[0]: pair[1].strip()
                          for pair in headerpairs
                          if len(pair) == 2}
            metadata = self._parse_metadata(headerdict)
            content = textile(parts[1])
        else:
            metadata = {}
            content = textile(text)
    return content, metadata
python
{ "resource": "" }
q24247
optimize_images
train
def optimize_images(pelican):
    """
    Optimize jpg and png images

    :param pelican: The Pelican instance
    """
    for dirpath, _, filenames in os.walk(pelican.settings['OUTPUT_PATH']):
        for name in filenames:
            if os.path.splitext(name)[1] in COMMANDS.keys():
                optimize(dirpath, name)
python
{ "resource": "" }
q24248
optimize
train
def optimize(dirpath, filename):
    """
    Check if the name is a type of file that should be optimized,
    and optimize it if required.

    :param dirpath: Path of the file to be optimized
    :param filename: A file name to be optimized
    """
    filepath = os.path.join(dirpath, filename)
    logger.info('optimizing %s', filepath)

    ext = os.path.splitext(filename)[1]
    command, silent, verbose = COMMANDS[ext]
    flags = verbose if SHOW_OUTPUT else silent
    command = command.format(filename=filepath, flags=flags)
    call(command, shell=True)
python
{ "resource": "" }
q24249
add_libravatar
train
def add_libravatar(generator, metadata):
    """Article generator connector for the Libravatar plugin"""

    missing = generator.settings.get('LIBRAVATAR_MISSING')
    size = generator.settings.get('LIBRAVATAR_SIZE')

    ## Check the presence of the Email header
    if 'email' not in metadata.keys():
        try:
            metadata['email'] = generator.settings.get('AUTHOR_EMAIL')
        except:
            pass

    ## Add the Libravatar URL
    if metadata['email']:

        ## Compose URL using the MD5 hash
        ## (the ascii encoding is necessary for Python3)
        email = metadata['email'].lower().encode('ascii')
        md5 = hashlib.md5(email).hexdigest()
        url = 'http://cdn.libravatar.org/avatar/' + md5

        ## Add the optional "missing picture" parameters
        if missing or size:
            url = url + '?'
            if missing:
                url = url + 'd=' + missing
                if size:
                    url = url + '&'
            if size:
                url = url + 's=' + str(size)

        ## Add URL to the article's metadata
        metadata['author_libravatar'] = url
python
{ "resource": "" }
q24250
get_diag
train
def get_diag(code, command):
    """Generate diagram and return data"""
    import tempfile
    import shutil

    code = code + u'\n'

    try:
        tmpdir = tempfile.mkdtemp()
        fd, diag_name = tempfile.mkstemp(dir=tmpdir)

        f = os.fdopen(fd, "w")
        f.write(code.encode('utf-8'))
        f.close()

        format = _draw_mode.lower()
        draw_name = diag_name + '.' + format

        saved_argv = sys.argv
        argv = [diag_name, '-T', format, '-o', draw_name]
        if _draw_mode == 'SVG':
            argv += ['--ignore-pil']

        # Run command
        command.main(argv)

        # Read image data from file
        file_name = diag_name + '.' + _publish_mode.lower()
        with io.open(file_name, 'rb') as f:
            data = f.read()
            f.close()
    finally:
        for file in os.listdir(tmpdir):
            os.unlink(tmpdir + "/" + file)
        # os.rmdir will fail -> use shutil
        shutil.rmtree(tmpdir)

    return data
python
{ "resource": "" }
q24251
fetch_github_activity
train
def fetch_github_activity(gen, metadata):
    """
    registered handler for the github activity plugin

    it puts in generator.context the html needed to be displayed on a
    template
    """
    if 'GITHUB_ACTIVITY_FEED' in gen.settings.keys():
        gen.context['github_activity'] = gen.plugin_instance.fetch()
python
{ "resource": "" }
q24252
GitHubActivity.fetch
train
def fetch(self):
    """
    returns a list of html snippets fetched from the github activity feed
    """
    entries = []
    for activity in self.activities['entries']:
        entries.append(
            [element for element in
             [activity['title'], activity['content'][0]['value']]])

    return entries[0:self.max_entries]
python
{ "resource": "" }
q24253
RdfReader.read
train
def read(self, source_path):
    """Parse content and metadata of an rdf file"""
    logger.debug("Loading graph described in " + source_path)
    graph = rdflib.Graph()
    graph.load(source_path)
    meta = {}
    queries = [
        f for f in listdir(self.settings["VOC_QUERIES_PATH"])
        if (isfile(join(self.settings["VOC_QUERIES_PATH"], f))
            and f.endswith(".sparql"))]
    for query_path in queries:
        query_file_path = self.settings["VOC_QUERIES_PATH"] + "/" + query_path
        with open(query_file_path, "r") as query_file:
            query = query_file.read()
            # The name of the query identifies the elements in the context
            query_key = query_path.split(".")[0]
            result_set = graph.query(query)
            # Each query result will be stored as a dictionary in the
            # vocabulary context, referenced by the query name as its key.
            # Multiple results are stored in a list.
            for result in result_set:
                if not query_key in meta.keys():
                    meta[query_key] = result.asdict()
                elif type(meta[query_key]) == list:
                    meta[query_key].append(result.asdict())
                else:
                    meta[query_key] = [meta[query_key], result.asdict()]
    meta["iri"] = meta["lov_metadata"]["iri"]
    meta["description"] = meta["lov_metadata"]["description"]
    meta["version"] = meta["lov_metadata"]["version"]
    meta["title"] = meta["lov_metadata"]["title"]
    return "", meta
python
{ "resource": "" }
q24254
_get_file
train
def _get_file(src):
    """Return content from a local or remote file."""
    try:
        if '://' in src or src[0:2] == '//':
            # Most likely this is a remote file
            response = urllib2.urlopen(src)
            return response.read()
        else:
            with open(src, 'rb') as fh:
                return fh.read()
    except Exception as e:
        raise RuntimeError('Error generating base64image: {}'.format(e))
python
{ "resource": "" }
q24255
pelican_init
train
def pelican_init(pelicanobj):
    """Prepare configurations for the MD plugin"""
    global global_siteurl
    global_siteurl = pelicanobj.settings['SITEURL']

    try:
        import markdown
        from .plantuml_md import PlantUMLMarkdownExtension
    except:
        # Markdown not available
        logger.debug("[plantuml] Markdown support not available")
        return

    # Register the Markdown plugin
    config = {'siteurl': pelicanobj.settings['SITEURL']}

    try:
        if 'MD_EXTENSIONS' in pelicanobj.settings.keys():  # pre pelican 3.7.0
            pelicanobj.settings['MD_EXTENSIONS'].append(
                PlantUMLMarkdownExtension(config))
        elif 'MARKDOWN' in pelicanobj.settings.keys() and \
                not ('extension_configs' in
                     pelicanobj.settings['MARKDOWN']['extension_configs']):  # from pelican 3.7.0
            pelicanobj.settings['MARKDOWN']['extension_configs']['plantuml.plantuml_md'] = {}
    except:
        logger.error("[plantuml] Unable to configure plantuml markdown extension")
python
{ "resource": "" }
q24256
LiquidTags.register
train
def register(cls, tag):
    """Decorator to register a new include tag"""
    def dec(func):
        if tag in _LiquidTagsPreprocessor._tags:
            warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
        _LiquidTagsPreprocessor._tags[tag] = func
        return func
    return dec
python
{ "resource": "" }
q24257
git_wrapper
train
def git_wrapper(path):
    '''Get appropriate wrapper factory and cache instance for path'''
    path = os.path.abspath(path)
    if path not in _wrapper_cache:
        if hasattr(Repo, 'commits'):
            _wrapper_cache[path] = _GitWrapperLegacy(path)
        else:
            _wrapper_cache[path] = _GitWrapper(path)
    return _wrapper_cache[path]
python
{ "resource": "" }
q24258
_GitWrapperCommon.get_commits_and_names_iter
train
def get_commits_and_names_iter(self, path):
    '''
    Get all commits including a given path, following renames
    '''
    log_result = self.git.log(
        '--pretty=%H',
        '--follow',
        '--name-only',
        '--', path).splitlines()
    for commit_sha, _, filename in grouper(log_result, 3):
        yield self.repo.commit(commit_sha), filename
python
{ "resource": "" }
q24259
_GitWrapperCommon.get_commits
train
def get_commits(self, path, follow=False):
    '''
    Get all commits including path

    :param path: Path which we will find commits for
    :param bool follow: If True we will follow path through renames
    :returns: Sequence of commit objects. Newest to oldest
    '''
    if follow:
        return self.get_commits_following(path)
    else:
        return self._get_commits(path)
python
{ "resource": "" }
q24260
_GitWrapperLegacy.get_commit_date
train
def get_commit_date(commit, tz_name):
    '''
    Get datetime of commit committed_date
    '''
    return set_date_tzinfo(
        datetime.fromtimestamp(mktime(commit.committed_date)),
        tz_name=tz_name)
python
{ "resource": "" }
q24261
temporary_locale
train
def temporary_locale(temp_locale=None):
    '''Enable code to run in a context with a temporary locale

    Resets the locale back when exiting context.
    Can set a temporary locale if provided
    '''
    orig_locale = locale.setlocale(locale.LC_ALL)
    if temp_locale is not None:
        locale.setlocale(locale.LC_ALL, temp_locale)
    yield
    locale.setlocale(locale.LC_ALL, orig_locale)
python
{ "resource": "" }
q24262
initialize_dbs
train
def initialize_dbs(settings):
    '''Initialize internal DBs using the Pelican settings dict

    This clears the DBs for e.g. autoreload mode to work
    '''
    global _MAIN_SETTINGS, _MAIN_SITEURL, _MAIN_LANG, _SUBSITE_QUEUE
    _MAIN_SETTINGS = settings
    _MAIN_LANG = settings['DEFAULT_LANG']
    _MAIN_SITEURL = settings['SITEURL']
    _SUBSITE_QUEUE = settings.get('I18N_SUBSITES', {}).copy()
    prepare_site_db_and_overrides()
    # clear databases in case of autoreload mode
    _SITES_RELPATH_DB.clear()
    _NATIVE_CONTENT_URL_DB.clear()
    _GENERATOR_DB.clear()
python
{ "resource": "" }
q24263
prepare_site_db_and_overrides
train
def prepare_site_db_and_overrides():
    '''Prepare overrides and create _SITE_DB

    _SITE_DB.keys() need to be ready for filter_translations
    '''
    _SITE_DB.clear()
    _SITE_DB[_MAIN_LANG] = _MAIN_SITEURL
    # make sure it works for both root-relative and absolute
    main_siteurl = '/' if _MAIN_SITEURL == '' else _MAIN_SITEURL
    for lang, overrides in _SUBSITE_QUEUE.items():
        if 'SITEURL' not in overrides:
            overrides['SITEURL'] = posixpath.join(main_siteurl, lang)
        _SITE_DB[lang] = overrides['SITEURL']
        # default subsite hierarchy
        if 'OUTPUT_PATH' not in overrides:
            overrides['OUTPUT_PATH'] = os.path.join(
                _MAIN_SETTINGS['OUTPUT_PATH'], lang)
        if 'CACHE_PATH' not in overrides:
            overrides['CACHE_PATH'] = os.path.join(
                _MAIN_SETTINGS['CACHE_PATH'], lang)
        if 'STATIC_PATHS' not in overrides:
            overrides['STATIC_PATHS'] = []
        if ('THEME' not in overrides and 'THEME_STATIC_DIR' not in overrides
                and 'THEME_STATIC_PATHS' not in overrides):
            relpath = relpath_to_site(lang, _MAIN_LANG)
            overrides['THEME_STATIC_DIR'] = posixpath.join(
                relpath, _MAIN_SETTINGS['THEME_STATIC_DIR'])
            overrides['THEME_STATIC_PATHS'] = []
        # to change what is perceived as translations
        overrides['DEFAULT_LANG'] = lang
python
{ "resource": "" }
q24264
relpath_to_site
train
def relpath_to_site(lang, target_lang):
    '''Get relative path from siteurl of lang to siteurl of base_lang

    The output is cached in _SITES_RELPATH_DB
    '''
    path = _SITES_RELPATH_DB.get((lang, target_lang), None)
    if path is None:
        siteurl = _SITE_DB.get(lang, _MAIN_SITEURL)
        target_siteurl = _SITE_DB.get(target_lang, _MAIN_SITEURL)
        path = posixpath.relpath(get_site_path(target_siteurl),
                                 get_site_path(siteurl))
        _SITES_RELPATH_DB[(lang, target_lang)] = path
    return path
python
{ "resource": "" }
q24265
article2draft
train
def article2draft(article):
    '''Transform an Article to Draft'''
    draft = Draft(article._content, article.metadata, article.settings,
                  article.source_path, article._context)
    draft.status = 'draft'
    return draft
python
{ "resource": "" }
q24266
filter_contents_translations
train
def filter_contents_translations(generator):
    '''Filter the content and translations lists of a generator

    Filters out
        1) translations which will be generated in a different site
        2) content that is not in the language of the currently
           generated site but in that of a different site; content in a
           language which has no site is always generated.
    The filtering method may be modified by the respective untranslated
    policy.
    '''
    inspector = GeneratorInspector(generator)
    current_lang = generator.settings['DEFAULT_LANG']
    langs_with_sites = _SITE_DB.keys()
    removed_contents = _GENERATOR_DB[generator]

    for translations in inspector.translations_lists():
        for translation in translations[:]:  # copy to be able to remove
            if translation.lang in langs_with_sites:
                translations.remove(translation)
                removed_contents.append(translation)

    hiding_func = inspector.hiding_function()
    untrans_policy = inspector.untranslated_policy(default='hide')
    for (contents, other_contents) in inspector.contents_list_pairs():
        for content in other_contents:  # save any hidden native content first
            if content.lang == current_lang:  # in native lang
                # save the native URL attr formatted in the current locale
                _NATIVE_CONTENT_URL_DB[content.source_path] = content.url
        for content in contents[:]:  # copy for removing in loop
            if content.lang == current_lang:  # in native lang
                # save the native URL attr formatted in the current locale
                _NATIVE_CONTENT_URL_DB[content.source_path] = content.url
            elif content.lang in langs_with_sites and untrans_policy != 'keep':
                contents.remove(content)
                if untrans_policy == 'hide':
                    other_contents.append(hiding_func(content))
                elif untrans_policy == 'remove':
                    removed_contents.append(content)
python
{ "resource": "" }
q24267
install_templates_translations
train
def install_templates_translations(generator):
    '''Install gettext translations in the jinja2.Environment

    Only if the 'jinja2.ext.i18n' jinja2 extension is enabled
    the translations for the current DEFAULT_LANG are installed.
    '''
    if 'JINJA_ENVIRONMENT' in generator.settings:  # pelican 3.7+
        jinja_extensions = generator.settings['JINJA_ENVIRONMENT'].get(
            'extensions', [])
    else:
        jinja_extensions = generator.settings['JINJA_EXTENSIONS']

    if 'jinja2.ext.i18n' in jinja_extensions:
        domain = generator.settings.get('I18N_GETTEXT_DOMAIN', 'messages')
        localedir = generator.settings.get('I18N_GETTEXT_LOCALEDIR')
        if localedir is None:
            localedir = os.path.join(generator.theme, 'translations')
        current_lang = generator.settings['DEFAULT_LANG']
        if current_lang == generator.settings.get('I18N_TEMPLATES_LANG',
                                                  _MAIN_LANG):
            translations = gettext.NullTranslations()
        else:
            langs = [current_lang]
            try:
                translations = gettext.translation(domain, localedir, langs)
            except (IOError, OSError):
                _LOGGER.error((
                    "Cannot find translations for language '{}' in '{}' with "
                    "domain '{}'. Installing NullTranslations.").format(
                        langs[0], localedir, domain))
                translations = gettext.NullTranslations()
        newstyle = generator.settings.get('I18N_GETTEXT_NEWSTYLE', True)
        generator.env.install_gettext_translations(translations, newstyle)
python
{ "resource": "" }
q24268
add_variables_to_context
train
def add_variables_to_context(generator):
    '''Adds useful iterable variables to template context'''
    context = generator.context  # minimize attr lookup
    context['relpath_to_site'] = relpath_to_site
    context['main_siteurl'] = _MAIN_SITEURL
    context['main_lang'] = _MAIN_LANG
    context['lang_siteurls'] = _SITE_DB
    current_lang = generator.settings['DEFAULT_LANG']
    extra_siteurls = _SITE_DB.copy()
    extra_siteurls.pop(current_lang)
    context['extra_siteurls'] = extra_siteurls
python
{ "resource": "" }
q24269
interlink_translations
train
def interlink_translations(content):
    '''Link content to translations in their main language

    so the URL (including localized month names) of the different
    subsites will be honored
    '''
    lang = content.lang
    # sort translations by lang
    content.translations.sort(key=attrgetter('lang'))
    for translation in content.translations:
        relpath = relpath_to_site(lang, translation.lang)
        url = _NATIVE_CONTENT_URL_DB[translation.source_path]
        translation.override_url = posixpath.join(relpath, url)
python
{ "resource": "" }
q24270
interlink_translated_content
train
def interlink_translated_content(generator):
    '''Make translations link to the native locations

    for generators that may contain translated content
    '''
    inspector = GeneratorInspector(generator)
    for content in inspector.all_contents():
        interlink_translations(content)
python
{ "resource": "" }
q24271
interlink_removed_content
train
def interlink_removed_content(generator):
    '''For all contents removed from the generation queue,
    update interlinks to point to the native location
    '''
    current_lang = generator.settings['DEFAULT_LANG']
    for content in _GENERATOR_DB[generator]:
        url = _NATIVE_CONTENT_URL_DB[content.source_path]
        relpath = relpath_to_site(current_lang, content.lang)
        content.override_url = posixpath.join(relpath, url)
python
{ "resource": "" }
q24272
interlink_static_files
train
def interlink_static_files(generator):
    '''Add links to static files in the main site if necessary'''
    if generator.settings['STATIC_PATHS'] != []:
        return  # customized STATIC_PATHS
    try:  # minimize attr lookup
        static_content = generator.context['static_content']
    except KeyError:
        static_content = generator.context['filenames']
    relpath = relpath_to_site(generator.settings['DEFAULT_LANG'], _MAIN_LANG)
    for staticfile in _MAIN_STATIC_FILES:
        if staticfile.get_relative_source_path() not in static_content:
            staticfile = copy(staticfile)  # prevent override in main site
            staticfile.override_url = posixpath.join(relpath, staticfile.url)
            try:
                generator.add_source_path(staticfile, static=True)
            except TypeError:
                generator.add_source_path(staticfile)
python
{ "resource": "" }
q24273
update_generators
train
def update_generators():
    '''Update the context of all generators

    Adds useful variables and translations into the template context
    and interlinks translations
    '''
    for generator in _GENERATOR_DB.keys():
        install_templates_translations(generator)
        add_variables_to_context(generator)
        interlink_static_files(generator)
        interlink_removed_content(generator)
        interlink_translated_content(generator)
python
{ "resource": "" }
q24274
get_pelican_cls
train
def get_pelican_cls(settings):
    '''Get the Pelican class requested in settings'''
    cls = settings['PELICAN_CLASS']
    if isinstance(cls, six.string_types):
        module, cls_name = cls.rsplit('.', 1)
        module = __import__(module)
        cls = getattr(module, cls_name)
    return cls
python
{ "resource": "" }
q24275
create_next_subsite
train
def create_next_subsite(pelican_obj):
    '''Create the next subsite using the lang-specific config

    If there are no more subsites in the generation queue, update all
    the generators (interlink translations and removed content, add
    variables and translations to template context). Otherwise get the
    language and overrides for the next subsite in the queue and apply
    the overrides. Then generate the subsite using a PELICAN_CLASS
    instance and its run method. Finally, restore the previous locale.
    '''
    global _MAIN_SETTINGS
    if len(_SUBSITE_QUEUE) == 0:
        _LOGGER.debug(
            'i18n: Updating cross-site links and context of all generators.')
        update_generators()
        _MAIN_SETTINGS = None  # to initialize next time
    else:
        with temporary_locale():
            settings = _MAIN_SETTINGS.copy()
            lang, overrides = _SUBSITE_QUEUE.popitem()
            settings.update(overrides)
            settings = configure_settings(settings)  # to set LOCALE, etc.
            cls = get_pelican_cls(settings)

            new_pelican_obj = cls(settings)
            _LOGGER.debug(("Generating i18n subsite for language '{}' "
                           "using class {}").format(lang, cls))
            new_pelican_obj.run()
python
{ "resource": "" }
q24276
register
train
def register():
    '''Register the plugin only if required signals are available'''
    for sig_name in _SIGNAL_HANDLERS_DB.keys():
        if not hasattr(signals, sig_name):
            _LOGGER.error((
                'The i18n_subsites plugin requires the {} '
                'signal available for sure in Pelican 3.4.0 and later, '
                'plugin will not be used.').format(sig_name))
            return

    for sig_name, handler in _SIGNAL_HANDLERS_DB.items():
        sig = getattr(signals, sig_name)
        sig.connect(handler)
python
{ "resource": "" }
q24277
GeneratorInspector.translations_lists
train
def translations_lists(self):
    '''Iterator over lists of content translations'''
    return (getattr(self.generator, name) for name in
            self.info.get('translations_lists', []))
python
{ "resource": "" }
q24278
GeneratorInspector.contents_list_pairs
train
def contents_list_pairs(self):
    '''Iterator over pairs of normal and hidden contents'''
    return (tuple(getattr(self.generator, name) for name in names)
            for names in self.info.get('contents_lists', []))
python
{ "resource": "" }
q24279
GeneratorInspector.untranslated_policy
train
def untranslated_policy(self, default):
    '''Get the policy for untranslated content'''
    return self.generator.settings.get(self.info.get('policy', None),
                                       default)
python
{ "resource": "" }
q24280
GeneratorInspector.all_contents
train
def all_contents(self):
    '''Iterator over all contents'''
    translations_iterator = chain(*self.translations_lists())
    return chain(translations_iterator,
                 *(pair[i] for pair in self.contents_list_pairs()
                   for i in (0, 1)))
python
{ "resource": "" }
q24281
CodeInclude.run
train
def run(self):
    """Include a file as part of the content of this reST file."""
    if not self.state.document.settings.file_insertion_enabled:
        raise self.warning('"%s" directive disabled.' % self.name)
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    path = directives.path(self.arguments[0])
    path = os.path.normpath(os.path.join(source_dir, path))
    path = utils.relative_path(None, path)
    path = nodes.reprunicode(path)
    encoding = self.options.get(
        'encoding', self.state.document.settings.input_encoding)
    e_handler = self.state.document.settings.input_encoding_error_handler
    tab_width = self.options.get(
        'tab-width', self.state.document.settings.tab_width)
    try:
        self.state.document.settings.record_dependencies.add(path)
        include_file = io.FileInput(source_path=path,
                                    encoding=encoding,
                                    error_handler=e_handler)
    except UnicodeEncodeError as error:
        raise self.severe('Problems with "%s" directive path:\n'
                          'Cannot encode input file path "%s" '
                          '(wrong locale?).' %
                          (self.name, SafeString(path)))
    except IOError as error:
        raise self.severe('Problems with "%s" directive path:\n%s.' %
                          (self.name, ErrorString(error)))
    startline = self.options.get('start-line', None)
    endline = self.options.get('end-line', None)
    try:
        if startline or (endline is not None):
            lines = include_file.readlines()
            rawtext = ''.join(lines[startline:endline])
        else:
            rawtext = include_file.read()
    except UnicodeError as error:
        raise self.severe('Problem with "%s" directive:\n%s' %
                          (self.name, ErrorString(error)))

    include_lines = statemachine.string2lines(rawtext, tab_width,
                                              convert_whitespace=True)

    # default lexer to 'text'
    lexer = self.options.get('lexer', 'text')

    self.options['source'] = path
    codeblock = Pygments(self.name,
                         [lexer],  # arguments
                         {},  # no options for this directive
                         include_lines,  # content
                         self.lineno,
                         self.content_offset,
                         self.block_text,
                         self.state,
                         self.state_machine)
    return codeblock.run()
python
{ "resource": "" }
q24282
parse_article
train
def parse_article(generator, metadata):
    """Collect article metadata to be used for building the event calendar

    :returns: None
    """
    if 'event-start' not in metadata:
        return

    dtstart = parse_tstamp(metadata, 'event-start')

    if 'event-end' in metadata:
        dtend = parse_tstamp(metadata, 'event-end')
    elif 'event-duration' in metadata:
        dtdelta = parse_timedelta(metadata)
        dtend = dtstart + dtdelta
    else:
        msg = "Either 'event-end' or 'event-duration' must be" + \
            " specified in the event named '%s'" % metadata['title']
        log.error(msg)
        raise ValueError(msg)

    events.append(Event(dtstart, dtend, metadata))
python
{ "resource": "" }
q24283
generate_ical_file
train
def generate_ical_file(generator):
    """Generate an iCalendar file"""
    global events
    ics_fname = generator.settings['PLUGIN_EVENTS']['ics_fname']
    if not ics_fname:
        return

    ics_fname = os.path.join(generator.settings['OUTPUT_PATH'], ics_fname)
    log.debug("Generating calendar at %s with %d events" % (ics_fname, len(events)))

    tz = generator.settings.get('TIMEZONE', 'UTC')
    tz = pytz.timezone(tz)

    ical = icalendar.Calendar()
    ical.add('prodid', '-//My calendar product//mxm.dk//')
    ical.add('version', '2.0')

    DEFAULT_LANG = generator.settings['DEFAULT_LANG']
    curr_events = events if not localized_events else localized_events[DEFAULT_LANG]

    for e in curr_events:
        ie = icalendar.Event(
            summary=e.metadata['summary'],
            dtstart=e.dtstart,
            dtend=e.dtend,
            dtstamp=e.metadata['date'],
            priority=5,
            uid=e.metadata['title'] + e.metadata['summary'],
        )
        if 'event-location' in e.metadata:
            ie.add('location', e.metadata['event-location'])

        ical.add_component(ie)

    with open(ics_fname, 'wb') as f:
        f.write(ical.to_ical())
python
{ "resource": "" }
q24284
generate_localized_events
train
def generate_localized_events(generator):
    """Generates localized events dict if i18n_subsites plugin is active"""
    if "i18n_subsites" in generator.settings["PLUGINS"]:
        if not os.path.exists(generator.settings['OUTPUT_PATH']):
            os.makedirs(generator.settings['OUTPUT_PATH'])

        for e in events:
            if "lang" in e.metadata:
                localized_events[e.metadata["lang"]].append(e)
            else:
                log.debug("event %s contains no lang attribute"
                          % (e.metadata["title"],))
python
{ "resource": "" }
q24285
generate_events_list
train
def generate_events_list(generator):
    """Populate the events_list variable to be used in jinja templates"""
    if not localized_events:
        generator.context['events_list'] = sorted(
            events, reverse=True,
            key=lambda ev: (ev.dtstart, ev.dtend))
    else:
        generator.context['events_list'] = {
            k: sorted(v, reverse=True,
                      key=lambda ev: (ev.dtstart, ev.dtend))
            for k, v in localized_events.items()}
python
{ "resource": "" }
q24286
register
train
def register():
    """Calls the shots, based on signals"""
    signals.article_generator_finalized.connect(link_source_files)
    signals.page_generator_finalized.connect(link_source_files)
    signals.page_writer_finalized.connect(write_source_files)
python
{ "resource": "" }
q24287
CreoleReader.read
train
def read(self, source_path):
    """Parse content and metadata of creole files"""
    self._metadata = {}
    with pelican_open(source_path) as text:
        content = creole2html(text, macros={'header': self._parse_header_macro,
                                            'code': self._parse_code_macro})
    return content, self._metadata
python
{ "resource": "" }
q24288
generate_thumbnail_download_link_vimeo
train
def generate_thumbnail_download_link_vimeo(video_id_from_shortcode):
    """Thumbnail URL generator for Vimeo videos."""
    # Following the Vimeo API at https://developer.vimeo.com/api#video-request,
    # we need to request the video's metadata and pull the thumbnail out of it.

    # Download the video's metadata in JSON format.
    video_metadata = urlopen("https://vimeo.com/api/v2/video/"
                             + str(video_id_from_shortcode) + ".json").read()
    # Parse the JSON.
    video_metadata_parsed = json.loads(video_metadata.decode('utf-8'))
    # Go into the JSON and get the URL of the thumbnail.
    video_thumbnail_large_location = video_metadata_parsed[0]['thumbnail_large']
    return video_thumbnail_large_location
python
{ "resource": "" }
q24289
IdenticonRendererBase.render
train
def render(self, size):
    """
    render identicon to PIL.Image

    @param size identicon patchsize. (image size is 3 * [size])
    @return PIL.Image
    """
    # decode the code
    middle, corner, side, foreColor, backColor = self.decode(self.code)

    size = int(size)
    # make image
    image = Image.new("RGB", (size * 3, size * 3))
    draw = ImageDraw.Draw(image)

    # fill background
    draw.rectangle((0, 0, image.size[0], image.size[1]), fill=0)

    kwds = {
        'draw': draw,
        'size': size,
        'foreColor': foreColor,
        'backColor': backColor}

    # middle patch
    self.drawPatch((1, 1), middle[2], middle[1], middle[0], **kwds)

    # side patch
    kwds['type'] = side[0]
    for i in range(4):
        pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i]
        self.drawPatch(pos, side[2] + 1 + i, side[1], **kwds)

    # corner patch
    kwds['type'] = corner[0]
    for i in range(4):
        pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i]
        self.drawPatch(pos, corner[2] + 1 + i, corner[1], **kwds)

    return image
python
{ "resource": "" }
q24290
make_posts
train
def make_posts(generator, metadata, url):
    """
    Make posts on reddit if it's not a draft, on whatever subs are specified
    """
    reddit = generator.get_reddit()
    title = lxml.html.fromstring(metadata['title']).text_content()
    if reddit is None:
        log.info("Reddit plugin not enabled")
        return
    if metadata.get('status') == "draft":
        # people don't want to post drafts
        log.debug("ignoring draft %s" % title)
        return

    collection = generator.settings['REDDIT_POSTER_COLLECT_SUB']
    sub = reddit.subreddit(collection)
    results = sub.search(title)
    if len([result for result in results]) > 0:
        log.debug("ignoring %s because it is already on sub %s " % (title, collection))
        # post already was made to this sub
        return
    try:
        submission = sub.submit(title, url=url, resubmit=False)
        cross_post(reddit, submission, metadata.get('subreddit'))
    except praw.exceptions.APIException as e:
        log.error("got an api exception: %s", e)
    except AssertionError as e:
        log.error("Received an assertion error %s", e)
python
{ "resource": "" }
q24291
init_reddit
train
def init_reddit(generator):
    """
    this is a hack to make sure the reddit object keeps track of a session
    through article scanning, speeding up networking as the connection can
    be kept alive.
    """
    auth_dict = generator.settings.get('REDDIT_POSTER_AUTH')
    if auth_dict is None:
        log.info("Could not find REDDIT_POSTER_AUTH key in settings, reddit plugin won't function")
        generator.get_reddit = lambda: None
        return
    reddit = praw.Reddit(**auth_dict)
    generator.get_reddit = lambda: reddit
python
{ "resource": "" }
q24292
ReduceOpacity
train
def ReduceOpacity(im, opacity):
    """Reduces Opacity.

    Returns an image with reduced opacity.
    Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879
    """
    assert opacity >= 0 and opacity <= 1
    if isalpha(im):
        im = im.copy()
    else:
        im = im.convert('RGBA')

    alpha = im.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    im.putalpha(alpha)
    return im
python
{ "resource": "" }
q24293
detect_images_and_galleries
train
def detect_images_and_galleries(generators):
    """Runs generator on both pages and articles."""
    for generator in generators:
        if isinstance(generator, ArticlesGenerator):
            for article in itertools.chain(generator.articles,
                                           generator.translations,
                                           generator.drafts):
                detect_image(generator, article)
                detect_gallery(generator, article)
        elif isinstance(generator, PagesGenerator):
            for page in itertools.chain(generator.pages,
                                        generator.translations,
                                        generator.hidden_pages):
                detect_image(generator, page)
                detect_gallery(generator, page)
python
{ "resource": "" }
q24294
add_jinja2_ext
train
def add_jinja2_ext(pelican):
    """Add Webassets to Jinja2 extensions in Pelican settings."""
    if 'JINJA_ENVIRONMENT' in pelican.settings:  # pelican 3.7+
        pelican.settings['JINJA_ENVIRONMENT']['extensions'].append(AssetsExtension)
    else:
        pelican.settings['JINJA_EXTENSIONS'].append(AssetsExtension)
python
{ "resource": "" }
q24295
create_assets_env
train
def create_assets_env(generator):
    """Define the assets environment and pass it to the generator."""
    theme_static_dir = generator.settings['THEME_STATIC_DIR']
    assets_destination = os.path.join(generator.output_path, theme_static_dir)
    generator.env.assets_environment = Environment(
        assets_destination, theme_static_dir)

    if 'ASSET_CONFIG' in generator.settings:
        for item in generator.settings['ASSET_CONFIG']:
            generator.env.assets_environment.config[item[0]] = item[1]

    if 'ASSET_BUNDLES' in generator.settings:
        for name, args, kwargs in generator.settings['ASSET_BUNDLES']:
            generator.env.assets_environment.register(name, *args, **kwargs)

    if 'ASSET_DEBUG' in generator.settings:
        generator.env.assets_environment.debug = generator.settings['ASSET_DEBUG']
    elif logging.getLevelName(logger.getEffectiveLevel()) == "DEBUG":
        generator.env.assets_environment.debug = True

    for path in (generator.settings['THEME_STATIC_PATHS'] +
                 generator.settings.get('ASSET_SOURCE_PATHS', [])):
        full_path = os.path.join(generator.theme, path)
        generator.env.assets_environment.append_path(full_path)
python
{ "resource": "" }
q24296
run_pygal
train
def run_pygal(data, options=[], format='svg'):
    """ Runs pygal programs and returns image data """
    import pygal

    chart_title = data.get('title', None)
    chart_type = data.get('type', '').lower()

    # Config options are pretty much proxied straight through from the
    # JSON dict into the object
    config = pygal.Config()
    config_dict = data.get('config', {})
    for key in config_dict.keys():
        setattr(config, key, config_dict[key])

    chart = None  # stays None when the chart type is unrecognised
    if chart_type == 'bar':
        chart = pygal.HorizontalBar(config) if data.get('horizontal', False) else pygal.Bar(config)
    elif chart_type == 'line':
        chart = pygal.Line(config)
    elif chart_type == 'pie':
        ir = data.get('inner_radius', 0.0)
        hp = data.get('half_pie', False)
        chart = pygal.Pie(config, inner_radius=ir, half_pie=hp)
    else:
        print('undefined or unknown chart type')

    if chart is not None:
        chart.title = data.get('title', None)

        # Do labels (if present)
        label_data = data.get('x-labels', None)
        if isinstance(label_data, list):
            # use list
            chart.x_labels = label_data
        elif isinstance(label_data, dict):
            # use a range
            range_from = label_data.get('from', 0)
            range_to = label_data.get('to', 0)
            chart.x_labels = map(str, range(range_from, range_to))

        # insert data
        for data_set in data.get('data', []):
            title = data_set.get('title', None)
            values = data_set.get('values', None)
            chart.add(title, values)

        # now render
        result = chart.render_data_uri()
    else:
        result = None

    return result
python
{ "resource": "" }
q24297
pygal_parser
train
def pygal_parser(preprocessor, tag, markup):
    """ Simple pygal parser """
    # Find JSON payload
    data = loads(markup)

    if tag == 'pygal' and data is not None:
        # Run generation of chart
        output = run_pygal(data)

        # Return embedded SVG image
        return '<div class="pygal" style="text-align: center;"><embed type="image/svg+xml" src="%s" style="max-width:1000px"/></div>' % output
    else:
        raise ValueError('Error processing input. \nExpected syntax: {0}'.format(SYNTAX))
python
{ "resource": "" }
q24298
resize_thumbnails
train
def resize_thumbnails(pelican):
    """ Resize a directory tree full of images into thumbnails

    :param pelican: The pelican instance
    :return: None
    """
    global enabled
    if not enabled:
        return

    in_path = _image_path(pelican)

    include_regex = pelican.settings.get('THUMBNAIL_INCLUDE_REGEX')
    if include_regex:
        pattern = re.compile(include_regex)
        is_included = lambda name: pattern.match(name)
    else:
        is_included = lambda name: not name.startswith('.')

    sizes = pelican.settings.get('THUMBNAIL_SIZES', DEFAULT_THUMBNAIL_SIZES)
    resizers = dict((k, _resizer(k, v, in_path)) for k, v in sizes.items())
    logger.debug("Thumbnailer Started")
    for dirpath, _, filenames in os.walk(in_path):
        for filename in filenames:
            if is_included(filename):
                for name, resizer in resizers.items():
                    in_filename = path.join(dirpath, filename)
                    out_path = get_out_path(pelican, in_path, in_filename, name)
                    resizer.resize_file_to(
                        in_filename, out_path,
                        pelican.settings.get('THUMBNAIL_KEEP_NAME'))
python
{ "resource": "" }
q24299
expand_gallery
train
def expand_gallery(generator, metadata):
    """ Expand a gallery tag to include all of the files in a specific
    directory under IMAGE_PATH

    :param generator: The pelican generator
    :param metadata: The article or page metadata
    :return: None
    """
    if "gallery" not in metadata or metadata['gallery'] is None:
        return  # If no gallery specified, we do nothing

    lines = []
    base_path = _image_path(generator)
    in_path = path.join(base_path, metadata['gallery'])
    template = generator.settings.get('GALLERY_TEMPLATE', DEFAULT_TEMPLATE)
    thumbnail_name = generator.settings.get("GALLERY_THUMBNAIL", DEFAULT_GALLERY_THUMB)
    resizer = _resizer(thumbnail_name, '?x?', base_path)
    for dirpath, _, filenames in os.walk(in_path):
        for filename in filenames:
            if not filename.startswith('.'):
                url = path.join(dirpath, filename).replace(base_path, "")[1:]
                url = path.join('/static', generator.settings.get('IMAGE_PATH', DEFAULT_IMAGE_DIR), url).replace('\\', '/')
                logger.debug("GALLERY: {0}".format(url))
                thumbnail = resizer.get_thumbnail_name(filename)
                thumbnail = path.join('/', generator.settings.get('THUMBNAIL_DIR', DEFAULT_THUMBNAIL_DIR), thumbnail).replace('\\', '/')
                lines.append(template.format(
                    filename=filename,
                    url=url,
                    thumbnail=thumbnail,
                ))
    metadata['gallery_content'] = "\n".join(lines)
python
{ "resource": "" }