Dataset schema (field: type, value-length range):

- repository_name: string, length 7 to 55
- func_path_in_repository: string, length 4 to 223
- func_name: string, length 1 to 134
- whole_func_string: string, length 75 to 104k
- language: string, 1 distinct value
- func_code_string: string, length 75 to 104k
- func_code_tokens: list, length 19 to 28.4k
- func_documentation_string: string, length 1 to 46.9k
- func_documentation_tokens: list, length 1 to 1.97k
- split_name: string, 1 distinct value
- func_code_url: string, length 87 to 315

repository_name: pwwang/liquidpy
func_path_in_repository: liquid/__init__.py
func_name: Liquid.split

````python
def split(s, delimter, trim=True, limit=0):  # pragma: no cover
    """
    Split a string using a single-character delimiter
    @params:
        `s`: the string
        `delimter`: the single-character delimiter
        `trim`: whether to trim each part. Default: True
        `limit`: the maximum number of splits to perform; 0 means no limit. Default: 0
    @examples:
        ```python
        ret = split("'a,b',c", ",")
        # ret == ["'a,b'", "c"]
        # ',' inside quotes will be recognized.
        ```
    @returns:
        The list of substrings
    """
    ret = []
    special1 = ['(', ')', '[', ']', '{', '}']
    special2 = ['\'', '"']
    special3 = '\\'
    flags1 = [0, 0, 0]       # nesting depth for (), [] and {}
    flags2 = [False, False]  # inside single/double quotes
    flags3 = False           # previous char was a backslash
    start = 0
    nlim = 0
    for i, c in enumerate(s):
        if c == special3:  # next char is escaped
            flags3 = not flags3
        elif not flags3:  # no escape
            if c in special1:
                index = special1.index(c)
                if index % 2 == 0:
                    flags1[int(index / 2)] += 1
                else:
                    flags1[int(index / 2)] -= 1
            elif c in special2:
                index = special2.index(c)
                flags2[index] = not flags2[index]
            elif c == delimter and not any(flags1) and not any(flags2):
                r = s[start:i]
                if trim:
                    r = r.strip()
                ret.append(r)
                start = i + 1
                nlim = nlim + 1
                if limit and nlim >= limit:
                    break
        else:  # escaping closed
            flags3 = False
    r = s[start:]
    if trim:
        r = r.strip()
    ret.append(r)
    return ret
````
split_name: train
func_code_url: https://github.com/pwwang/liquidpy/blob/f422af836740b7facfbc6b89e5162a17d619dd07/liquid/__init__.py#L367-L421
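
A quick doctest-style check of the behavior promised above; this is a hypothetical snippet that assumes `split` is in scope (the dataset records it as `Liquid.split`):

```python
# Hedged example: exercises the quote/bracket guards and the limit parameter.
assert split("'a,b',c", ",") == ["'a,b'", "c"]          # ',' inside quotes is kept together
assert split("[1, 2], 3", ",") == ["[1, 2]", "3"]       # brackets guard the delimiter too
assert split("a, b, c", ",", limit=1) == ["a", "b, c"]  # at most `limit` splits are made
```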

repository_name: pwwang/liquidpy
func_path_in_repository: liquid/__init__.py
func_name: Liquid.render

```python
def render(self, **context):
    """
    Render this template by applying it to `context`.
    @params:
        `context`: a dictionary of values to use in this rendering.
    @returns:
        The rendered string
    """
    # Make the complete context we'll use.
    localns = self.envs.copy()
    localns.update(context)
    try:
        exec(str(self.code), None, localns)
        return localns[Liquid.COMPLIED_RENDERED_STR]
    except Exception:
        stacks = list(reversed(traceback.format_exc().splitlines()))
        for stack in stacks:
            stack = stack.strip()
            if stack.startswith('File "<string>"'):
                lineno = int(stack.split(', ')[1].split()[-1])
                source = []
                if 'NameError:' in stacks[0]:
                    source.append('Do you forget to provide the data?')
                import math
                source.append('\nCompiled source (use debug mode to see full source):')
                source.append('---------------------------------------------------')
                nlines = len(self.code.codes)
                nbit = int(math.log(nlines, 10)) + 3
                for i, line in enumerate(self.code.codes):
                    if i - 7 > lineno or i + 9 < lineno:
                        continue
                    if i + 1 != lineno:
                        source.append(' ' + (str(i + 1) + '.').ljust(nbit) + str(line).rstrip())
                    else:
                        source.append('* ' + (str(i + 1) + '.').ljust(nbit) + str(line).rstrip())
                raise LiquidRenderError(
                    stacks[0],
                    repr(self.code.codes[lineno - 1]) + '\n' +
                    '\n'.join(source) +
                    '\n\nPREVIOUS EXCEPTION:\n------------------\n' +
                    '\n'.join(stacks) + '\n' +
                    '\nCONTEXT:\n------------------\n' +
                    '\n'.join(
                        ' ' + key + ': ' + str(val)
                        for key, val in localns.items()
                        if not key.startswith('_liquid_') and not key.startswith('__')
                    ) + '\n'
                )
        raise
```
split_name: train
func_code_url: https://github.com/pwwang/liquidpy/blob/f422af836740b7facfbc6b89e5162a17d619dd07/liquid/__init__.py#L575-L624
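
Typical driving code for `render()`, sketched under the assumption that this version's `Liquid` constructor accepts the template source directly:

```python
# Hedged sketch: a missing variable surfaces as a LiquidRenderError that wraps
# the original NameError together with the compiled-source excerpt built above.
from liquid import Liquid

liq = Liquid('Hello, {{name}}!')
print(liq.render(name='world'))  # -> 'Hello, world!'
```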

repository_name: pwwang/liquidpy
func_path_in_repository: liquid/builder.py
func_name: LiquidCode.addLine

```python
def addLine(self, line):
    """
    Add a line of source to the code.
    Indentation and newline will be added for you, don't provide them.
    @params:
        `line`: The line to add
    """
    if not isinstance(line, LiquidLine):
        line = LiquidLine(line)
    line.ndent = self.ndent
    self.codes.append(line)
```
split_name: train
func_code_url: https://github.com/pwwang/liquidpy/blob/f422af836740b7facfbc6b89e5162a17d619dd07/liquid/builder.py#L57-L67
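
How `addLine()` is meant to be driven while emitting compiled template source, as a hypothetical sketch; the `indent()`/`dedent()` helpers (adjusting `self.ndent`) are assumptions, not shown in this excerpt:

```python
# Each added line is wrapped in a LiquidLine and stamped with the current indent level.
code = LiquidCode()
code.addLine('for item in items:')
code.indent()                         # assumed: bumps code.ndent
code.addLine('result.append(item)')   # recorded one level deeper
code.dedent()                         # assumed: restores code.ndent
```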

repository_name: ManufacturaInd/python-zenlog
func_path_in_repository: zenlog/__init__.py
func_name: Log.level

```python
def level(self, lvl=None):
    '''Get or set the logging level.'''
    if not lvl:
        return self._lvl
    self._lvl = self._parse_level(lvl)
    self.stream.setLevel(self._lvl)
    logging.root.setLevel(self._lvl)
```
split_name: train
func_code_url: https://github.com/ManufacturaInd/python-zenlog/blob/8f4ddf281287c99e286b78ef2d0d949a7172ba4c/zenlog/__init__.py#L90-L96
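
Note that the getter/setter split hinges on `if not lvl`, so any falsy argument (including 0/NOTSET) reads rather than sets. A hedged usage sketch, assuming zenlog's module-level `log` object:

```python
from zenlog import log

log.level('debug')     # set: parse the name, apply to the stream handler and the root logger
current = log.level()  # get: no (or falsy) argument returns the stored level
```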

repository_name: mirumee/django-prices-openexchangerates
func_path_in_repository: django_prices_openexchangerates/__init__.py
func_name: get_rate_from_db

```python
def get_rate_from_db(currency: str) -> Decimal:
    """
    Fetch currency conversion rate from the database
    """
    from .models import ConversionRate
    try:
        rate = ConversionRate.objects.get_rate(currency)
    except ConversionRate.DoesNotExist:  # noqa
        raise ValueError('No conversion rate for %s' % (currency, ))
    return rate.rate
```
split_name: train
func_code_url: https://github.com/mirumee/django-prices-openexchangerates/blob/3f633ad20f62dce03c526e9af93335f2b6b1a950/django_prices_openexchangerates/__init__.py#L15-L24
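
A hypothetical call pattern, assuming a Django project whose ConversionRate table has already been populated:

```python
from django_prices_openexchangerates import get_rate_from_db

rate = get_rate_from_db('EUR')  # Decimal rate relative to the base currency
# An unknown currency raises ValueError('No conversion rate for XXX')
```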

repository_name: mirumee/django-prices-openexchangerates
func_path_in_repository: django_prices_openexchangerates/__init__.py
func_name: get_conversion_rate

```python
def get_conversion_rate(from_currency: str, to_currency: str) -> Decimal:
    """
    Get conversion rate to use in exchange
    """
    reverse_rate = False
    if to_currency == BASE_CURRENCY:
        # Fetch exchange rate for base currency and use 1 / rate for conversion
        rate_currency = from_currency
        reverse_rate = True
    else:
        rate_currency = to_currency
    rate = get_rate_from_db(rate_currency)
    if reverse_rate:
        conversion_rate = Decimal(1) / rate
    else:
        conversion_rate = rate
    return conversion_rate
```
split_name: train
func_code_url: https://github.com/mirumee/django-prices-openexchangerates/blob/3f633ad20f62dce03c526e9af93335f2b6b1a950/django_prices_openexchangerates/__init__.py#L27-L44
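
The stored rates are quoted base-to-target, so only the direction toward the base currency needs inverting. A worked sketch, assuming `BASE_CURRENCY = 'USD'` and a stored USD-to-EUR rate of `Decimal('0.9')`:

```python
get_conversion_rate('USD', 'EUR')  # -> Decimal('0.9'): direct rate from the DB
get_conversion_rate('EUR', 'USD')  # -> Decimal(1) / Decimal('0.9'): inverted rate
```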

repository_name: mirumee/django-prices-openexchangerates
func_path_in_repository: django_prices_openexchangerates/__init__.py
func_name: exchange_currency

```python
def exchange_currency(base: T, to_currency: str, *, conversion_rate: Decimal = None) -> T:
    """
    Exchanges Money, TaxedMoney and their ranges to the specified currency.
    If `conversion_rate` is given it is used directly; otherwise the rate is
    looked up with get_conversion_rate.
    """
    if base.currency == to_currency:
        return base
    if base.currency != BASE_CURRENCY and to_currency != BASE_CURRENCY:
        # Exchange to base currency first
        base = exchange_currency(base, BASE_CURRENCY)
    if conversion_rate is None:
        conversion_rate = get_conversion_rate(base.currency, to_currency)
    if isinstance(base, Money):
        return Money(base.amount * conversion_rate, currency=to_currency)
    if isinstance(base, MoneyRange):
        return MoneyRange(
            exchange_currency(base.start, to_currency, conversion_rate=conversion_rate),
            exchange_currency(base.stop, to_currency, conversion_rate=conversion_rate))
    if isinstance(base, TaxedMoney):
        return TaxedMoney(
            exchange_currency(base.net, to_currency, conversion_rate=conversion_rate),
            exchange_currency(base.gross, to_currency, conversion_rate=conversion_rate))
    if isinstance(base, TaxedMoneyRange):
        return TaxedMoneyRange(
            exchange_currency(base.start, to_currency, conversion_rate=conversion_rate),
            exchange_currency(base.stop, to_currency, conversion_rate=conversion_rate))
    # base.currency was set but we don't know how to exchange given type
    raise TypeError('Unknown base for exchange_currency: %r' % (base,))
```
split_name: train
func_code_url: https://github.com/mirumee/django-prices-openexchangerates/blob/3f633ad20f62dce03c526e9af93335f2b6b1a950/django_prices_openexchangerates/__init__.py#L47-L85
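
Conversion between two non-base currencies is routed through the base currency by the recursive call. A hedged sketch, assuming `BASE_CURRENCY = 'USD'`, rates for EUR and GBP in the database, and the `Money` type from the `prices` package:

```python
from decimal import Decimal
from prices import Money

price = Money('10.00', currency='EUR')
in_gbp = exchange_currency(price, 'GBP')  # internally: EUR -> USD -> GBP
pinned = exchange_currency(price, 'USD', conversion_rate=Decimal('1.1'))  # skip the DB lookup
```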

repository_name: mdredze/carmen-python
func_path_in_repository: carmen/resolvers/profile.py
func_name: normalize

```python
def normalize(location_name, preserve_commas=False):
    """Normalize *location_name* by stripping punctuation and
    collapsing runs of whitespace, and return the normalized name."""
    def replace(match):
        if preserve_commas and ',' in match.group(0):
            return ','
        return ' '
    return NORMALIZATION_RE.sub(replace, location_name).strip().lower()
```
split_name: train
func_code_url: https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/resolvers/profile.py#L15-L22
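
The behavior depends on `NORMALIZATION_RE` (not shown in this excerpt) matching runs of punctuation and whitespace; under that assumption:

```python
normalize('San Francisco,  CA!')                        # -> 'san francisco ca'
normalize('San Francisco,  CA!', preserve_commas=True)  # -> 'san francisco,ca'
```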

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/stationstatistics.py
func_name: StationStatistics.calc_precipitation_stats

```python
def calc_precipitation_stats(self, months=None, avg_stats=True, percentile=50):
    """
    Calculates precipitation statistics for the cascade model while
    aggregating hourly observations

    Parameters
    ----------
    months : Months for each season to be used for statistics (array of numpy
        arrays, default=1-12, e.g., [np.arange(12) + 1])
    avg_stats : average statistics for all levels True/False (default=True)
    percentile : percentile for splitting the dataset in small and high
        intensities (default=50)
    """
    if months is None:
        months = [np.arange(12) + 1]
    self.precip.months = months
    self.precip.stats = melodist.build_casc(self.data, months=months,
                                            avg_stats=avg_stats, percentile=percentile)
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L77-L92
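
A hedged sketch of a seasonal parameterization instead of the all-months default, assuming `stats` is a StationStatistics backed by hourly precipitation data:

```python
seasons = [[12, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]  # DJF, MAM, JJA, SON
stats.calc_precipitation_stats(months=seasons, percentile=75)
```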

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/stationstatistics.py
func_name: StationStatistics.calc_wind_stats

```python
def calc_wind_stats(self):
    """
    Calculates statistics in order to derive diurnal patterns of wind speed
    """
    a, b, t_shift = melodist.fit_cosine_function(self.data.wind)
    self.wind.update(a=a, b=b, t_shift=t_shift)
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L94-L99

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/stationstatistics.py
func_name: StationStatistics.calc_humidity_stats

```python
def calc_humidity_stats(self):
    """
    Calculates statistics in order to derive diurnal patterns of relative humidity.
    """
    a1, a0 = melodist.calculate_dewpoint_regression(self.data, return_stats=False)
    self.hum.update(a0=a0, a1=a1)
    self.hum.kr = 12
    self.hum.month_hour_precip_mean = melodist.calculate_month_hour_precip_mean(self.data)
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L101-L109

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/stationstatistics.py
func_name: StationStatistics.calc_temperature_stats

```python
def calc_temperature_stats(self):
    """
    Calculates statistics in order to derive diurnal patterns of temperature
    """
    self.temp.max_delta = melodist.get_shift_by_data(self.data.temp, self._lon,
                                                     self._lat, self._timezone)
    self.temp.mean_course = melodist.util.calculate_mean_daily_course_by_month(
        self.data.temp, normalize=True)
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L111-L116
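
The three one-call methods above share a pattern: fit parameters against the hourly observations in `self.data` and store them on the matching stats object. A hedged sketch, assuming `stats` is a StationStatistics whose data holds hourly wind, humidity and temperature series:

```python
stats.calc_wind_stats()         # cosine fit -> a, b, t_shift
stats.calc_humidity_stats()     # dewpoint regression -> a0, a1 (kr fixed at 12)
stats.calc_temperature_stats()  # max_delta shift + normalized mean daily course
```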

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/stationstatistics.py
func_name: StationStatistics.calc_radiation_stats

```python
def calc_radiation_stats(self, data_daily=None, day_length=None, how='all'):
    """
    Calculates statistics in order to derive solar radiation from
    sunshine duration or minimum/maximum temperature.

    Parameters
    ----------
    data_daily : DataFrame, optional
        Daily data from the associated ``Station`` object.
    day_length : Series, optional
        Day lengths as calculated by ``calc_sun_times``.
    """
    assert how in ('all', 'seasonal', 'monthly')

    self.glob.mean_course = melodist.util.calculate_mean_daily_course_by_month(self.data.glob)

    if data_daily is not None:
        pot_rad = melodist.potential_radiation(
            melodist.util.hourly_index(data_daily.index), self._lon, self._lat, self._timezone)
        pot_rad_daily = pot_rad.resample('D').mean()
        obs_rad_daily = self.data.glob.resample('D').mean()

        if how == 'all':
            month_ranges = [np.arange(12) + 1]
        elif how == 'seasonal':
            month_ranges = [[3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 1, 2]]
        elif how == 'monthly':
            # materialized as a list: the original zip() iterator would be
            # exhausted after the first fitting loop below
            month_ranges = [[m] for m in np.arange(12) + 1]

        def myisin(s, v):
            return pd.Series(s).isin(v).values

        def extract_months(s, months):
            return s[myisin(s.index.month, months)]

        if 'ssd' in data_daily and day_length is not None:
            for months in month_ranges:
                a, b = melodist.fit_angstroem_params(
                    extract_months(data_daily.ssd, months),
                    extract_months(day_length, months),
                    extract_months(pot_rad_daily, months),
                    extract_months(obs_rad_daily, months),
                )
                for month in months:
                    self.glob.angstroem.loc[month] = a, b

        if 'tmin' in data_daily and 'tmax' in data_daily:
            df = pd.DataFrame(
                data=dict(
                    tmin=data_daily.tmin,
                    tmax=data_daily.tmax,
                    pot_rad=pot_rad_daily,
                    obs_rad=obs_rad_daily,
                )
            ).dropna(how='any')

            for months in month_ranges:
                a, c = melodist.fit_bristow_campbell_params(
                    extract_months(df.tmin, months),
                    extract_months(df.tmax, months),
                    extract_months(df.pot_rad, months),
                    extract_months(df.obs_rad, months),
                )
                for month in months:
                    self.glob.bristcamp.loc[month] = a, c
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L118-L186
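
A hedged invocation sketch; `station.data_daily` and the `daylength` column of the sun-times frame are assumptions about the surrounding Station API (compare `sun_times.daylength` in `disaggregate_radiation` below):

```python
stats.calc_radiation_stats(data_daily=station.data_daily,
                           day_length=sun_times.daylength,  # from calc_sun_times()
                           how='seasonal')                  # fit per DJF/MAM/JJA/SON block
```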

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/stationstatistics.py
func_name: StationStatistics.to_json

```python
def to_json(self, filename=None):
    """
    Exports statistical data to a JSON formatted file

    Parameters
    ----------
    filename : output file that holds statistics data
    """
    def json_encoder(obj):
        if isinstance(obj, pd.DataFrame) or isinstance(obj, pd.Series):
            if isinstance(obj.index, pd.core.index.MultiIndex):
                obj = obj.reset_index()  # convert MultiIndex to columns
            return json.loads(obj.to_json(date_format='iso'))
        elif isinstance(obj, melodist.cascade.CascadeStatistics):
            return obj.__dict__
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            raise TypeError('%s not supported' % type(obj))

    d = dict(
        temp=self.temp,
        wind=self.wind,
        precip=self.precip,
        hum=self.hum,
        glob=self.glob
    )

    j = json.dumps(d, default=json_encoder, indent=4)

    if filename is None:
        return j
    else:
        with open(filename, 'w') as f:
            f.write(j)
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L188-L223
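
The return-or-write switch at the end gives two call styles:

```python
j = stats.to_json()          # no filename: the JSON document is returned as a string
stats.to_json('stats.json')  # filename given: the document is written to disk instead
```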

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/stationstatistics.py
func_name: StationStatistics.from_json

```python
def from_json(cls, filename):
    """
    Imports statistical data from a JSON formatted file

    Parameters
    ----------
    filename : input file that holds statistics data
    """
    def json_decoder(d):
        if 'p01' in d and 'pxx' in d:  # we assume this is a CascadeStatistics object
            return melodist.cascade.CascadeStatistics.from_dict(d)
        return d

    with open(filename) as f:
        d = json.load(f, object_hook=json_decoder)

    stats = cls()
    stats.temp.update(d['temp'])
    stats.hum.update(d['hum'])
    stats.precip.update(d['precip'])
    stats.wind.update(d['wind'])
    stats.glob.update(d['glob'])

    if stats.temp.max_delta is not None:
        stats.temp.max_delta = pd.read_json(json.dumps(stats.temp.max_delta), typ='series').sort_index()

    if stats.temp.mean_course is not None:
        mc = pd.read_json(json.dumps(stats.temp.mean_course), typ='frame').sort_index()[np.arange(1, 12 + 1)]
        stats.temp.mean_course = mc.sort_index()[np.arange(1, 12 + 1)]

    if stats.hum.month_hour_precip_mean is not None:
        mhpm = pd.read_json(json.dumps(stats.hum.month_hour_precip_mean), typ='frame').sort_index()
        mhpm = mhpm.set_index(['level_0', 'level_1', 'level_2'])  # convert to MultiIndex
        mhpm = mhpm.squeeze()  # convert to Series
        mhpm = mhpm.rename_axis([None, None, None])  # remove index labels
        stats.hum.month_hour_precip_mean = mhpm

    for var in ('angstroem', 'bristcamp', 'mean_course'):
        if stats.glob[var] is not None:
            stats.glob[var] = pd.read_json(json.dumps(stats.glob[var])).sort_index()

    if stats.glob.mean_course is not None:
        stats.glob.mean_course = stats.glob.mean_course[np.arange(1, 12 + 1)]

    return stats
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/stationstatistics.py#L226-L272
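
Together with `to_json` above this gives a simple persistence round trip; since the first argument is `cls`, `from_json` is evidently an alternate constructor and is called on the class:

```python
stats.to_json('stats.json')                         # serialize fitted statistics
stats2 = StationStatistics.from_json('stats.json')  # rebuild frames/series from the JSON
```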

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/radiation.py
func_name: disaggregate_radiation

```python
def disaggregate_radiation(data_daily, sun_times=None, pot_rad=None, method='pot_rad',
                           angstr_a=0.25, angstr_b=0.5, bristcamp_a=0.75, bristcamp_c=2.4,
                           mean_course=None):
    """general function for radiation disaggregation

    Args:
        data_daily: daily values
        sun_times: daily dataframe including results of the util.sun_times function
        pot_rad: hourly dataframe including potential radiation
        method: keyword specifying the disaggregation method to be used
        angstr_a: parameter a of the Angstrom model (intercept)
        angstr_b: parameter b of the Angstrom model (slope)
        mean_course: monthly values of the mean hourly radiation course

    Returns:
        Disaggregated hourly values of shortwave radiation.
    """
    # check if disaggregation method has a valid value
    if method not in ('pot_rad', 'pot_rad_via_ssd', 'pot_rad_via_bc', 'mean_course'):
        raise ValueError('Invalid option')

    glob_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index))

    if method == 'mean_course':
        assert mean_course is not None
        pot_rad = pd.Series(index=glob_disagg.index)
        pot_rad[:] = mean_course.unstack().loc[list(zip(pot_rad.index.month, pot_rad.index.hour))].values
    else:
        assert pot_rad is not None

    pot_rad_daily = pot_rad.resample('D').mean()

    if method in ('pot_rad', 'mean_course'):
        globalrad = data_daily.glob
    elif method == 'pot_rad_via_ssd':
        # in this case use the Angstrom model
        globalrad = pd.Series(index=data_daily.index, data=0.)
        dates = sun_times.index[sun_times.daylength > 0]  # account for polar nights
        globalrad[dates] = angstroem(data_daily.ssd[dates], sun_times.daylength[dates],
                                     pot_rad_daily[dates], angstr_a, angstr_b)
    elif method == 'pot_rad_via_bc':
        # using data from Bristow-Campbell model
        globalrad = bristow_campbell(data_daily.tmin, data_daily.tmax, pot_rad_daily,
                                     bristcamp_a, bristcamp_c)

    globalrad_equal = globalrad.reindex(pot_rad.index, method='ffill')  # hourly values (replicate daily mean value for each hour)
    pot_rad_daily_equal = pot_rad_daily.reindex(pot_rad.index, method='ffill')
    glob_disagg = pot_rad / pot_rad_daily_equal * globalrad_equal
    glob_disagg[glob_disagg < 1e-2] = 0.

    return glob_disagg
```
split_name: train
func_code_url: https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/radiation.py#L37-L93
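
For the default `pot_rad` method the function simply rescales the potential-radiation curve so that its daily mean matches the observed daily mean. A hedged sketch, assuming `daily` is a DataFrame of daily values with a `glob` column:

```python
import melodist

pot_rad = melodist.potential_radiation(
    melodist.util.hourly_index(daily.index), lon=11.0, lat=47.0, timezone=1)
hourly_glob = disaggregate_radiation(daily, pot_rad=pot_rad, method='pot_rad')
```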

repository_name: kristianfoerster/melodist
func_path_in_repository: melodist/radiation.py
func_name: potential_radiation

```python
def potential_radiation(dates, lon, lat, timezone, terrain_slope=0, terrain_slope_azimuth=0,
                        cloud_fraction=0, split=False):
    """
    Calculate potential shortwave radiation for a specific location and time.

    This routine calculates global radiation as described in:
    Liston, G. E. and Elder, K. (2006): A Meteorological Distribution System
    for High-Resolution Terrestrial Modeling (MicroMet), J. Hydrometeorol., 7, 217–234.

    Corrections for eccentricity are carried out following:
    Paltridge, G.W., Platt, C.M.R., 1976. Radiative processes in Meteorology and
    Climatology. Elsevier Scientific Publishing Company, Amsterdam, Oxford, New York.

    Parameters
    ----------
    dates : DatetimeIndex or array-like
        The dates for which potential radiation shall be calculated
    lon : float
        Longitude (degrees)
    lat : float
        Latitude (degrees)
    timezone : float
        Time zone
    terrain_slope : float, default 0
        Terrain slope as defined in Liston & Elder (2006) (eq. 12)
    terrain_slope_azimuth : float, default 0
        Terrain slope azimuth as defined in Liston & Elder (2006) (eq. 13)
    cloud_fraction : float, default 0
        Cloud fraction between 0 and 1
    split : boolean, default False
        If True, return a DataFrame containing direct and diffuse radiation,
        otherwise return a Series containing total radiation
    """
    solar_constant = 1367.
    days_per_year = 365.25
    tropic_of_cancer = np.deg2rad(23.43697)
    solstice = 173.0

    dates = pd.DatetimeIndex(dates)
    dates_hour = np.array(dates.hour)
    dates_minute = np.array(dates.minute)
    day_of_year = np.array(dates.dayofyear)

    # compute solar decline in rad
    solar_decline = tropic_of_cancer * np.cos(2.0 * np.pi * (day_of_year - solstice) / days_per_year)

    # compute the sun hour angle in rad
    standard_meridian = timezone * 15.
    delta_lat_time = (lon - standard_meridian) * 24. / 360.
    hour_angle = np.pi * (((dates_hour + dates_minute / 60. + delta_lat_time) / 12.) - 1.)

    # get solar zenith angle
    cos_solar_zenith = (np.sin(solar_decline) * np.sin(np.deg2rad(lat)) +
                        np.cos(solar_decline) * np.cos(np.deg2rad(lat)) * np.cos(hour_angle))
    cos_solar_zenith = cos_solar_zenith.clip(min=0)
    solar_zenith_angle = np.arccos(cos_solar_zenith)

    # compute transmissivities for direct and diffuse radiation using cloud fraction
    transmissivity_direct = (0.6 + 0.2 * cos_solar_zenith) * (1.0 - cloud_fraction)
    transmissivity_diffuse = (0.3 + 0.1 * cos_solar_zenith) * cloud_fraction

    # modify solar constant for eccentricity
    beta = 2. * np.pi * (day_of_year / days_per_year)
    radius_ratio = (1.00011 + 0.034221 * np.cos(beta) + 0.00128 * np.sin(beta) +
                    0.000719 * np.cos(2. * beta) + 0.000077 * np.sin(2 * beta))
    solar_constant_times_radius_ratio = solar_constant * radius_ratio

    mu = np.arcsin(np.cos(solar_decline) * np.sin(hour_angle) / np.sin(solar_zenith_angle))
    cosi = (np.cos(terrain_slope) * cos_solar_zenith +
            np.sin(terrain_slope) * np.sin(solar_zenith_angle) * np.cos(mu - terrain_slope_azimuth))

    # get total shortwave radiation
    direct_radiation = solar_constant_times_radius_ratio * transmissivity_direct * cosi
    diffuse_radiation = solar_constant_times_radius_ratio * transmissivity_diffuse * cos_solar_zenith
    direct_radiation = direct_radiation.clip(min=0)

    df = pd.DataFrame(index=dates, data=dict(direct=direct_radiation, diffuse=diffuse_radiation))

    if split:
        return df
    else:
        return df.direct + df.diffuse
```
def potential_radiation(dates, lon, lat, timezone, terrain_slope=0, terrain_slope_azimuth=0, cloud_fraction=0, split=False): """ Calculate potential shortwave radiation for a specific location and time. This routine calculates global radiation as described in: Liston, G. E. and Elder, K. (2006): A Meteorological Distribution System for High-Resolution Terrestrial Modeling (MicroMet), J. Hydrometeorol., 7, 217–234. Corrections for eccentricity are carried out following: Paltridge, G.W., Platt, C.M.R., 1976. Radiative processes in Meteorology and Climatology. Elsevier Scientific Publishing Company, Amsterdam, Oxford, New York. Parameters ---------- dates : DatetimeIndex or array-like The dates for which potential radiation shall be calculated lon : float Longitude (degrees) lat : float Latitude (degrees) timezone : float Time zone terrain_slope : float, default 0 Terrain slope as defined in Liston & Elder (2006) (eq. 12) terrain_slope_azimuth : float, default 0 Terrain slope azimuth as defined in Liston & Elder (2006) (eq. 13) cloud_fraction : float, default 0 Cloud fraction between 0 and 1 split : boolean, default False If True, return a DataFrame containing direct and diffuse radiation, otherwise return a Series containing total radiation """ solar_constant = 1367. days_per_year = 365.25 tropic_of_cancer = np.deg2rad(23.43697) solstice = 173.0 dates = pd.DatetimeIndex(dates) dates_hour = np.array(dates.hour) dates_minute = np.array(dates.minute) day_of_year = np.array(dates.dayofyear) # compute solar decline in rad solar_decline = tropic_of_cancer * np.cos(2.0 * np.pi * (day_of_year - solstice) / days_per_year) # compute the sun hour angle in rad standard_meridian = timezone * 15. delta_lat_time = (lon - standard_meridian) * 24. / 360. hour_angle = np.pi * (((dates_hour + dates_minute / 60. + delta_lat_time) / 12.) - 1.) # get solar zenith angle cos_solar_zenith = (np.sin(solar_decline) * np.sin(np.deg2rad(lat)) + np.cos(solar_decline) * np.cos(np.deg2rad(lat)) * np.cos(hour_angle)) cos_solar_zenith = cos_solar_zenith.clip(min=0) solar_zenith_angle = np.arccos(cos_solar_zenith) # compute transmissivities for direct and diffus radiation using cloud fraction transmissivity_direct = (0.6 + 0.2 * cos_solar_zenith) * (1.0 - cloud_fraction) transmissivity_diffuse = (0.3 + 0.1 * cos_solar_zenith) * cloud_fraction # modify solar constant for eccentricity beta = 2. * np.pi * (day_of_year / days_per_year) radius_ratio = (1.00011 + 0.034221 * np.cos(beta) + 0.00128 * np.sin(beta) + 0.000719 * np.cos(2. * beta) + 0.000077 * np.sin(2 * beta)) solar_constant_times_radius_ratio = solar_constant * radius_ratio mu = np.arcsin(np.cos(solar_decline) * np.sin(hour_angle) / np.sin(solar_zenith_angle)) cosi = (np.cos(terrain_slope) * cos_solar_zenith + np.sin(terrain_slope) * np.sin(solar_zenith_angle) * np.cos(mu - terrain_slope_azimuth)) # get total shortwave radiation direct_radiation = solar_constant_times_radius_ratio * transmissivity_direct * cosi diffuse_radiation = solar_constant_times_radius_ratio * transmissivity_diffuse * cos_solar_zenith direct_radiation = direct_radiation.clip(min=0) df = pd.DataFrame(index=dates, data=dict(direct=direct_radiation, diffuse=diffuse_radiation)) if split: return df else: return df.direct + df.diffuse
[ "def", "potential_radiation", "(", "dates", ",", "lon", ",", "lat", ",", "timezone", ",", "terrain_slope", "=", "0", ",", "terrain_slope_azimuth", "=", "0", ",", "cloud_fraction", "=", "0", ",", "split", "=", "False", ")", ":", "solar_constant", "=", "1367.", "days_per_year", "=", "365.25", "tropic_of_cancer", "=", "np", ".", "deg2rad", "(", "23.43697", ")", "solstice", "=", "173.0", "dates", "=", "pd", ".", "DatetimeIndex", "(", "dates", ")", "dates_hour", "=", "np", ".", "array", "(", "dates", ".", "hour", ")", "dates_minute", "=", "np", ".", "array", "(", "dates", ".", "minute", ")", "day_of_year", "=", "np", ".", "array", "(", "dates", ".", "dayofyear", ")", "# compute solar decline in rad\r", "solar_decline", "=", "tropic_of_cancer", "*", "np", ".", "cos", "(", "2.0", "*", "np", ".", "pi", "*", "(", "day_of_year", "-", "solstice", ")", "/", "days_per_year", ")", "# compute the sun hour angle in rad\r", "standard_meridian", "=", "timezone", "*", "15.", "delta_lat_time", "=", "(", "lon", "-", "standard_meridian", ")", "*", "24.", "/", "360.", "hour_angle", "=", "np", ".", "pi", "*", "(", "(", "(", "dates_hour", "+", "dates_minute", "/", "60.", "+", "delta_lat_time", ")", "/", "12.", ")", "-", "1.", ")", "# get solar zenith angle\r", "cos_solar_zenith", "=", "(", "np", ".", "sin", "(", "solar_decline", ")", "*", "np", ".", "sin", "(", "np", ".", "deg2rad", "(", "lat", ")", ")", "+", "np", ".", "cos", "(", "solar_decline", ")", "*", "np", ".", "cos", "(", "np", ".", "deg2rad", "(", "lat", ")", ")", "*", "np", ".", "cos", "(", "hour_angle", ")", ")", "cos_solar_zenith", "=", "cos_solar_zenith", ".", "clip", "(", "min", "=", "0", ")", "solar_zenith_angle", "=", "np", ".", "arccos", "(", "cos_solar_zenith", ")", "# compute transmissivities for direct and diffus radiation using cloud fraction\r", "transmissivity_direct", "=", "(", "0.6", "+", "0.2", "*", "cos_solar_zenith", ")", "*", "(", "1.0", "-", "cloud_fraction", ")", "transmissivity_diffuse", "=", "(", "0.3", "+", "0.1", "*", "cos_solar_zenith", ")", "*", "cloud_fraction", "# modify solar constant for eccentricity\r", "beta", "=", "2.", "*", "np", ".", "pi", "*", "(", "day_of_year", "/", "days_per_year", ")", "radius_ratio", "=", "(", "1.00011", "+", "0.034221", "*", "np", ".", "cos", "(", "beta", ")", "+", "0.00128", "*", "np", ".", "sin", "(", "beta", ")", "+", "0.000719", "*", "np", ".", "cos", "(", "2.", "*", "beta", ")", "+", "0.000077", "*", "np", ".", "sin", "(", "2", "*", "beta", ")", ")", "solar_constant_times_radius_ratio", "=", "solar_constant", "*", "radius_ratio", "mu", "=", "np", ".", "arcsin", "(", "np", ".", "cos", "(", "solar_decline", ")", "*", "np", ".", "sin", "(", "hour_angle", ")", "/", "np", ".", "sin", "(", "solar_zenith_angle", ")", ")", "cosi", "=", "(", "np", ".", "cos", "(", "terrain_slope", ")", "*", "cos_solar_zenith", "+", "np", ".", "sin", "(", "terrain_slope", ")", "*", "np", ".", "sin", "(", "solar_zenith_angle", ")", "*", "np", ".", "cos", "(", "mu", "-", "terrain_slope_azimuth", ")", ")", "# get total shortwave radiation\r", "direct_radiation", "=", "solar_constant_times_radius_ratio", "*", "transmissivity_direct", "*", "cosi", "diffuse_radiation", "=", "solar_constant_times_radius_ratio", "*", "transmissivity_diffuse", "*", "cos_solar_zenith", "direct_radiation", "=", "direct_radiation", ".", "clip", "(", "min", "=", "0", ")", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "dates", ",", "data", "=", "dict", "(", "direct", "=", "direct_radiation", ",", "diffuse", "=", "diffuse_radiation", 
")", ")", "if", "split", ":", "return", "df", "else", ":", "return", "df", ".", "direct", "+", "df", ".", "diffuse" ]
Calculate potential shortwave radiation for a specific location and time. This routine calculates global radiation as described in: Liston, G. E. and Elder, K. (2006): A Meteorological Distribution System for High-Resolution Terrestrial Modeling (MicroMet), J. Hydrometeorol., 7, 217–234. Corrections for eccentricity are carried out following: Paltridge, G.W., Platt, C.M.R., 1976. Radiative processes in Meteorology and Climatology. Elsevier Scientific Publishing Company, Amsterdam, Oxford, New York. Parameters ---------- dates : DatetimeIndex or array-like The dates for which potential radiation shall be calculated lon : float Longitude (degrees) lat : float Latitude (degrees) timezone : float Time zone terrain_slope : float, default 0 Terrain slope as defined in Liston & Elder (2006) (eq. 12) terrain_slope_azimuth : float, default 0 Terrain slope azimuth as defined in Liston & Elder (2006) (eq. 13) cloud_fraction : float, default 0 Cloud fraction between 0 and 1 split : boolean, default False If True, return a DataFrame containing direct and diffuse radiation, otherwise return a Series containing total radiation
[ "Calculate", "potential", "shortwave", "radiation", "for", "a", "specific", "location", "and", "time", ".", "This", "routine", "calculates", "global", "radiation", "as", "described", "in", ":", "Liston", "G", ".", "E", ".", "and", "Elder", "K", ".", "(", "2006", ")", ":", "A", "Meteorological", "Distribution", "System", "for", "High", "-", "Resolution", "Terrestrial", "Modeling", "(", "MicroMet", ")", "J", ".", "Hydrometeorol", ".", "7", "217–234", ".", "Corrections", "for", "eccentricity", "are", "carried", "out", "following", ":", "Paltridge", "G", ".", "W", ".", "Platt", "C", ".", "M", ".", "R", ".", "1976", ".", "Radiative", "processes", "in", "Meteorology", "and", "Climatology", ".", "Elsevier", "Scientific", "Publishing", "Company", "Amsterdam", "Oxford", "New", "York", ".", "Parameters", "----------", "dates", ":", "DatetimeIndex", "or", "array", "-", "like", "The", "dates", "for", "which", "potential", "radiation", "shall", "be", "calculated", "lon", ":", "float", "Longitude", "(", "degrees", ")", "lat", ":", "float", "Latitude", "(", "degrees", ")", "timezone", ":", "float", "Time", "zone", "terrain_slope", ":", "float", "default", "0", "Terrain", "slope", "as", "defined", "in", "Liston", "&", "Elder", "(", "2006", ")", "(", "eq", ".", "12", ")", "terrain_slope_azimuth", ":", "float", "default", "0", "Terrain", "slope", "azimuth", "as", "defined", "in", "Liston", "&", "Elder", "(", "2006", ")", "(", "eq", ".", "13", ")", "cloud_fraction", ":", "float", "default", "0", "Cloud", "fraction", "between", "0", "and", "1", "split", ":", "boolean", "default", "False", "If", "True", "return", "a", "DataFrame", "containing", "direct", "and", "diffuse", "radiation", "otherwise", "return", "a", "Series", "containing", "total", "radiation" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/radiation.py#L96-L177
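A usage sketch for the record above, with illustrative coordinates and dates; it assumes the installed melodist package provides the module-level `numpy`/`pandas` imports (`np`/`pd`) that the function body relies on.

```python
import pandas as pd
from melodist.radiation import potential_radiation  # module shown above

# One midsummer day of hourly timestamps for an arbitrary site (48.1N, 11.6E, UTC+1).
dates = pd.date_range('2020-06-21 00:00', periods=24, freq='h')

total = potential_radiation(dates, lon=11.6, lat=48.1, timezone=1)  # Series, W/m^2
both = potential_radiation(dates, lon=11.6, lat=48.1, timezone=1,
                           cloud_fraction=0.3, split=True)          # DataFrame

print(total.idxmax())                    # peak falls near local solar noon
print(both[['direct', 'diffuse']].mean())
```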
kristianfoerster/melodist
melodist/radiation.py
bristow_campbell
def bristow_campbell(tmin, tmax, pot_rad_daily, A, C):
    """calculates potential shortwave radiation based on minimum and maximum temperature

    This routine calculates global radiation as described in:
    Bristow, Keith L., and Gaylon S. Campbell: On the relationship between
    incoming solar radiation and daily maximum and minimum temperature.
    Agricultural and forest meteorology 31.2 (1984): 159-166.

    Args:
        tmin: time series (daily data) of minimum temperature
        tmax: time series (daily data) of maximum temperature
        pot_rad_daily: mean potential daily radiation
        A: parameter A of the Bristow-Campbell model
        C: parameter C of the Bristow-Campbell model
    Returns:
        series of potential shortwave radiation
    """
    assert tmin.index.equals(tmax.index)

    temp = pd.DataFrame(data=dict(tmin=tmin, tmax=tmax))
    temp = temp.reindex(pd.DatetimeIndex(start=temp.index[0], end=temp.index[-1], freq='D'))
    temp['tmin_nextday'] = temp.tmin
    temp.tmin_nextday.iloc[:-1] = temp.tmin.iloc[1:].values
    temp = temp.loc[tmin.index]

    pot_rad_daily = pot_rad_daily.loc[tmin.index]

    # diurnal temperature range, using the mean of the current and following day's minima
    dT = temp.tmax - (temp.tmin + temp.tmin_nextday) / 2

    # empirical B parameter derived from the monthly mean temperature range
    dT_m_avg = dT.groupby(dT.index.month).mean()
    B = 0.036 * np.exp(-0.154 * dT_m_avg[temp.index.month])
    B.index = temp.index

    # monthly parameter values, if A and C are given as Series
    if isinstance(A, pd.Series):
        months = temp.index.month
        A = A.loc[months].values
        C = C.loc[months].values

    transmissivity = A * (1 - np.exp(-B * dT**C))
    R0 = transmissivity * pot_rad_daily

    return R0
python
def bristow_campbell(tmin, tmax, pot_rad_daily, A, C):
    """calculates potential shortwave radiation based on minimum and maximum temperature

    This routine calculates global radiation as described in:
    Bristow, Keith L., and Gaylon S. Campbell: On the relationship between
    incoming solar radiation and daily maximum and minimum temperature.
    Agricultural and forest meteorology 31.2 (1984): 159-166.

    Args:
        tmin: time series (daily data) of minimum temperature
        tmax: time series (daily data) of maximum temperature
        pot_rad_daily: mean potential daily radiation
        A: parameter A of the Bristow-Campbell model
        C: parameter C of the Bristow-Campbell model
    Returns:
        series of potential shortwave radiation
    """
    assert tmin.index.equals(tmax.index)

    temp = pd.DataFrame(data=dict(tmin=tmin, tmax=tmax))
    temp = temp.reindex(pd.DatetimeIndex(start=temp.index[0], end=temp.index[-1], freq='D'))
    temp['tmin_nextday'] = temp.tmin
    temp.tmin_nextday.iloc[:-1] = temp.tmin.iloc[1:].values
    temp = temp.loc[tmin.index]

    pot_rad_daily = pot_rad_daily.loc[tmin.index]

    # diurnal temperature range, using the mean of the current and following day's minima
    dT = temp.tmax - (temp.tmin + temp.tmin_nextday) / 2

    # empirical B parameter derived from the monthly mean temperature range
    dT_m_avg = dT.groupby(dT.index.month).mean()
    B = 0.036 * np.exp(-0.154 * dT_m_avg[temp.index.month])
    B.index = temp.index

    # monthly parameter values, if A and C are given as Series
    if isinstance(A, pd.Series):
        months = temp.index.month
        A = A.loc[months].values
        C = C.loc[months].values

    transmissivity = A * (1 - np.exp(-B * dT**C))
    R0 = transmissivity * pot_rad_daily

    return R0
[ "def", "bristow_campbell", "(", "tmin", ",", "tmax", ",", "pot_rad_daily", ",", "A", ",", "C", ")", ":", "assert", "tmin", ".", "index", ".", "equals", "(", "tmax", ".", "index", ")", "temp", "=", "pd", ".", "DataFrame", "(", "data", "=", "dict", "(", "tmin", "=", "tmin", ",", "tmax", "=", "tmax", ")", ")", "temp", "=", "temp", ".", "reindex", "(", "pd", ".", "DatetimeIndex", "(", "start", "=", "temp", ".", "index", "[", "0", "]", ",", "end", "=", "temp", ".", "index", "[", "-", "1", "]", ",", "freq", "=", "'D'", ")", ")", "temp", "[", "'tmin_nextday'", "]", "=", "temp", ".", "tmin", "temp", ".", "tmin_nextday", ".", "iloc", "[", ":", "-", "1", "]", "=", "temp", ".", "tmin", ".", "iloc", "[", "1", ":", "]", ".", "values", "temp", "=", "temp", ".", "loc", "[", "tmin", ".", "index", "]", "pot_rad_daily", "=", "pot_rad_daily", ".", "loc", "[", "tmin", ".", "index", "]", "dT", "=", "temp", ".", "tmax", "-", "(", "temp", ".", "tmin", "+", "temp", ".", "tmin_nextday", ")", "/", "2", "dT_m_avg", "=", "dT", ".", "groupby", "(", "dT", ".", "index", ".", "month", ")", ".", "mean", "(", ")", "B", "=", "0.036", "*", "np", ".", "exp", "(", "-", "0.154", "*", "dT_m_avg", "[", "temp", ".", "index", ".", "month", "]", ")", "B", ".", "index", "=", "temp", ".", "index", "if", "isinstance", "(", "A", ",", "pd", ".", "Series", ")", ":", "months", "=", "temp", ".", "index", ".", "month", "A", "=", "A", ".", "loc", "[", "months", "]", ".", "values", "C", "=", "C", ".", "loc", "[", "months", "]", ".", "values", "transmissivity", "=", "A", "*", "(", "1", "-", "np", ".", "exp", "(", "-", "B", "*", "dT", "**", "C", ")", ")", "R0", "=", "transmissivity", "*", "pot_rad_daily", "return", "R0" ]
calculates potential shortwave radiation based on minimum and maximum temperature

This routine calculates global radiation as described in:
Bristow, Keith L., and Gaylon S. Campbell: On the relationship between
incoming solar radiation and daily maximum and minimum temperature.
Agricultural and forest meteorology 31.2 (1984): 159-166.

Args:
    tmin: time series (daily data) of minimum temperature
    tmax: time series (daily data) of maximum temperature
    pot_rad_daily: mean potential daily radiation
    A: parameter A of the Bristow-Campbell model
    C: parameter C of the Bristow-Campbell model
Returns:
    series of potential shortwave radiation
[ "calculates", "potential", "shortwave", "radiation", "based", "on", "minimum", "and", "maximum", "temperature", "This", "routine", "calculates", "global", "radiation", "as", "described", "in", ":", "Bristow", "Keith", "L", ".", "and", "Gaylon", "S", ".", "Campbell", ":", "On", "the", "relationship", "between", "incoming", "solar", "radiation", "and", "daily", "maximum", "and", "minimum", "temperature", ".", "Agricultural", "and", "forest", "meteorology", "31", ".", "2", "(", "1984", ")", ":", "159", "-", "166", ".", "Args", ":", "tmin", ":", "time", "series", "(", "daily", "data", ")", "of", "minimum", "temperature", "tmax", ":", "time", "series", "(", "daily", "data", ")", "of", "maximum", "temperature", "pot_rad_daily", ":", "mean", "potential", "daily", "radiation", "A", ":", "parameter", "A", "of", "the", "Bristow", "-", "Campbell", "model", "C", ":", "parameter", "C", "of", "the", "Bristow", "-", "Campbell", "model", "Returns", ":", "series", "of", "potential", "shortwave", "radiation" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/radiation.py#L180-L221
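A sketch of calling the function above on fabricated daily series. The A/C values mirror the optimizer start values from fit_bristow_campbell_params in the next record, not calibrated parameters; note also that the reindex step inside uses the legacy pd.DatetimeIndex(start=..., end=...) signature, so this runs unchanged only on older pandas versions.

```python
import numpy as np
import pandas as pd
from melodist.radiation import bristow_campbell

days = pd.date_range('2020-07-01', periods=10, freq='D')
tmin = pd.Series(12 + 2 * np.random.rand(10), index=days)  # synthetic minima (degC)
tmax = pd.Series(24 + 4 * np.random.rand(10), index=days)  # synthetic maxima (degC)
pot_rad_daily = pd.Series(320.0, index=days)               # potential radiation (W/m^2)

# Larger diurnal temperature ranges imply clearer skies, hence a higher
# transmissivity A * (1 - exp(-B * dT**C)).
rad = bristow_campbell(tmin, tmax, pot_rad_daily, A=0.75, C=2.4)
print(rad.head())
```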
kristianfoerster/melodist
melodist/radiation.py
fit_bristow_campbell_params
def fit_bristow_campbell_params(tmin, tmax, pot_rad_daily, obs_rad_daily): """ Fit the A and C parameters for the Bristow & Campbell (1984) model using observed daily minimum and maximum temperature and mean daily (e.g. aggregated from hourly values) solar radiation. Parameters ---------- tmin : Series Observed daily minimum temperature. tmax : Series Observed daily maximum temperature. pot_rad_daily : Series Mean potential daily solar radiation. obs_rad_daily : Series Mean observed daily solar radiation. """ def bc_absbias(ac): return np.abs(np.mean(bristow_campbell(df.tmin, df.tmax, df.pot, ac[0], ac[1]) - df.obs)) df = pd.DataFrame(data=dict(tmin=tmin, tmax=tmax, pot=pot_rad_daily, obs=obs_rad_daily)).dropna(how='any') res = scipy.optimize.minimize(bc_absbias, [0.75, 2.4]) # i.e. we minimize the absolute bias return res.x
python
def fit_bristow_campbell_params(tmin, tmax, pot_rad_daily, obs_rad_daily): """ Fit the A and C parameters for the Bristow & Campbell (1984) model using observed daily minimum and maximum temperature and mean daily (e.g. aggregated from hourly values) solar radiation. Parameters ---------- tmin : Series Observed daily minimum temperature. tmax : Series Observed daily maximum temperature. pot_rad_daily : Series Mean potential daily solar radiation. obs_rad_daily : Series Mean observed daily solar radiation. """ def bc_absbias(ac): return np.abs(np.mean(bristow_campbell(df.tmin, df.tmax, df.pot, ac[0], ac[1]) - df.obs)) df = pd.DataFrame(data=dict(tmin=tmin, tmax=tmax, pot=pot_rad_daily, obs=obs_rad_daily)).dropna(how='any') res = scipy.optimize.minimize(bc_absbias, [0.75, 2.4]) # i.e. we minimize the absolute bias return res.x
[ "def", "fit_bristow_campbell_params", "(", "tmin", ",", "tmax", ",", "pot_rad_daily", ",", "obs_rad_daily", ")", ":", "def", "bc_absbias", "(", "ac", ")", ":", "return", "np", ".", "abs", "(", "np", ".", "mean", "(", "bristow_campbell", "(", "df", ".", "tmin", ",", "df", ".", "tmax", ",", "df", ".", "pot", ",", "ac", "[", "0", "]", ",", "ac", "[", "1", "]", ")", "-", "df", ".", "obs", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "dict", "(", "tmin", "=", "tmin", ",", "tmax", "=", "tmax", ",", "pot", "=", "pot_rad_daily", ",", "obs", "=", "obs_rad_daily", ")", ")", ".", "dropna", "(", "how", "=", "'any'", ")", "res", "=", "scipy", ".", "optimize", ".", "minimize", "(", "bc_absbias", ",", "[", "0.75", ",", "2.4", "]", ")", "# i.e. we minimize the absolute bias\r", "return", "res", ".", "x" ]
Fit the A and C parameters for the Bristow & Campbell (1984) model using observed daily minimum and maximum temperature and mean daily (e.g. aggregated from hourly values) solar radiation. Parameters ---------- tmin : Series Observed daily minimum temperature. tmax : Series Observed daily maximum temperature. pot_rad_daily : Series Mean potential daily solar radiation. obs_rad_daily : Series Mean observed daily solar radiation.
[ "Fit", "the", "A", "and", "C", "parameters", "for", "the", "Bristow", "&", "Campbell", "(", "1984", ")", "model", "using", "observed", "daily", "minimum", "and", "maximum", "temperature", "and", "mean", "daily", "(", "e", ".", "g", ".", "aggregated", "from", "hourly", "values", ")", "solar", "radiation", ".", "Parameters", "----------", "tmin", ":", "Series", "Observed", "daily", "minimum", "temperature", ".", "tmax", ":", "Series", "Observed", "daily", "maximum", "temperature", ".", "pot_rad_daily", ":", "Series", "Mean", "potential", "daily", "solar", "radiation", ".", "obs_rad_daily", ":", "Series", "Mean", "observed", "daily", "solar", "radiation", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/radiation.py#L224-L250
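A round-trip sketch: fabricate "observations" from known parameters, then let the fitter recover a pair (same legacy-pandas caveat as above). Since the objective minimizes only the absolute mean bias, many (A, C) pairs are near-optimal, so the recovered values need not equal the generating ones.

```python
import numpy as np
import pandas as pd
from melodist.radiation import bristow_campbell, fit_bristow_campbell_params

days = pd.date_range('2020-06-01', periods=60, freq='D')
tmin = pd.Series(10 + 3 * np.random.rand(60), index=days)
tmax = pd.Series(22 + 5 * np.random.rand(60), index=days)
pot = pd.Series(330.0, index=days)

obs = bristow_campbell(tmin, tmax, pot, A=0.72, C=2.2)  # synthetic "observations"

A_fit, C_fit = fit_bristow_campbell_params(tmin, tmax, pot, obs)
print(A_fit, C_fit)  # bias-only objective: close in the mean, not necessarily in shape
```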
kristianfoerster/melodist
melodist/radiation.py
angstroem
def angstroem(ssd, day_length, pot_rad_daily, a, b): """ Calculate mean daily radiation from observed sunshine duration according to Angstroem (1924). Parameters ---------- ssd : Series Observed daily sunshine duration. day_length : Series Day lengths as calculated by ``calc_sun_times``. pot_rad_daily : Series Mean potential daily solar radiation. a : float First parameter for the Angstroem model (originally 0.25). b : float Second parameter for the Angstroem model (originally 0.75). """ if isinstance(a, pd.Series): months = ssd.index.month a = a.loc[months].values b = b.loc[months].values glob_day = (a + b * ssd / day_length) * pot_rad_daily return glob_day
python
def angstroem(ssd, day_length, pot_rad_daily, a, b): """ Calculate mean daily radiation from observed sunshine duration according to Angstroem (1924). Parameters ---------- ssd : Series Observed daily sunshine duration. day_length : Series Day lengths as calculated by ``calc_sun_times``. pot_rad_daily : Series Mean potential daily solar radiation. a : float First parameter for the Angstroem model (originally 0.25). b : float Second parameter for the Angstroem model (originally 0.75). """ if isinstance(a, pd.Series): months = ssd.index.month a = a.loc[months].values b = b.loc[months].values glob_day = (a + b * ssd / day_length) * pot_rad_daily return glob_day
[ "def", "angstroem", "(", "ssd", ",", "day_length", ",", "pot_rad_daily", ",", "a", ",", "b", ")", ":", "if", "isinstance", "(", "a", ",", "pd", ".", "Series", ")", ":", "months", "=", "ssd", ".", "index", ".", "month", "a", "=", "a", ".", "loc", "[", "months", "]", ".", "values", "b", "=", "b", ".", "loc", "[", "months", "]", ".", "values", "glob_day", "=", "(", "a", "+", "b", "*", "ssd", "/", "day_length", ")", "*", "pot_rad_daily", "return", "glob_day" ]
Calculate mean daily radiation from observed sunshine duration according to Angstroem (1924). Parameters ---------- ssd : Series Observed daily sunshine duration. day_length : Series Day lengths as calculated by ``calc_sun_times``. pot_rad_daily : Series Mean potential daily solar radiation. a : float First parameter for the Angstroem model (originally 0.25). b : float Second parameter for the Angstroem model (originally 0.75).
[ "Calculate", "mean", "daily", "radiation", "from", "observed", "sunshine", "duration", "according", "to", "Angstroem", "(", "1924", ")", ".", "Parameters", "----------", "ssd", ":", "Series", "Observed", "daily", "sunshine", "duration", ".", "day_length", ":", "Series", "Day", "lengths", "as", "calculated", "by", "calc_sun_times", ".", "pot_rad_daily", ":", "Series", "Mean", "potential", "daily", "solar", "radiation", ".", "a", ":", "float", "First", "parameter", "for", "the", "Angstroem", "model", "(", "originally", "0", ".", "25", ")", ".", "b", ":", "float", "Second", "parameter", "for", "the", "Angstroem", "model", "(", "originally", "0", ".", "75", ")", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/radiation.py#L253-L281
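The Angstrom relation reduces to glob = (a + b * ssd / day_length) * pot_rad_daily, so a hand-checkable example with the classic a=0.25, b=0.75 is easy to verify (the input numbers are illustrative):

```python
import pandas as pd
from melodist.radiation import angstroem

days = pd.date_range('2020-06-01', periods=3, freq='D')
ssd = pd.Series([0.0, 8.0, 16.0], index=days)           # sunshine hours
day_length = pd.Series([16.0, 16.0, 16.0], index=days)  # astronomical day length
pot = pd.Series([350.0, 350.0, 350.0], index=days)      # potential radiation, W/m^2

# An overcast day keeps the 25% diffuse floor; a fully sunny day
# (ssd == day_length) reaches the full potential radiation:
print(angstroem(ssd, day_length, pot, 0.25, 0.75))
# -> 87.5, 218.75, 350.0
```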
kristianfoerster/melodist
melodist/radiation.py
fit_angstroem_params
def fit_angstroem_params(ssd, day_length, pot_rad_daily, obs_rad_daily): """ Fit the a and b parameters for the Angstroem (1924) model using observed daily sunshine duration and mean daily (e.g. aggregated from hourly values) solar radiation. Parameters ---------- ssd : Series Observed daily sunshine duration. day_length : Series Day lengths as calculated by ``calc_sun_times``. pot_rad_daily : Series Mean potential daily solar radiation. obs_rad_daily : Series Mean observed daily solar radiation. """ df = pd.DataFrame(data=dict(ssd=ssd, day_length=day_length, pot=pot_rad_daily, obs=obs_rad_daily)).dropna(how='any') def angstroem_opt(x, a, b): return angstroem(x[0], x[1], x[2], a, b) x = np.array([df.ssd, df.day_length, df.pot]) popt, pcov = scipy.optimize.curve_fit(angstroem_opt, x, df.obs, p0=[0.25, 0.75]) return popt
python
def fit_angstroem_params(ssd, day_length, pot_rad_daily, obs_rad_daily): """ Fit the a and b parameters for the Angstroem (1924) model using observed daily sunshine duration and mean daily (e.g. aggregated from hourly values) solar radiation. Parameters ---------- ssd : Series Observed daily sunshine duration. day_length : Series Day lengths as calculated by ``calc_sun_times``. pot_rad_daily : Series Mean potential daily solar radiation. obs_rad_daily : Series Mean observed daily solar radiation. """ df = pd.DataFrame(data=dict(ssd=ssd, day_length=day_length, pot=pot_rad_daily, obs=obs_rad_daily)).dropna(how='any') def angstroem_opt(x, a, b): return angstroem(x[0], x[1], x[2], a, b) x = np.array([df.ssd, df.day_length, df.pot]) popt, pcov = scipy.optimize.curve_fit(angstroem_opt, x, df.obs, p0=[0.25, 0.75]) return popt
[ "def", "fit_angstroem_params", "(", "ssd", ",", "day_length", ",", "pot_rad_daily", ",", "obs_rad_daily", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "dict", "(", "ssd", "=", "ssd", ",", "day_length", "=", "day_length", ",", "pot", "=", "pot_rad_daily", ",", "obs", "=", "obs_rad_daily", ")", ")", ".", "dropna", "(", "how", "=", "'any'", ")", "def", "angstroem_opt", "(", "x", ",", "a", ",", "b", ")", ":", "return", "angstroem", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ",", "x", "[", "2", "]", ",", "a", ",", "b", ")", "x", "=", "np", ".", "array", "(", "[", "df", ".", "ssd", ",", "df", ".", "day_length", ",", "df", ".", "pot", "]", ")", "popt", ",", "pcov", "=", "scipy", ".", "optimize", ".", "curve_fit", "(", "angstroem_opt", ",", "x", ",", "df", ".", "obs", ",", "p0", "=", "[", "0.25", ",", "0.75", "]", ")", "return", "popt" ]
Fit the a and b parameters for the Angstroem (1924) model using observed daily sunshine duration and mean daily (e.g. aggregated from hourly values) solar radiation. Parameters ---------- ssd : Series Observed daily sunshine duration. day_length : Series Day lengths as calculated by ``calc_sun_times``. pot_rad_daily : Series Mean potential daily solar radiation. obs_rad_daily : Series Mean observed daily solar radiation.
[ "Fit", "the", "a", "and", "b", "parameters", "for", "the", "Angstroem", "(", "1924", ")", "model", "using", "observed", "daily", "sunshine", "duration", "and", "mean", "daily", "(", "e", ".", "g", ".", "aggregated", "from", "hourly", "values", ")", "solar", "radiation", ".", "Parameters", "----------", "ssd", ":", "Series", "Observed", "daily", "sunshine", "duration", ".", "day_length", ":", "Series", "Day", "lengths", "as", "calculated", "by", "calc_sun_times", ".", "pot_rad_daily", ":", "Series", "Mean", "potential", "daily", "solar", "radiation", ".", "obs_rad_daily", ":", "Series", "Mean", "observed", "daily", "solar", "radiation", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/radiation.py#L284-L312
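A round-trip sketch mirroring the Bristow-Campbell one: because this fitter uses least squares (scipy.optimize.curve_fit), noise-free synthetic data recovers the generating parameters essentially exactly.

```python
import numpy as np
import pandas as pd
from melodist.radiation import angstroem, fit_angstroem_params

np.random.seed(1)
days = pd.date_range('2020-05-01', periods=90, freq='D')
ssd = pd.Series(np.random.rand(90) * 14, index=days)
day_length = pd.Series(15.0, index=days)
pot = pd.Series(340.0, index=days)

obs = angstroem(ssd, day_length, pot, 0.22, 0.78)  # noise-free synthetic "observations"

a_fit, b_fit = fit_angstroem_params(ssd, day_length, pot, obs)
print(a_fit, b_fit)  # least squares recovers ~0.22 / ~0.78 here
```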
mdredze/carmen-python
carmen/resolver.py
register
def register(name):
    """Return a decorator that registers the decorated class
    as a resolver with the given *name*."""
    def decorator(class_):
        if name in known_resolvers:
            raise ValueError('duplicate resolver name "%s"' % name)
        known_resolvers[name] = class_
        return class_
    return decorator
python
def register(name):
    """Return a decorator that registers the decorated class
    as a resolver with the given *name*."""
    def decorator(class_):
        if name in known_resolvers:
            raise ValueError('duplicate resolver name "%s"' % name)
        known_resolvers[name] = class_
        return class_
    return decorator
[ "def", "register", "(", "name", ")", ":", "def", "decorator", "(", "class_", ")", ":", "if", "name", "in", "known_resolvers", ":", "raise", "ValueError", "(", "'duplicate resolver name \"%s\"'", "%", "name", ")", "known_resolvers", "[", "name", "]", "=", "class_", "return", "class_", "return", "decorator" ]
Return a decorator that registers the decorated class as a resolver with the given *name*.
[ "Return", "a", "decorator", "that", "registers", "the", "decorated", "class", "as", "a", "resolver", "with", "the", "given", "*", "name", "*", "." ]
train
https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/resolver.py#L105-L112
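A self-contained demo of the decorator pattern above (the registry dict is recreated locally; in carmen it lives at module scope in resolver.py). The `return class_` line matters: without it the decorated name would be rebound to None.

```python
known_resolvers = {}  # stand-in for the module-level registry in carmen.resolver

def register(name):
    """Same decorator as above, reproduced so the demo runs on its own."""
    def decorator(class_):
        if name in known_resolvers:
            raise ValueError('duplicate resolver name "%s"' % name)
        known_resolvers[name] = class_
        return class_  # hand the class back so the decorated name stays bound
    return decorator

@register('demo')
class DemoResolver:
    """Hypothetical resolver, for illustration only."""

assert known_resolvers['demo'] is DemoResolver
# Registering 'demo' a second time would raise: duplicate resolver name "demo"
```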
mdredze/carmen-python
carmen/resolver.py
get_resolver
def get_resolver(order=None, options=None, modules=None): """Return a location resolver. The *order* argument, if given, should be a list of resolver names; results from resolvers named earlier in the list are preferred over later ones. For a list of built-in resolver names, see :doc:`/resolvers`. The *options* argument can be used to pass configuration options to individual resolvers, in the form of a dictionary mapping resolver names to keyword arguments:: {'geocode': {'max_distance': 50}} The *modules* argument can be used to specify a list of additional modules to look for resolvers in. See :doc:`/develop` for details. """ if not known_resolvers: from . import resolvers as carmen_resolvers modules = [carmen_resolvers] + (modules or []) for module in modules: for loader, name, _ in pkgutil.iter_modules(module.__path__): full_name = module.__name__ + '.' + name loader.find_module(full_name).load_module(full_name) if order is None: order = ('place', 'geocode', 'profile') else: order = tuple(order) if options is None: options = {} resolvers = [] for resolver_name in order: if resolver_name not in known_resolvers: raise ValueError('unknown resolver name "%s"' % resolver_name) resolvers.append(( resolver_name, known_resolvers[resolver_name](**options.get(resolver_name, {})))) return ResolverCollection(resolvers)
python
def get_resolver(order=None, options=None, modules=None): """Return a location resolver. The *order* argument, if given, should be a list of resolver names; results from resolvers named earlier in the list are preferred over later ones. For a list of built-in resolver names, see :doc:`/resolvers`. The *options* argument can be used to pass configuration options to individual resolvers, in the form of a dictionary mapping resolver names to keyword arguments:: {'geocode': {'max_distance': 50}} The *modules* argument can be used to specify a list of additional modules to look for resolvers in. See :doc:`/develop` for details. """ if not known_resolvers: from . import resolvers as carmen_resolvers modules = [carmen_resolvers] + (modules or []) for module in modules: for loader, name, _ in pkgutil.iter_modules(module.__path__): full_name = module.__name__ + '.' + name loader.find_module(full_name).load_module(full_name) if order is None: order = ('place', 'geocode', 'profile') else: order = tuple(order) if options is None: options = {} resolvers = [] for resolver_name in order: if resolver_name not in known_resolvers: raise ValueError('unknown resolver name "%s"' % resolver_name) resolvers.append(( resolver_name, known_resolvers[resolver_name](**options.get(resolver_name, {})))) return ResolverCollection(resolvers)
[ "def", "get_resolver", "(", "order", "=", "None", ",", "options", "=", "None", ",", "modules", "=", "None", ")", ":", "if", "not", "known_resolvers", ":", "from", ".", "import", "resolvers", "as", "carmen_resolvers", "modules", "=", "[", "carmen_resolvers", "]", "+", "(", "modules", "or", "[", "]", ")", "for", "module", "in", "modules", ":", "for", "loader", ",", "name", ",", "_", "in", "pkgutil", ".", "iter_modules", "(", "module", ".", "__path__", ")", ":", "full_name", "=", "module", ".", "__name__", "+", "'.'", "+", "name", "loader", ".", "find_module", "(", "full_name", ")", ".", "load_module", "(", "full_name", ")", "if", "order", "is", "None", ":", "order", "=", "(", "'place'", ",", "'geocode'", ",", "'profile'", ")", "else", ":", "order", "=", "tuple", "(", "order", ")", "if", "options", "is", "None", ":", "options", "=", "{", "}", "resolvers", "=", "[", "]", "for", "resolver_name", "in", "order", ":", "if", "resolver_name", "not", "in", "known_resolvers", ":", "raise", "ValueError", "(", "'unknown resolver name \"%s\"'", "%", "resolver_name", ")", "resolvers", ".", "append", "(", "(", "resolver_name", ",", "known_resolvers", "[", "resolver_name", "]", "(", "*", "*", "options", ".", "get", "(", "resolver_name", ",", "{", "}", ")", ")", ")", ")", "return", "ResolverCollection", "(", "resolvers", ")" ]
Return a location resolver. The *order* argument, if given, should be a list of resolver names; results from resolvers named earlier in the list are preferred over later ones. For a list of built-in resolver names, see :doc:`/resolvers`. The *options* argument can be used to pass configuration options to individual resolvers, in the form of a dictionary mapping resolver names to keyword arguments:: {'geocode': {'max_distance': 50}} The *modules* argument can be used to specify a list of additional modules to look for resolvers in. See :doc:`/develop` for details.
[ "Return", "a", "location", "resolver", ".", "The", "*", "order", "*", "argument", "if", "given", "should", "be", "a", "list", "of", "resolver", "names", ";", "results", "from", "resolvers", "named", "earlier", "in", "the", "list", "are", "preferred", "over", "later", "ones", ".", "For", "a", "list", "of", "built", "-", "in", "resolver", "names", "see", ":", "doc", ":", "/", "resolvers", ".", "The", "*", "options", "*", "argument", "can", "be", "used", "to", "pass", "configuration", "options", "to", "individual", "resolvers", "in", "the", "form", "of", "a", "dictionary", "mapping", "resolver", "names", "to", "keyword", "arguments", "::" ]
train
https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/resolver.py#L115-L149
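A usage sketch following the docstring; the 'geocode'/'max_distance' option is the docstring's own example, while the final resolution call is left commented because its exact return shape isn't shown in these records.

```python
import carmen  # assumes the installed package re-exports get_resolver, as its docs show

resolver = carmen.get_resolver(
    order=('place', 'geocode'),                 # prefer place hits over geocodes
    options={'geocode': {'max_distance': 50}},  # the docstring's example option
)
resolver.load_locations()  # bundled location database (see load_locations below)

# resolution = resolver.resolve_tweet(tweet)  # tweet: a dict decoded from the
# Twitter API; the return shape isn't shown in these records, so this stays a sketch.
```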
mdredze/carmen-python
carmen/resolver.py
AbstractResolver.load_locations
def load_locations(self, location_file=None): """Load locations into this resolver from the given *location_file*, which should contain one JSON object per line representing a location. If *location_file* is not specified, an internal location database is used.""" if location_file is None: contents = pkgutil.get_data(__package__, 'data/locations.json') contents_string = contents.decode("ascii") locations = contents_string.split('\n') else: from .cli import open_file with open_file(location_file, 'rb') as input: locations = input.readlines() for location_string in locations: if location_string.strip(): location = Location(known=True, **json.loads(location_string)) self.location_id_to_location[location.id] = location self.add_location(location)
python
def load_locations(self, location_file=None): """Load locations into this resolver from the given *location_file*, which should contain one JSON object per line representing a location. If *location_file* is not specified, an internal location database is used.""" if location_file is None: contents = pkgutil.get_data(__package__, 'data/locations.json') contents_string = contents.decode("ascii") locations = contents_string.split('\n') else: from .cli import open_file with open_file(location_file, 'rb') as input: locations = input.readlines() for location_string in locations: if location_string.strip(): location = Location(known=True, **json.loads(location_string)) self.location_id_to_location[location.id] = location self.add_location(location)
[ "def", "load_locations", "(", "self", ",", "location_file", "=", "None", ")", ":", "if", "location_file", "is", "None", ":", "contents", "=", "pkgutil", ".", "get_data", "(", "__package__", ",", "'data/locations.json'", ")", "contents_string", "=", "contents", ".", "decode", "(", "\"ascii\"", ")", "locations", "=", "contents_string", ".", "split", "(", "'\\n'", ")", "else", ":", "from", ".", "cli", "import", "open_file", "with", "open_file", "(", "location_file", ",", "'rb'", ")", "as", "input", ":", "locations", "=", "input", ".", "readlines", "(", ")", "for", "location_string", "in", "locations", ":", "if", "location_string", ".", "strip", "(", ")", ":", "location", "=", "Location", "(", "known", "=", "True", ",", "*", "*", "json", ".", "loads", "(", "location_string", ")", ")", "self", ".", "location_id_to_location", "[", "location", ".", "id", "]", "=", "location", "self", ".", "add_location", "(", "location", ")" ]
Load locations into this resolver from the given *location_file*, which should contain one JSON object per line representing a location. If *location_file* is not specified, an internal location database is used.
[ "Load", "locations", "into", "this", "resolver", "from", "the", "given", "*", "location_file", "*", "which", "should", "contain", "one", "JSON", "object", "per", "line", "representing", "a", "location", ".", "If", "*", "location_file", "*", "is", "not", "specified", "an", "internal", "location", "database", "is", "used", "." ]
train
https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/resolver.py#L22-L40
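The loader reads newline-delimited JSON, one location per line. Below is a sketch with a hypothetical custom file; the field names are inferred from the Location attributes used elsewhere in carmen and are assumptions, not a documented schema.

```python
import carmen

# my_locations.json (hypothetical), one JSON object per line:
#   {"id": 1, "country": "United States"}
#   {"id": 2, "country": "United States", "state": "Maryland"}

resolver = carmen.get_resolver()
resolver.load_locations('my_locations.json')  # custom file instead of the bundled data
```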
mdredze/carmen-python
carmen/location.py
Location.canonical
def canonical(self):
    """Return a tuple containing a canonicalized version of this
    location's country, state, county, and city names."""
    try:
        return tuple(map(lambda x: x.lower(), self.name()))
    except:
        return tuple([x.lower() for x in self.name()])
python
def canonical(self):
    """Return a tuple containing a canonicalized version of this
    location's country, state, county, and city names."""
    try:
        return tuple(map(lambda x: x.lower(), self.name()))
    except:
        return tuple([x.lower() for x in self.name()])
[ "def", "canonical", "(", "self", ")", ":", "try", ":", "return", "tuple", "(", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "self", ".", "name", "(", ")", ")", ")", "except", ":", "return", "tuple", "(", "[", "x", ".", "lower", "(", ")", "for", "x", "in", "self", ".", "name", "(", ")", "]", ")" ]
Return a tuple containing a canonicalized version of this location's country, state, county, and city names.
[ "Return", "a", "tuple", "containing", "a", "canonicalized", "version", "of", "this", "location", "s", "country", "state", "county", "and", "city", "names", "." ]
train
https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/location.py#L79-L85
mdredze/carmen-python
carmen/location.py
Location.name
def name(self):
    """Return a tuple containing this location's country, state,
    county, and city names."""
    try:
        return tuple(
            getattr(self, x) if getattr(self, x) else u''
            for x in ('country', 'state', 'county', 'city'))
    except:
        return tuple(
            getattr(self, x) if getattr(self, x) else ''
            for x in ('country', 'state', 'county', 'city'))
python
def name(self):
    """Return a tuple containing this location's country, state,
    county, and city names."""
    try:
        return tuple(
            getattr(self, x) if getattr(self, x) else u''
            for x in ('country', 'state', 'county', 'city'))
    except:
        return tuple(
            getattr(self, x) if getattr(self, x) else ''
            for x in ('country', 'state', 'county', 'city'))
[ "def", "name", "(", "self", ")", ":", "try", ":", "return", "tuple", "(", "getattr", "(", "self", ",", "x", ")", "if", "getattr", "(", "self", ",", "x", ")", "else", "u''", "for", "x", "in", "(", "'country'", ",", "'state'", ",", "'county'", ",", "'city'", ")", ")", "except", ":", "return", "tuple", "(", "getattr", "(", "self", ",", "x", ")", "if", "getattr", "(", "self", ",", "x", ")", "else", "''", "for", "x", "in", "(", "'country'", ",", "'state'", ",", "'county'", ",", "'city'", ")", ")" ]
Return a tuple containing this location's country, state, county, and city names.
[ "Return", "a", "tuple", "containing", "this", "location", "s", "country", "state", "county", "and", "city", "names", "." ]
train
https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/location.py#L87-L97
mdredze/carmen-python
carmen/location.py
Location.parent
def parent(self): """Return a location representing the administrative unit above the one represented by this location.""" if self.city: return Location( country=self.country, state=self.state, county=self.county) if self.county: return Location(country=self.country, state=self.state) if self.state: return Location(country=self.country) return Location()
python
def parent(self): """Return a location representing the administrative unit above the one represented by this location.""" if self.city: return Location( country=self.country, state=self.state, county=self.county) if self.county: return Location(country=self.country, state=self.state) if self.state: return Location(country=self.country) return Location()
[ "def", "parent", "(", "self", ")", ":", "if", "self", ".", "city", ":", "return", "Location", "(", "country", "=", "self", ".", "country", ",", "state", "=", "self", ".", "state", ",", "county", "=", "self", ".", "county", ")", "if", "self", ".", "county", ":", "return", "Location", "(", "country", "=", "self", ".", "country", ",", "state", "=", "self", ".", "state", ")", "if", "self", ".", "state", ":", "return", "Location", "(", "country", "=", "self", ".", "country", ")", "return", "Location", "(", ")" ]
Return a location representing the administrative unit above the one represented by this location.
[ "Return", "a", "location", "representing", "the", "administrative", "unit", "above", "the", "one", "represented", "by", "this", "location", "." ]
train
https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/location.py#L99-L109
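The three methods above compose into a simple hierarchy walk. A sketch, assuming the Location constructor accepts these keywords and defaults omitted fields to falsy values, as its own parent() calls imply:

```python
from carmen.location import Location

loc = Location(country='United States', state='Maryland', city='Baltimore')

print(loc.name())           # ('United States', 'Maryland', '', 'Baltimore')
print(loc.canonical())      # ('united states', 'maryland', '', 'baltimore')
print(loc.parent().name())  # one level up: ('United States', 'Maryland', '', '')
```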
kristianfoerster/melodist
melodist/humidity.py
disaggregate_humidity
def disaggregate_humidity(data_daily, method='equal', temp=None,
                          a0=None, a1=None, kr=None,
                          month_hour_precip_mean=None, preserve_daily_mean=False):
    """general function for humidity disaggregation

    Args:
        data_daily: daily values
        method: keyword specifying the disaggregation method to be used
        temp: hourly temperature time series (necessary for some methods)
        a0, a1: parameters of the dewpoint regression (tdew = a0 + a1 * tmin)
        kr: parameter for linear_dewpoint_variation method (6 or 12)
        month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values
        preserve_daily_mean: if True, correct the daily mean values of the
            disaggregated data with the observed daily means.

    Returns:
        Disaggregated hourly values of relative humidity.
    """
    assert method in ('equal', 'minimal', 'dewpoint_regression', 'min_max',
                      'linear_dewpoint_variation', 'month_hour_precip_mean'), 'Invalid option'

    if method == 'equal':
        hum_disagg = melodist.distribute_equally(data_daily.hum)
    elif method in ('minimal', 'dewpoint_regression', 'linear_dewpoint_variation'):
        if method == 'minimal':
            a0 = 0
            a1 = 1

        assert a0 is not None and a1 is not None, 'a0 and a1 must be specified'
        tdew_daily = a0 + a1 * data_daily.tmin

        tdew = melodist.distribute_equally(tdew_daily)

        if method == 'linear_dewpoint_variation':
            assert kr is not None, 'kr must be specified'
            assert kr in (6, 12), 'kr must be 6 or 12'
            tdew_delta = 0.5 * np.sin((temp.index.hour + 1) * np.pi / kr - 3. * np.pi / 4.)  # eq. (21) from Debele et al. (2007)

            tdew_nextday = tdew.shift(-24)
            tdew_nextday.iloc[-24:] = tdew.iloc[-24:]  # copy the last day

            # eq. (20) from Debele et al. (2007):
            # (corrected - the equation is wrong both in Debele et al. (2007) and Bregaglio et al. (2010) - it should
            # be (T_dp,day)_(d+1) - (T_dp,day)_d instead of the other way around)
            tdew += temp.index.hour / 24. * (tdew_nextday - tdew) + tdew_delta

        sat_vap_press_tdew = util.vapor_pressure(tdew, 100)
        sat_vap_press_t = util.vapor_pressure(temp, 100)
        hum_disagg = pd.Series(index=temp.index, data=100 * sat_vap_press_tdew / sat_vap_press_t)
    elif method == 'min_max':
        assert 'hum_min' in data_daily.columns and 'hum_max' in data_daily.columns, \
            'Minimum and maximum humidity must be present in data frame'

        hmin = melodist.distribute_equally(data_daily.hum_min)
        hmax = melodist.distribute_equally(data_daily.hum_max)
        tmin = melodist.distribute_equally(data_daily.tmin)
        tmax = melodist.distribute_equally(data_daily.tmax)

        hum_disagg = hmax + (temp - tmin) / (tmax - tmin) * (hmin - hmax)
    elif method == 'month_hour_precip_mean':
        assert month_hour_precip_mean is not None

        precip_equal = melodist.distribute_equally(data_daily.precip)  # daily precipitation equally distributed to hourly values
        hum_disagg = pd.Series(index=precip_equal.index)
        locs = list(zip(hum_disagg.index.month, hum_disagg.index.hour, precip_equal > 0))
        hum_disagg[:] = month_hour_precip_mean.loc[locs].values

    if preserve_daily_mean:
        daily_mean_df = pd.DataFrame(data=dict(obs=data_daily.hum, disagg=hum_disagg.resample('D').mean()))
        bias = melodist.util.distribute_equally(daily_mean_df.disagg - daily_mean_df.obs)
        bias = bias.fillna(0)
        hum_disagg -= bias

    return hum_disagg.clip(0, 100)
python
def disaggregate_humidity(data_daily, method='equal', temp=None,
                          a0=None, a1=None, kr=None,
                          month_hour_precip_mean=None, preserve_daily_mean=False):
    """general function for humidity disaggregation

    Args:
        data_daily: daily values
        method: keyword specifying the disaggregation method to be used
        temp: hourly temperature time series (necessary for some methods)
        a0, a1: parameters of the dewpoint regression (tdew = a0 + a1 * tmin)
        kr: parameter for linear_dewpoint_variation method (6 or 12)
        month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values
        preserve_daily_mean: if True, correct the daily mean values of the
            disaggregated data with the observed daily means.

    Returns:
        Disaggregated hourly values of relative humidity.
    """
    assert method in ('equal', 'minimal', 'dewpoint_regression', 'min_max',
                      'linear_dewpoint_variation', 'month_hour_precip_mean'), 'Invalid option'

    if method == 'equal':
        hum_disagg = melodist.distribute_equally(data_daily.hum)
    elif method in ('minimal', 'dewpoint_regression', 'linear_dewpoint_variation'):
        if method == 'minimal':
            a0 = 0
            a1 = 1

        assert a0 is not None and a1 is not None, 'a0 and a1 must be specified'
        tdew_daily = a0 + a1 * data_daily.tmin

        tdew = melodist.distribute_equally(tdew_daily)

        if method == 'linear_dewpoint_variation':
            assert kr is not None, 'kr must be specified'
            assert kr in (6, 12), 'kr must be 6 or 12'
            tdew_delta = 0.5 * np.sin((temp.index.hour + 1) * np.pi / kr - 3. * np.pi / 4.)  # eq. (21) from Debele et al. (2007)

            tdew_nextday = tdew.shift(-24)
            tdew_nextday.iloc[-24:] = tdew.iloc[-24:]  # copy the last day

            # eq. (20) from Debele et al. (2007):
            # (corrected - the equation is wrong both in Debele et al. (2007) and Bregaglio et al. (2010) - it should
            # be (T_dp,day)_(d+1) - (T_dp,day)_d instead of the other way around)
            tdew += temp.index.hour / 24. * (tdew_nextday - tdew) + tdew_delta

        sat_vap_press_tdew = util.vapor_pressure(tdew, 100)
        sat_vap_press_t = util.vapor_pressure(temp, 100)
        hum_disagg = pd.Series(index=temp.index, data=100 * sat_vap_press_tdew / sat_vap_press_t)
    elif method == 'min_max':
        assert 'hum_min' in data_daily.columns and 'hum_max' in data_daily.columns, \
            'Minimum and maximum humidity must be present in data frame'

        hmin = melodist.distribute_equally(data_daily.hum_min)
        hmax = melodist.distribute_equally(data_daily.hum_max)
        tmin = melodist.distribute_equally(data_daily.tmin)
        tmax = melodist.distribute_equally(data_daily.tmax)

        hum_disagg = hmax + (temp - tmin) / (tmax - tmin) * (hmin - hmax)
    elif method == 'month_hour_precip_mean':
        assert month_hour_precip_mean is not None

        precip_equal = melodist.distribute_equally(data_daily.precip)  # daily precipitation equally distributed to hourly values
        hum_disagg = pd.Series(index=precip_equal.index)
        locs = list(zip(hum_disagg.index.month, hum_disagg.index.hour, precip_equal > 0))
        hum_disagg[:] = month_hour_precip_mean.loc[locs].values

    if preserve_daily_mean:
        daily_mean_df = pd.DataFrame(data=dict(obs=data_daily.hum, disagg=hum_disagg.resample('D').mean()))
        bias = melodist.util.distribute_equally(daily_mean_df.disagg - daily_mean_df.obs)
        bias = bias.fillna(0)
        hum_disagg -= bias

    return hum_disagg.clip(0, 100)
[ "def", "disaggregate_humidity", "(", "data_daily", ",", "method", "=", "'equal'", ",", "temp", "=", "None", ",", "a0", "=", "None", ",", "a1", "=", "None", ",", "kr", "=", "None", ",", "month_hour_precip_mean", "=", "None", ",", "preserve_daily_mean", "=", "False", ")", ":", "assert", "method", "in", "(", "'equal'", ",", "'minimal'", ",", "'dewpoint_regression'", ",", "'min_max'", ",", "'linear_dewpoint_variation'", ",", "'month_hour_precip_mean'", ")", ",", "'Invalid option'", "if", "method", "==", "'equal'", ":", "hum_disagg", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "hum", ")", "elif", "method", "in", "(", "'minimal'", ",", "'dewpoint_regression'", ",", "'linear_dewpoint_variation'", ")", ":", "if", "method", "==", "'minimal'", ":", "a0", "=", "0", "a1", "=", "1", "assert", "a0", "is", "not", "None", "and", "a1", "is", "not", "None", ",", "'a0 and a1 must be specified'", "tdew_daily", "=", "a0", "+", "a1", "*", "data_daily", ".", "tmin", "tdew", "=", "melodist", ".", "distribute_equally", "(", "tdew_daily", ")", "if", "method", "==", "'linear_dewpoint_variation'", ":", "assert", "kr", "is", "not", "None", ",", "'kr must be specified'", "assert", "kr", "in", "(", "6", ",", "12", ")", ",", "'kr must be 6 or 12'", "tdew_delta", "=", "0.5", "*", "np", ".", "sin", "(", "(", "temp", ".", "index", ".", "hour", "+", "1", ")", "*", "np", ".", "pi", "/", "kr", "-", "3.", "*", "np", ".", "pi", "/", "4.", ")", "# eq. (21) from Debele et al. (2007)", "tdew_nextday", "=", "tdew", ".", "shift", "(", "-", "24", ")", "tdew_nextday", ".", "iloc", "[", "-", "24", ":", "]", "=", "tdew", ".", "iloc", "[", "-", "24", ":", "]", "# copy the last day", "# eq. (20) from Debele et al. (2007):", "# (corrected - the equation is wrong both in Debele et al. (2007) and Bregaglio et al. 
(2010) - it should", "# be (T_dp,day)_(d+1) - (T_dp,day)_d instead of the other way around)", "tdew", "+=", "temp", ".", "index", ".", "hour", "/", "24.", "*", "(", "tdew_nextday", "-", "tdew", ")", "+", "tdew_delta", "sat_vap_press_tdew", "=", "util", ".", "vapor_pressure", "(", "tdew", ",", "100", ")", "sat_vap_press_t", "=", "util", ".", "vapor_pressure", "(", "temp", ",", "100", ")", "hum_disagg", "=", "pd", ".", "Series", "(", "index", "=", "temp", ".", "index", ",", "data", "=", "100", "*", "sat_vap_press_tdew", "/", "sat_vap_press_t", ")", "elif", "method", "==", "'min_max'", ":", "assert", "'hum_min'", "in", "data_daily", ".", "columns", "and", "'hum_max'", "in", "data_daily", ".", "columns", ",", "'Minimum and maximum humidity must be present in data frame'", "hmin", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "hum_min", ")", "hmax", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "hum_max", ")", "tmin", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "tmin", ")", "tmax", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "tmax", ")", "hum_disagg", "=", "hmax", "+", "(", "temp", "-", "tmin", ")", "/", "(", "tmax", "-", "tmin", ")", "*", "(", "hmin", "-", "hmax", ")", "elif", "method", "==", "'month_hour_precip_mean'", ":", "assert", "month_hour_precip_mean", "is", "not", "None", "precip_equal", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "precip", ")", "# daily precipitation equally distributed to hourly values", "hum_disagg", "=", "pd", ".", "Series", "(", "index", "=", "precip_equal", ".", "index", ")", "locs", "=", "list", "(", "zip", "(", "hum_disagg", ".", "index", ".", "month", ",", "hum_disagg", ".", "index", ".", "hour", ",", "precip_equal", ">", "0", ")", ")", "hum_disagg", "[", ":", "]", "=", "month_hour_precip_mean", ".", "loc", "[", "locs", "]", ".", "values", "if", "preserve_daily_mean", ":", "daily_mean_df", "=", "pd", ".", "DataFrame", "(", "data", "=", "dict", "(", "obs", "=", "data_daily", ".", "hum", ",", "disagg", "=", "hum_disagg", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", ")", ")", "bias", "=", "melodist", ".", "util", ".", "distribute_equally", "(", "daily_mean_df", ".", "disagg", "-", "daily_mean_df", ".", "obs", ")", "bias", "=", "bias", ".", "fillna", "(", "0", ")", "hum_disagg", "-=", "bias", "return", "hum_disagg", ".", "clip", "(", "0", ",", "100", ")" ]
general function for humidity disaggregation

Args:
    data_daily: daily values
    method: keyword specifying the disaggregation method to be used
    temp: hourly temperature time series (necessary for some methods)
    a0, a1: parameters of the dewpoint regression (tdew = a0 + a1 * tmin)
    kr: parameter for linear_dewpoint_variation method (6 or 12)
    month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values
    preserve_daily_mean: if True, correct the daily mean values of the
        disaggregated data with the observed daily means.

Returns:
    Disaggregated hourly values of relative humidity.
[ "general", "function", "for", "humidity", "disaggregation" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/humidity.py#L33-L109
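A usage sketch for the record above, with fabricated daily values; only the 'equal' method is exercised here since it needs no extra inputs.

```python
import pandas as pd
from melodist.humidity import disaggregate_humidity

days = pd.date_range('2020-07-01', periods=5, freq='D')
daily = pd.DataFrame(index=days, data=dict(
    hum=[60., 65., 70., 55., 62.],   # daily mean relative humidity (%)
    tmin=[12., 13., 11., 14., 12.],  # needed by the dewpoint-based methods
))

# 'equal' repeats each daily mean over the hours of that day; results are
# clipped to the physical 0-100% range on the way out.
hourly = disaggregate_humidity(daily, method='equal')
print(hourly.head(3))

# The dewpoint-based methods additionally need an hourly temperature series
# (temp=) and, for 'dewpoint_regression', the coefficients a0/a1 of
# tdew = a0 + a1 * tmin.
```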
kristianfoerster/melodist
melodist/wind.py
_cosine_function
def _cosine_function(x, a, b, t_shift):
    """generates a diurnal course of windspeed according to the cosine function

    Args:
        x: series of equally distributed windspeed values
        a: parameter a for the cosine function
        b: parameter b for the cosine function
        t_shift: parameter t_shift for the cosine function

    Returns:
        series including diurnal course of windspeed.
    """
    mean_wind, t = x
    return a * mean_wind * np.cos(np.pi * (t - t_shift) / 12) + b * mean_wind
python
def _cosine_function(x, a, b, t_shift):
    """generates a diurnal course of windspeed according to the cosine function

    Args:
        x: series of equally distributed windspeed values
        a: parameter a for the cosine function
        b: parameter b for the cosine function
        t_shift: parameter t_shift for the cosine function

    Returns:
        series including diurnal course of windspeed.
    """
    mean_wind, t = x
    return a * mean_wind * np.cos(np.pi * (t - t_shift) / 12) + b * mean_wind
[ "def", "_cosine_function", "(", "x", ",", "a", ",", "b", ",", "t_shift", ")", ":", "mean_wind", ",", "t", "=", "x", "return", "a", "*", "mean_wind", "*", "np", ".", "cos", "(", "np", ".", "pi", "*", "(", "t", "-", "t_shift", ")", "/", "12", ")", "+", "b", "*", "mean_wind" ]
generates a diurnal course of windspeed according to the cosine function

Args:
    x: series of equally distributed windspeed values
    a: parameter a for the cosine function
    b: parameter b for the cosine function
    t_shift: parameter t_shift for the cosine function

Returns:
    series including diurnal course of windspeed.
[ "generates", "a", "diurnal", "course", "of", "windspeed", "according", "to", "the", "cosine", "function" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/wind.py#L33-L47
kristianfoerster/melodist
melodist/wind.py
disaggregate_wind
def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None): """general function for windspeed disaggregation Args: wind_daily: daily values method: keyword specifying the disaggregation method to be used a: parameter a for the cosine function b: parameter b for the cosine function t_shift: parameter t_shift for the cosine function Returns: Disaggregated hourly values of windspeed. """ assert method in ('equal', 'cosine', 'random'), 'Invalid method' wind_eq = melodist.distribute_equally(wind_daily) if method == 'equal': wind_disagg = wind_eq elif method == 'cosine': assert None not in (a, b, t_shift) wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift) elif method == 'random': wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3 return wind_disagg
python
def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None): """general function for windspeed disaggregation Args: wind_daily: daily values method: keyword specifying the disaggregation method to be used a: parameter a for the cosine function b: parameter b for the cosine function t_shift: parameter t_shift for the cosine function Returns: Disaggregated hourly values of windspeed. """ assert method in ('equal', 'cosine', 'random'), 'Invalid method' wind_eq = melodist.distribute_equally(wind_daily) if method == 'equal': wind_disagg = wind_eq elif method == 'cosine': assert None not in (a, b, t_shift) wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift) elif method == 'random': wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3 return wind_disagg
[ "def", "disaggregate_wind", "(", "wind_daily", ",", "method", "=", "'equal'", ",", "a", "=", "None", ",", "b", "=", "None", ",", "t_shift", "=", "None", ")", ":", "assert", "method", "in", "(", "'equal'", ",", "'cosine'", ",", "'random'", ")", ",", "'Invalid method'", "wind_eq", "=", "melodist", ".", "distribute_equally", "(", "wind_daily", ")", "if", "method", "==", "'equal'", ":", "wind_disagg", "=", "wind_eq", "elif", "method", "==", "'cosine'", ":", "assert", "None", "not", "in", "(", "a", ",", "b", ",", "t_shift", ")", "wind_disagg", "=", "_cosine_function", "(", "np", ".", "array", "(", "[", "wind_eq", ".", "values", ",", "wind_eq", ".", "index", ".", "hour", "]", ")", ",", "a", ",", "b", ",", "t_shift", ")", "elif", "method", "==", "'random'", ":", "wind_disagg", "=", "wind_eq", "*", "(", "-", "np", ".", "log", "(", "np", ".", "random", ".", "rand", "(", "len", "(", "wind_eq", ")", ")", ")", ")", "**", "0.3", "return", "wind_disagg" ]
general function for windspeed disaggregation Args: wind_daily: daily values method: keyword specifying the disaggregation method to be used a: parameter a for the cosine function b: parameter b for the cosine function t_shift: parameter t_shift for the cosine function Returns: Disaggregated hourly values of windspeed.
[ "general", "function", "for", "windspeed", "disaggregation" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/wind.py#L50-L75
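A hedged usage sketch: `wind_daily` is constructed here as a toy pandas Series of daily means, and the cosine parameters are illustrative:

```python
import pandas as pd
import melodist

# toy input: one mean windspeed value per day
wind_daily = pd.Series([2.0, 3.5, 1.2],
                       index=pd.date_range('2020-06-01', periods=3, freq='D'))

# 'equal' simply repeats each daily mean over the 24 hours
wind_hourly = melodist.disaggregate_wind(wind_daily, method='equal')

# 'cosine' additionally needs fitted parameters a, b and t_shift
wind_hourly = melodist.disaggregate_wind(wind_daily, method='cosine',
                                         a=0.5, b=1.0, t_shift=14.0)
```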
kristianfoerster/melodist
melodist/wind.py
fit_cosine_function
def fit_cosine_function(wind): """fits a cosine function to observed hourly windspeed data Args: wind: observed hourly windspeed data Returns: parameters needed to generate diurnal features of windspeed using a cosine function """ wind_daily = wind.groupby(wind.index.date).mean() wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values) # daily values evenly distributed over the hours df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any') x = np.array([df.daily, df.index.hour]) popt, pcov = scipy.optimize.curve_fit(_cosine_function, x, df.hourly) return popt
python
def fit_cosine_function(wind): """fits a cosine function to observed hourly windspeed data Args: wind: observed hourly windspeed data Returns: parameters needed to generate diurnal features of windspeed using a cosine function """ wind_daily = wind.groupby(wind.index.date).mean() wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values) # daily values evenly distributed over the hours df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any') x = np.array([df.daily, df.index.hour]) popt, pcov = scipy.optimize.curve_fit(_cosine_function, x, df.hourly) return popt
[ "def", "fit_cosine_function", "(", "wind", ")", ":", "wind_daily", "=", "wind", ".", "groupby", "(", "wind", ".", "index", ".", "date", ")", ".", "mean", "(", ")", "wind_daily_hourly", "=", "pd", ".", "Series", "(", "index", "=", "wind", ".", "index", ",", "data", "=", "wind_daily", ".", "loc", "[", "wind", ".", "index", ".", "date", "]", ".", "values", ")", "# daily values evenly distributed over the hours", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "dict", "(", "daily", "=", "wind_daily_hourly", ",", "hourly", "=", "wind", ")", ")", ".", "dropna", "(", "how", "=", "'any'", ")", "x", "=", "np", ".", "array", "(", "[", "df", ".", "daily", ",", "df", ".", "index", ".", "hour", "]", ")", "popt", ",", "pcov", "=", "scipy", ".", "optimize", ".", "curve_fit", "(", "_cosine_function", ",", "x", ",", "df", ".", "hourly", ")", "return", "popt" ]
fits a cosine function to observed hourly windspeed data Args: wind: observed hourly windspeed data Returns: parameters needed to generate diurnal features of windspeed using a cosine function
[ "fits", "a", "cosine", "function", "to", "observed", "hourly", "windspeed", "data" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/wind.py#L78-L94
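A calibration sketch on synthetic data, assuming the function is importable from `melodist.wind` as the file path above suggests:

```python
import numpy as np
import pandas as pd
from melodist.wind import fit_cosine_function  # import location assumed

# synthetic hourly "observations" with a built-in diurnal cycle
idx = pd.date_range('2020-06-01', periods=10 * 24, freq='H')
wind_hourly_obs = pd.Series(3.0 + 1.5 * np.cos(np.pi * (idx.hour - 14) / 12),
                            index=idx)

# recovers a, b and t_shift for later use with method='cosine'
a, b, t_shift = fit_cosine_function(wind_hourly_obs)
```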
kristianfoerster/melodist
melodist/data_io.py
read_smet
def read_smet(filename, mode): """Reads smet data and returns the data in required dataformat (pd df) See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf for further details on the specifications of this file format. Parameters ---- filename : SMET file to read mode : "d" for daily and "h" for hourly input Returns ---- [header, data] header: header as dict data : data as pd df """ # dictionary # based on smet spec V.1.1 and self defined # daily data dict_d = {'TA': 'tmean', 'TMAX': 'tmax', # no spec 'TMIN': 'tmin', # no spec 'PSUM': 'precip', 'ISWR': 'glob', # no spec 'RH': 'hum', 'VW': 'wind'} # hourly data dict_h = {'TA': 'temp', 'PSUM': 'precip', 'ISWR': 'glob', # no spec 'RH': 'hum', 'VW': 'wind'} with open(filename) as f: in_header = False data_start = None header = collections.OrderedDict() for line_num, line in enumerate(f): if line.strip() == '[HEADER]': in_header = True continue elif line.strip() == '[DATA]': data_start = line_num + 1 break if in_header: line_split = line.split('=') k = line_split[0].strip() v = line_split[1].strip() header[k] = v # get column names columns = header['fields'].split() multiplier = [float(x) for x in header['units_multiplier'].split()][1:] data = pd.read_table( filename, sep=r'\s+', na_values=[-999], skiprows=data_start, names=columns, index_col='timestamp', parse_dates=True, ) data = data*multiplier del data.index.name # rename columns if mode == "d": data = data.rename(columns=dict_d) if mode == "h": data = data.rename(columns=dict_h) return header, data
python
def read_smet(filename, mode): """Reads smet data and returns the data in required dataformat (pd df) See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf for further details on the specifications of this file format. Parameters ---- filename : SMET file to read mode : "d" for daily and "h" for hourly input Returns ---- [header, data] header: header as dict data : data as pd df """ # dictionary # based on smet spec V.1.1 and self defined # daily data dict_d = {'TA': 'tmean', 'TMAX': 'tmax', # no spec 'TMIN': 'tmin', # no spec 'PSUM': 'precip', 'ISWR': 'glob', # no spec 'RH': 'hum', 'VW': 'wind'} # hourly data dict_h = {'TA': 'temp', 'PSUM': 'precip', 'ISWR': 'glob', # no spec 'RH': 'hum', 'VW': 'wind'} with open(filename) as f: in_header = False data_start = None header = collections.OrderedDict() for line_num, line in enumerate(f): if line.strip() == '[HEADER]': in_header = True continue elif line.strip() == '[DATA]': data_start = line_num + 1 break if in_header: line_split = line.split('=') k = line_split[0].strip() v = line_split[1].strip() header[k] = v # get column names columns = header['fields'].split() multiplier = [float(x) for x in header['units_multiplier'].split()][1:] data = pd.read_table( filename, sep=r'\s+', na_values=[-999], skiprows=data_start, names=columns, index_col='timestamp', parse_dates=True, ) data = data*multiplier del data.index.name # rename columns if mode == "d": data = data.rename(columns=dict_d) if mode == "h": data = data.rename(columns=dict_h) return header, data
[ "def", "read_smet", "(", "filename", ",", "mode", ")", ":", "# dictionary", "# based on smet spec V.1.1 and self defined", "# daily data", "dict_d", "=", "{", "'TA'", ":", "'tmean'", ",", "'TMAX'", ":", "'tmax'", ",", "# no spec", "'TMIN'", ":", "'tmin'", ",", "# no spec", "'PSUM'", ":", "'precip'", ",", "'ISWR'", ":", "'glob'", ",", "# no spec", "'RH'", ":", "'hum'", ",", "'VW'", ":", "'wind'", "}", "# hourly data", "dict_h", "=", "{", "'TA'", ":", "'temp'", ",", "'PSUM'", ":", "'precip'", ",", "'ISWR'", ":", "'glob'", ",", "# no spec", "'RH'", ":", "'hum'", ",", "'VW'", ":", "'wind'", "}", "with", "open", "(", "filename", ")", "as", "f", ":", "in_header", "=", "False", "data_start", "=", "None", "header", "=", "collections", ".", "OrderedDict", "(", ")", "for", "line_num", ",", "line", "in", "enumerate", "(", "f", ")", ":", "if", "line", ".", "strip", "(", ")", "==", "'[HEADER]'", ":", "in_header", "=", "True", "continue", "elif", "line", ".", "strip", "(", ")", "==", "'[DATA]'", ":", "data_start", "=", "line_num", "+", "1", "break", "if", "in_header", ":", "line_split", "=", "line", ".", "split", "(", "'='", ")", "k", "=", "line_split", "[", "0", "]", ".", "strip", "(", ")", "v", "=", "line_split", "[", "1", "]", ".", "strip", "(", ")", "header", "[", "k", "]", "=", "v", "# get column names", "columns", "=", "header", "[", "'fields'", "]", ".", "split", "(", ")", "multiplier", "=", "[", "float", "(", "x", ")", "for", "x", "in", "header", "[", "'units_multiplier'", "]", ".", "split", "(", ")", "]", "[", "1", ":", "]", "data", "=", "pd", ".", "read_table", "(", "filename", ",", "sep", "=", "r'\\s+'", ",", "na_values", "=", "[", "-", "999", "]", ",", "skiprows", "=", "data_start", ",", "names", "=", "columns", ",", "index_col", "=", "'timestamp'", ",", "parse_dates", "=", "True", ",", ")", "data", "=", "data", "*", "multiplier", "del", "data", ".", "index", ".", "name", "# rename columns", "if", "mode", "==", "\"d\"", ":", "data", "=", "data", ".", "rename", "(", "columns", "=", "dict_d", ")", "if", "mode", "==", "\"h\"", ":", "data", "=", "data", ".", "rename", "(", "columns", "=", "dict_h", ")", "return", "header", ",", "data" ]
Reads smet data and returns the data in the required data format (pd df) See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf for further details on the specifications of this file format. Parameters ---- filename : SMET file to read mode : "d" for daily and "h" for hourly input Returns ---- [header, data] header: header as dict data : data as pd df
[ "Reads", "smet", "data", "and", "returns", "the", "data", "in", "required", "dataformat", "(", "pd", "df", ")" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L33-L113
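A usage sketch; the file name is a placeholder and the import path follows the `melodist/data_io.py` location given above:

```python
from melodist.data_io import read_smet  # import location assumed

header, daily = read_smet('station.smet', mode='d')   # daily names: tmean, tmax, ...
header, hourly = read_smet('station.smet', mode='h')  # hourly names: temp, precip, ...
print(list(header), hourly.columns.tolist())
```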
kristianfoerster/melodist
melodist/data_io.py
read_dwd
def read_dwd(filename, metadata, mode="d", skip_last=True): """Reads dwd (German Weather Service) data and returns the data in required dataformat (pd df) Parameters ---- filename : DWD file to read (full path) / list of hourly files (RR+TU+FF) metadata : corresponding DWD metadata file to read mode : "d" for daily and "h" for hourly input skip_last : boolen, skips last line due to file format Returns ---- [header, data] header: header as dict data : data as pd df """ def read_single_dwd(filename, metadata, mode, skip_last): # Param names {'DWD':'dissag_def'} dict_d = {'LUFTTEMPERATUR': 'tmean', 'LUFTTEMPERATUR_MINIMUM': 'tmin', # no spec 'LUFTTEMPERATUR_MAXIMUM': 'tmax', # no spec 'NIEDERSCHLAGSHOEHE': 'precip', 'GLOBAL_KW_J': 'glob', # no spec 'REL_FEUCHTE': 'hum', 'WINDGESCHWINDIGKEIT': 'wind', 'SONNENSCHEINDAUER': 'sun_h'} # ---read meta------------------ meta = pd.read_csv( metadata, sep=';' ) # remove whitespace from header columns meta.rename(columns=lambda x: x.strip(), inplace=True) header = {"Stations_id": meta.Stations_id[meta.last_valid_index()], "Stationsname": meta.Stationsname[meta.last_valid_index()], # workaround for colnames with . (Geogr.Breite) "Breite": meta.iloc[meta.last_valid_index(), 2], # DezDeg "Laenge": meta.iloc[meta.last_valid_index(), 3] # DezDeg } # ---read data------------------ if skip_last is not None: num_lines = sum(1 for line in open(filename)) skip_last = [num_lines-1] # hourly data must be parsed by custom definition if mode == "d": data = pd.read_csv( filename, sep=';', na_values='-999', index_col=' MESS_DATUM', parse_dates=True, skiprows=skip_last ) # hourly data must be parsed by custom definition if mode == "h": def date_parser(date_time): hour = date_time[8:10] day = date_time[6:8] month = date_time[4:6] year = date_time[0:4] minute = '00' sec = '00' return pd.Timestamp('%s-%s-%s %s:%s:%s' % (year, month, day, hour, minute, sec)) data = pd.read_csv( filename, sep=';', na_values='-999', index_col=' MESS_DATUM', date_parser=date_parser, skiprows=skip_last ) # remove whitespace from header columns data.rename(columns=lambda x: x.strip(), inplace=True) # rename to dissag definition data = data.rename(columns=dict_d) # get colums which are not defined drop = [col for col in data.columns if col not in dict_d.values()] # delete columns data = data.drop(drop, axis=1) # convert temperatures to Kelvin (+273.15) if 'tmin' in data.columns: data["tmin"] = data["tmin"] + 273.15 if 'tmax' in data.columns: data["tmax"] = data["tmax"] + 273.15 if 'tmean' in data.columns: data["tmean"] = data["tmean"] + 273.15 if 'temp' in data.columns: data["temp"] = data["temp"] + 273.15 return header, data if type(filename) == list: i = 1 for file in filename: header, data_h = read_single_dwd(file, metadata, mode, skip_last) if i == 1: data = data_h else: data = data.join(data_h, how='outer') i += 1 else: header, data = read_single_dwd(filename, metadata, mode, skip_last) return header, data
python
def read_dwd(filename, metadata, mode="d", skip_last=True): """Reads dwd (German Weather Service) data and returns the data in required dataformat (pd df) Parameters ---- filename : DWD file to read (full path) / list of hourly files (RR+TU+FF) metadata : corresponding DWD metadata file to read mode : "d" for daily and "h" for hourly input skip_last : boolen, skips last line due to file format Returns ---- [header, data] header: header as dict data : data as pd df """ def read_single_dwd(filename, metadata, mode, skip_last): # Param names {'DWD':'dissag_def'} dict_d = {'LUFTTEMPERATUR': 'tmean', 'LUFTTEMPERATUR_MINIMUM': 'tmin', # no spec 'LUFTTEMPERATUR_MAXIMUM': 'tmax', # no spec 'NIEDERSCHLAGSHOEHE': 'precip', 'GLOBAL_KW_J': 'glob', # no spec 'REL_FEUCHTE': 'hum', 'WINDGESCHWINDIGKEIT': 'wind', 'SONNENSCHEINDAUER': 'sun_h'} # ---read meta------------------ meta = pd.read_csv( metadata, sep=';' ) # remove whitespace from header columns meta.rename(columns=lambda x: x.strip(), inplace=True) header = {"Stations_id": meta.Stations_id[meta.last_valid_index()], "Stationsname": meta.Stationsname[meta.last_valid_index()], # workaround for colnames with . (Geogr.Breite) "Breite": meta.iloc[meta.last_valid_index(), 2], # DezDeg "Laenge": meta.iloc[meta.last_valid_index(), 3] # DezDeg } # ---read data------------------ if skip_last is not None: num_lines = sum(1 for line in open(filename)) skip_last = [num_lines-1] # hourly data must be parsed by custom definition if mode == "d": data = pd.read_csv( filename, sep=';', na_values='-999', index_col=' MESS_DATUM', parse_dates=True, skiprows=skip_last ) # hourly data must be parsed by custom definition if mode == "h": def date_parser(date_time): hour = date_time[8:10] day = date_time[6:8] month = date_time[4:6] year = date_time[0:4] minute = '00' sec = '00' return pd.Timestamp('%s-%s-%s %s:%s:%s' % (year, month, day, hour, minute, sec)) data = pd.read_csv( filename, sep=';', na_values='-999', index_col=' MESS_DATUM', date_parser=date_parser, skiprows=skip_last ) # remove whitespace from header columns data.rename(columns=lambda x: x.strip(), inplace=True) # rename to dissag definition data = data.rename(columns=dict_d) # get colums which are not defined drop = [col for col in data.columns if col not in dict_d.values()] # delete columns data = data.drop(drop, axis=1) # convert temperatures to Kelvin (+273.15) if 'tmin' in data.columns: data["tmin"] = data["tmin"] + 273.15 if 'tmax' in data.columns: data["tmax"] = data["tmax"] + 273.15 if 'tmean' in data.columns: data["tmean"] = data["tmean"] + 273.15 if 'temp' in data.columns: data["temp"] = data["temp"] + 273.15 return header, data if type(filename) == list: i = 1 for file in filename: header, data_h = read_single_dwd(file, metadata, mode, skip_last) if i == 1: data = data_h else: data = data.join(data_h, how='outer') i += 1 else: header, data = read_single_dwd(filename, metadata, mode, skip_last) return header, data
[ "def", "read_dwd", "(", "filename", ",", "metadata", ",", "mode", "=", "\"d\"", ",", "skip_last", "=", "True", ")", ":", "def", "read_single_dwd", "(", "filename", ",", "metadata", ",", "mode", ",", "skip_last", ")", ":", "# Param names {'DWD':'dissag_def'}", "dict_d", "=", "{", "'LUFTTEMPERATUR'", ":", "'tmean'", ",", "'LUFTTEMPERATUR_MINIMUM'", ":", "'tmin'", ",", "# no spec", "'LUFTTEMPERATUR_MAXIMUM'", ":", "'tmax'", ",", "# no spec", "'NIEDERSCHLAGSHOEHE'", ":", "'precip'", ",", "'GLOBAL_KW_J'", ":", "'glob'", ",", "# no spec", "'REL_FEUCHTE'", ":", "'hum'", ",", "'WINDGESCHWINDIGKEIT'", ":", "'wind'", ",", "'SONNENSCHEINDAUER'", ":", "'sun_h'", "}", "# ---read meta------------------", "meta", "=", "pd", ".", "read_csv", "(", "metadata", ",", "sep", "=", "';'", ")", "# remove whitespace from header columns", "meta", ".", "rename", "(", "columns", "=", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "inplace", "=", "True", ")", "header", "=", "{", "\"Stations_id\"", ":", "meta", ".", "Stations_id", "[", "meta", ".", "last_valid_index", "(", ")", "]", ",", "\"Stationsname\"", ":", "meta", ".", "Stationsname", "[", "meta", ".", "last_valid_index", "(", ")", "]", ",", "# workaround for colnames with . (Geogr.Breite)", "\"Breite\"", ":", "meta", ".", "iloc", "[", "meta", ".", "last_valid_index", "(", ")", ",", "2", "]", ",", "# DezDeg", "\"Laenge\"", ":", "meta", ".", "iloc", "[", "meta", ".", "last_valid_index", "(", ")", ",", "3", "]", "# DezDeg", "}", "# ---read data------------------", "if", "skip_last", "is", "not", "None", ":", "num_lines", "=", "sum", "(", "1", "for", "line", "in", "open", "(", "filename", ")", ")", "skip_last", "=", "[", "num_lines", "-", "1", "]", "# hourly data must be parsed by custom definition", "if", "mode", "==", "\"d\"", ":", "data", "=", "pd", ".", "read_csv", "(", "filename", ",", "sep", "=", "';'", ",", "na_values", "=", "'-999'", ",", "index_col", "=", "' MESS_DATUM'", ",", "parse_dates", "=", "True", ",", "skiprows", "=", "skip_last", ")", "# hourly data must be parsed by custom definition", "if", "mode", "==", "\"h\"", ":", "def", "date_parser", "(", "date_time", ")", ":", "hour", "=", "date_time", "[", "8", ":", "10", "]", "day", "=", "date_time", "[", "6", ":", "8", "]", "month", "=", "date_time", "[", "4", ":", "6", "]", "year", "=", "date_time", "[", "0", ":", "4", "]", "minute", "=", "'00'", "sec", "=", "'00'", "return", "pd", ".", "Timestamp", "(", "'%s-%s-%s %s:%s:%s'", "%", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "sec", ")", ")", "data", "=", "pd", ".", "read_csv", "(", "filename", ",", "sep", "=", "';'", ",", "na_values", "=", "'-999'", ",", "index_col", "=", "' MESS_DATUM'", ",", "date_parser", "=", "date_parser", ",", "skiprows", "=", "skip_last", ")", "# remove whitespace from header columns", "data", ".", "rename", "(", "columns", "=", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "inplace", "=", "True", ")", "# rename to dissag definition", "data", "=", "data", ".", "rename", "(", "columns", "=", "dict_d", ")", "# get colums which are not defined", "drop", "=", "[", "col", "for", "col", "in", "data", ".", "columns", "if", "col", "not", "in", "dict_d", ".", "values", "(", ")", "]", "# delete columns", "data", "=", "data", ".", "drop", "(", "drop", ",", "axis", "=", "1", ")", "# convert temperatures to Kelvin (+273.15)", "if", "'tmin'", "in", "data", ".", "columns", ":", "data", "[", "\"tmin\"", "]", "=", "data", "[", "\"tmin\"", "]", "+", "273.15", "if", "'tmax'", "in", "data", ".", 
"columns", ":", "data", "[", "\"tmax\"", "]", "=", "data", "[", "\"tmax\"", "]", "+", "273.15", "if", "'tmean'", "in", "data", ".", "columns", ":", "data", "[", "\"tmean\"", "]", "=", "data", "[", "\"tmean\"", "]", "+", "273.15", "if", "'temp'", "in", "data", ".", "columns", ":", "data", "[", "\"temp\"", "]", "=", "data", "[", "\"temp\"", "]", "+", "273.15", "return", "header", ",", "data", "if", "type", "(", "filename", ")", "==", "list", ":", "i", "=", "1", "for", "file", "in", "filename", ":", "header", ",", "data_h", "=", "read_single_dwd", "(", "file", ",", "metadata", ",", "mode", ",", "skip_last", ")", "if", "i", "==", "1", ":", "data", "=", "data_h", "else", ":", "data", "=", "data", ".", "join", "(", "data_h", ",", "how", "=", "'outer'", ")", "i", "+=", "1", "else", ":", "header", ",", "data", "=", "read_single_dwd", "(", "filename", ",", "metadata", ",", "mode", ",", "skip_last", ")", "return", "header", ",", "data" ]
Reads dwd (German Weather Service) data and returns the data in the required data format (pd df) Parameters ---- filename : DWD file to read (full path) / list of hourly files (RR+TU+FF) metadata : corresponding DWD metadata file to read mode : "d" for daily and "h" for hourly input skip_last : boolean, skips the last line due to the file format Returns ---- [header, data] header: header as dict data : data as pd df
[ "Reads", "dwd", "(", "German", "Weather", "Service", ")", "data", "and", "returns", "the", "data", "in", "required", "dataformat", "(", "pd", "df", ")" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L116-L233
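A usage sketch with placeholder file names; as the code shows, a list of hourly files (e.g. precipitation, temperature and wind) is outer-joined on the timestamp:

```python
from melodist.data_io import read_dwd  # import location assumed

# daily data: a single file plus its metadata file
header, daily = read_dwd('produkt_klima_tag.txt', 'metadaten_geo.txt', mode='d')

# hourly data: several parameter files merged into one frame
header, hourly = read_dwd(['produkt_rr.txt', 'produkt_tu.txt', 'produkt_ff.txt'],
                          'metadaten_geo.txt', mode='h')
print(header['Stationsname'], hourly.columns.tolist())
```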
kristianfoerster/melodist
melodist/data_io.py
write_smet
def write_smet(filename, data, metadata, nodata_value=-999, mode='h', check_nan=True): """writes smet files Parameters ---- filename : filename/loction of output data : data to write as pandas df metadata: header to write input as dict nodata_value: Nodata Value to write/use mode: defines if to write daily ("d") or continuos data (default 'h') check_nan: will check if only nans in data and if true will not write this colums (default True) """ # dictionary # based on smet spec V.1.1 and selfdefined # daily data dict_d= {'tmean':'TA', 'tmin':'TMAX', #no spec 'tmax':'TMIN', #no spec 'precip':'PSUM', 'glob':'ISWR', #no spec 'hum':'RH', 'wind':'VW' } #hourly data dict_h= {'temp':'TA', 'precip':'PSUM', 'glob':'ISWR', #no spec 'hum':'RH', 'wind':'VW' } #rename columns if mode == "d": data = data.rename(columns=dict_d) if mode == "h": data = data.rename(columns=dict_h) if check_nan: #get all colums with data datas_in = data.sum().dropna().to_frame().T #get colums with no datas drop = [data_nan for data_nan in data.columns if data_nan not in datas_in] #delete columns data = data.drop(drop, axis=1) with open(filename, 'w') as f: #preparing data #converte date_times to SMET timestamps if mode == "d": t = '%Y-%m-%dT00:00' if mode == "h": t = '%Y-%m-%dT%H:%M' data['timestamp'] = [d.strftime(t) for d in data.index] cols = data.columns.tolist() cols = cols[-1:] + cols[:-1] data = data[cols] #metadatas update metadata['fields'] = ' '.join(data.columns) metadata["units_multiplier"] = len(metadata['fields'].split())*"1 " #writing data #metadata f.write('SMET 1.1 ASCII\n') f.write('[HEADER]\n') for k, v in metadata.items(): f.write('{} = {}\n'.format(k, v)) #data f.write('[DATA]\n') data_str = data.fillna(nodata_value).to_string( header=False, index=False, float_format=lambda x: '{:.2f}'.format(x), ) f.write(data_str)
python
def write_smet(filename, data, metadata, nodata_value=-999, mode='h', check_nan=True): """writes smet files Parameters ---- filename : filename/loction of output data : data to write as pandas df metadata: header to write input as dict nodata_value: Nodata Value to write/use mode: defines if to write daily ("d") or continuos data (default 'h') check_nan: will check if only nans in data and if true will not write this colums (default True) """ # dictionary # based on smet spec V.1.1 and selfdefined # daily data dict_d= {'tmean':'TA', 'tmin':'TMAX', #no spec 'tmax':'TMIN', #no spec 'precip':'PSUM', 'glob':'ISWR', #no spec 'hum':'RH', 'wind':'VW' } #hourly data dict_h= {'temp':'TA', 'precip':'PSUM', 'glob':'ISWR', #no spec 'hum':'RH', 'wind':'VW' } #rename columns if mode == "d": data = data.rename(columns=dict_d) if mode == "h": data = data.rename(columns=dict_h) if check_nan: #get all colums with data datas_in = data.sum().dropna().to_frame().T #get colums with no datas drop = [data_nan for data_nan in data.columns if data_nan not in datas_in] #delete columns data = data.drop(drop, axis=1) with open(filename, 'w') as f: #preparing data #converte date_times to SMET timestamps if mode == "d": t = '%Y-%m-%dT00:00' if mode == "h": t = '%Y-%m-%dT%H:%M' data['timestamp'] = [d.strftime(t) for d in data.index] cols = data.columns.tolist() cols = cols[-1:] + cols[:-1] data = data[cols] #metadatas update metadata['fields'] = ' '.join(data.columns) metadata["units_multiplier"] = len(metadata['fields'].split())*"1 " #writing data #metadata f.write('SMET 1.1 ASCII\n') f.write('[HEADER]\n') for k, v in metadata.items(): f.write('{} = {}\n'.format(k, v)) #data f.write('[DATA]\n') data_str = data.fillna(nodata_value).to_string( header=False, index=False, float_format=lambda x: '{:.2f}'.format(x), ) f.write(data_str)
[ "def", "write_smet", "(", "filename", ",", "data", ",", "metadata", ",", "nodata_value", "=", "-", "999", ",", "mode", "=", "'h'", ",", "check_nan", "=", "True", ")", ":", "# dictionary", "# based on smet spec V.1.1 and selfdefined", "# daily data", "dict_d", "=", "{", "'tmean'", ":", "'TA'", ",", "'tmin'", ":", "'TMAX'", ",", "#no spec", "'tmax'", ":", "'TMIN'", ",", "#no spec", "'precip'", ":", "'PSUM'", ",", "'glob'", ":", "'ISWR'", ",", "#no spec", "'hum'", ":", "'RH'", ",", "'wind'", ":", "'VW'", "}", "#hourly data", "dict_h", "=", "{", "'temp'", ":", "'TA'", ",", "'precip'", ":", "'PSUM'", ",", "'glob'", ":", "'ISWR'", ",", "#no spec", "'hum'", ":", "'RH'", ",", "'wind'", ":", "'VW'", "}", "#rename columns", "if", "mode", "==", "\"d\"", ":", "data", "=", "data", ".", "rename", "(", "columns", "=", "dict_d", ")", "if", "mode", "==", "\"h\"", ":", "data", "=", "data", ".", "rename", "(", "columns", "=", "dict_h", ")", "if", "check_nan", ":", "#get all colums with data", "datas_in", "=", "data", ".", "sum", "(", ")", ".", "dropna", "(", ")", ".", "to_frame", "(", ")", ".", "T", "#get colums with no datas", "drop", "=", "[", "data_nan", "for", "data_nan", "in", "data", ".", "columns", "if", "data_nan", "not", "in", "datas_in", "]", "#delete columns", "data", "=", "data", ".", "drop", "(", "drop", ",", "axis", "=", "1", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "#preparing data", "#converte date_times to SMET timestamps", "if", "mode", "==", "\"d\"", ":", "t", "=", "'%Y-%m-%dT00:00'", "if", "mode", "==", "\"h\"", ":", "t", "=", "'%Y-%m-%dT%H:%M'", "data", "[", "'timestamp'", "]", "=", "[", "d", ".", "strftime", "(", "t", ")", "for", "d", "in", "data", ".", "index", "]", "cols", "=", "data", ".", "columns", ".", "tolist", "(", ")", "cols", "=", "cols", "[", "-", "1", ":", "]", "+", "cols", "[", ":", "-", "1", "]", "data", "=", "data", "[", "cols", "]", "#metadatas update", "metadata", "[", "'fields'", "]", "=", "' '", ".", "join", "(", "data", ".", "columns", ")", "metadata", "[", "\"units_multiplier\"", "]", "=", "len", "(", "metadata", "[", "'fields'", "]", ".", "split", "(", ")", ")", "*", "\"1 \"", "#writing data", "#metadata", "f", ".", "write", "(", "'SMET 1.1 ASCII\\n'", ")", "f", ".", "write", "(", "'[HEADER]\\n'", ")", "for", "k", ",", "v", "in", "metadata", ".", "items", "(", ")", ":", "f", ".", "write", "(", "'{} = {}\\n'", ".", "format", "(", "k", ",", "v", ")", ")", "#data", "f", ".", "write", "(", "'[DATA]\\n'", ")", "data_str", "=", "data", ".", "fillna", "(", "nodata_value", ")", ".", "to_string", "(", "header", "=", "False", ",", "index", "=", "False", ",", "float_format", "=", "lambda", "x", ":", "'{:.2f}'", ".", "format", "(", "x", ")", ",", ")", "f", ".", "write", "(", "data_str", ")" ]
writes smet files Parameters ---- filename : filename/location of the output data : data to write as pandas df metadata: header to write, given as dict nodata_value: nodata value to write/use mode: defines whether to write daily ("d") or continuous data (default 'h') check_nan: if True, columns containing only NaNs are not written (default True)
[ "writes", "smet", "files" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L236-L320
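A sketch of writing data back to SMET; `hourly` stands for a disaggregated DataFrame, and the metadata keys are illustrative (the function overwrites `fields` and `units_multiplier` itself):

```python
from melodist.data_io import write_smet  # import location assumed

metadata = {'station_id': 'demo', 'latitude': 47.0,
            'longitude': 11.0, 'altitude': 500}  # illustrative header entries

write_smet('station_out.smet', hourly, metadata, nodata_value=-999, mode='h')
```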
kristianfoerster/melodist
melodist/data_io.py
read_single_knmi_file
def read_single_knmi_file(filename): """reads a single file of KNMI's meteorological time series data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens Args: filename: the file to be opened Returns: pandas data frame including time series """ hourly_data_obs_raw = pd.read_csv( filename, parse_dates=[['YYYYMMDD', 'HH']], date_parser=lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]), int(str(yyyymmdd)[4:6]), int(str(yyyymmdd)[6:8]), int(hh) - 1), skiprows=31, skipinitialspace=True, na_values='', keep_date_col=True, ) hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH'] hourly_data_obs_raw.index = hourly_data_obs_raw.index + pd.Timedelta(hours=1) columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd'] hourly_data_obs = pd.DataFrame( index=hourly_data_obs_raw.index, columns=columns_hourly, data=dict( temp=hourly_data_obs_raw['T'] / 10 + 273.15, precip=hourly_data_obs_raw['RH'] / 10, glob=hourly_data_obs_raw['Q'] * 10000 / 3600., hum=hourly_data_obs_raw['U'], wind=hourly_data_obs_raw['FH'] / 10, ssd=hourly_data_obs_raw['SQ'] * 6, ), ) # remove negative values negative_values = hourly_data_obs['precip'] < 0.0 hourly_data_obs.loc[negative_values, 'precip'] = 0.0 return hourly_data_obs
python
def read_single_knmi_file(filename): """reads a single file of KNMI's meteorological time series data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens Args: filename: the file to be opened Returns: pandas data frame including time series """ hourly_data_obs_raw = pd.read_csv( filename, parse_dates=[['YYYYMMDD', 'HH']], date_parser=lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]), int(str(yyyymmdd)[4:6]), int(str(yyyymmdd)[6:8]), int(hh) - 1), skiprows=31, skipinitialspace=True, na_values='', keep_date_col=True, ) hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH'] hourly_data_obs_raw.index = hourly_data_obs_raw.index + pd.Timedelta(hours=1) columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd'] hourly_data_obs = pd.DataFrame( index=hourly_data_obs_raw.index, columns=columns_hourly, data=dict( temp=hourly_data_obs_raw['T'] / 10 + 273.15, precip=hourly_data_obs_raw['RH'] / 10, glob=hourly_data_obs_raw['Q'] * 10000 / 3600., hum=hourly_data_obs_raw['U'], wind=hourly_data_obs_raw['FH'] / 10, ssd=hourly_data_obs_raw['SQ'] * 6, ), ) # remove negative values negative_values = hourly_data_obs['precip'] < 0.0 hourly_data_obs.loc[negative_values, 'precip'] = 0.0 return hourly_data_obs
[ "def", "read_single_knmi_file", "(", "filename", ")", ":", "hourly_data_obs_raw", "=", "pd", ".", "read_csv", "(", "filename", ",", "parse_dates", "=", "[", "[", "'YYYYMMDD'", ",", "'HH'", "]", "]", ",", "date_parser", "=", "lambda", "yyyymmdd", ",", "hh", ":", "pd", ".", "datetime", "(", "int", "(", "str", "(", "yyyymmdd", ")", "[", "0", ":", "4", "]", ")", ",", "int", "(", "str", "(", "yyyymmdd", ")", "[", "4", ":", "6", "]", ")", ",", "int", "(", "str", "(", "yyyymmdd", ")", "[", "6", ":", "8", "]", ")", ",", "int", "(", "hh", ")", "-", "1", ")", ",", "skiprows", "=", "31", ",", "skipinitialspace", "=", "True", ",", "na_values", "=", "''", ",", "keep_date_col", "=", "True", ",", ")", "hourly_data_obs_raw", ".", "index", "=", "hourly_data_obs_raw", "[", "'YYYYMMDD_HH'", "]", "hourly_data_obs_raw", ".", "index", "=", "hourly_data_obs_raw", ".", "index", "+", "pd", ".", "Timedelta", "(", "hours", "=", "1", ")", "columns_hourly", "=", "[", "'temp'", ",", "'precip'", ",", "'glob'", ",", "'hum'", ",", "'wind'", ",", "'ssd'", "]", "hourly_data_obs", "=", "pd", ".", "DataFrame", "(", "index", "=", "hourly_data_obs_raw", ".", "index", ",", "columns", "=", "columns_hourly", ",", "data", "=", "dict", "(", "temp", "=", "hourly_data_obs_raw", "[", "'T'", "]", "/", "10", "+", "273.15", ",", "precip", "=", "hourly_data_obs_raw", "[", "'RH'", "]", "/", "10", ",", "glob", "=", "hourly_data_obs_raw", "[", "'Q'", "]", "*", "10000", "/", "3600.", ",", "hum", "=", "hourly_data_obs_raw", "[", "'U'", "]", ",", "wind", "=", "hourly_data_obs_raw", "[", "'FH'", "]", "/", "10", ",", "ssd", "=", "hourly_data_obs_raw", "[", "'SQ'", "]", "*", "6", ",", ")", ",", ")", "# remove negative values", "negative_values", "=", "hourly_data_obs", "[", "'precip'", "]", "<", "0.0", "hourly_data_obs", ".", "loc", "[", "negative_values", ",", "'precip'", "]", "=", "0.0", "return", "hourly_data_obs" ]
reads a single file of KNMI's meteorological time series. Data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens Args: filename: the file to be opened Returns: pandas data frame including time series
[ "reads", "a", "single", "file", "of", "KNMI", "s", "meteorological", "time", "series" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L323-L367
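A usage sketch with a placeholder file name; note that units are converted on read, e.g. temperature from 0.1 °C to K and precipitation from 0.1 mm to mm:

```python
from melodist.data_io import read_single_knmi_file  # import location assumed

hourly = read_single_knmi_file('uurgeg_260_2001-2010.txt')  # placeholder name
print(hourly[['temp', 'precip', 'wind']].head())
```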
kristianfoerster/melodist
melodist/data_io.py
read_knmi_dataset
def read_knmi_dataset(directory): """Reads files from a directory and merges the time series Please note: For each station, a separate directory must be provided! data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens Args: directory: directory including the files Returns: pandas data frame including time series """ filemask = '%s*.txt' % directory filelist = glob.glob(filemask) columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd'] ts = pd.DataFrame(columns=columns_hourly) first_call = True for file_i in filelist: print(file_i) current = read_single_knmi_file(file_i) if(first_call): ts = current first_call = False else: ts = pd.concat([ts, current]) return ts
python
def read_knmi_dataset(directory): """Reads files from a directory and merges the time series Please note: For each station, a separate directory must be provided! data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens Args: directory: directory including the files Returns: pandas data frame including time series """ filemask = '%s*.txt' % directory filelist = glob.glob(filemask) columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd'] ts = pd.DataFrame(columns=columns_hourly) first_call = True for file_i in filelist: print(file_i) current = read_single_knmi_file(file_i) if(first_call): ts = current first_call = False else: ts = pd.concat([ts, current]) return ts
[ "def", "read_knmi_dataset", "(", "directory", ")", ":", "filemask", "=", "'%s*.txt'", "%", "directory", "filelist", "=", "glob", ".", "glob", "(", "filemask", ")", "columns_hourly", "=", "[", "'temp'", ",", "'precip'", ",", "'glob'", ",", "'hum'", ",", "'wind'", ",", "'ssd'", "]", "ts", "=", "pd", ".", "DataFrame", "(", "columns", "=", "columns_hourly", ")", "first_call", "=", "True", "for", "file_i", "in", "filelist", ":", "print", "(", "file_i", ")", "current", "=", "read_single_knmi_file", "(", "file_i", ")", "if", "(", "first_call", ")", ":", "ts", "=", "current", "first_call", "=", "False", "else", ":", "ts", "=", "pd", ".", "concat", "(", "[", "ts", ",", "current", "]", ")", "return", "ts" ]
Reads files from a directory and merges the time series. Please note: For each station, a separate directory must be provided! Data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens Args: directory: directory including the files Returns: pandas data frame including time series
[ "Reads", "files", "from", "a", "directory", "and", "merges", "the", "time", "series" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L370-L397
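A usage sketch; because the glob pattern is built as `'%s*.txt' % directory`, the directory argument should end with a path separator:

```python
from melodist.data_io import read_knmi_dataset  # import location assumed

# trailing slash matters: the pattern becomes 'knmi_station_260/*.txt'
ts = read_knmi_dataset('knmi_station_260/')
print(ts.index.min(), ts.index.max())
```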
kristianfoerster/melodist
melodist/station.py
Station.calc_sun_times
def calc_sun_times(self): """ Computes the times of sunrise, solar noon, and sunset for each day. """ self.sun_times = melodist.util.get_sun_times(self.data_daily.index, self.lon, self.lat, self.timezone)
python
def calc_sun_times(self): """ Computes the times of sunrise, solar noon, and sunset for each day. """ self.sun_times = melodist.util.get_sun_times(self.data_daily.index, self.lon, self.lat, self.timezone)
[ "def", "calc_sun_times", "(", "self", ")", ":", "self", ".", "sun_times", "=", "melodist", ".", "util", ".", "get_sun_times", "(", "self", ".", "data_daily", ".", "index", ",", "self", ".", "lon", ",", "self", ".", "lat", ",", "self", ".", "timezone", ")" ]
Computes the times of sunrise, solar noon, and sunset for each day.
[ "Computes", "the", "times", "of", "sunrise", "solar", "noon", "and", "sunset", "for", "each", "day", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L174-L179
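A sketch of setting up a `Station` for the method calls that follow; the constructor signature and the assignability of `data_daily` are assumed, and `daily` stands for a daily DataFrame:

```python
import melodist

# lon/lat in degrees, timezone as UTC offset in hours (signature assumed)
station = melodist.Station(lon=11.0, lat=47.0, timezone=1)
station.data_daily = daily  # daily data with a DatetimeIndex
station.calc_sun_times()
print(station.sun_times.head())  # times of sunrise, solar noon and sunset
```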
kristianfoerster/melodist
melodist/station.py
Station.disaggregate_wind
def disaggregate_wind(self, method='equal'): """ Disaggregate wind speed. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Mean daily wind speed is duplicated for the 24 hours of the day. (Default) ``cosine`` Distributes daily mean wind speed using a cosine function derived from hourly observations. ``random`` Draws random numbers to distribute wind speed (usually not conserving the daily average). """ self.data_disagg.wind = melodist.disaggregate_wind(self.data_daily.wind, method=method, **self.statistics.wind)
python
def disaggregate_wind(self, method='equal'): """ Disaggregate wind speed. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Mean daily wind speed is duplicated for the 24 hours of the day. (Default) ``cosine`` Distributes daily mean wind speed using a cosine function derived from hourly observations. ``random`` Draws random numbers to distribute wind speed (usually not conserving the daily average). """ self.data_disagg.wind = melodist.disaggregate_wind(self.data_daily.wind, method=method, **self.statistics.wind)
[ "def", "disaggregate_wind", "(", "self", ",", "method", "=", "'equal'", ")", ":", "self", ".", "data_disagg", ".", "wind", "=", "melodist", ".", "disaggregate_wind", "(", "self", ".", "data_daily", ".", "wind", ",", "method", "=", "method", ",", "*", "*", "self", ".", "statistics", ".", "wind", ")" ]
Disaggregate wind speed. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Mean daily wind speed is duplicated for the 24 hours of the day. (Default) ``cosine`` Distributes daily mean wind speed using a cosine function derived from hourly observations. ``random`` Draws random numbers to distribute wind speed (usually not conserving the daily average).
[ "Disaggregate", "wind", "speed", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L181-L201
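Continuing the station sketch above; for `method='cosine'` the wind statistics must be calibrated first (the calibration call name is assumed):

```python
station.statistics.calc_wind_stats()  # calibration from hourly obs, API assumed
station.disaggregate_wind(method='cosine')
print(station.data_disagg.wind.head())
```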
kristianfoerster/melodist
melodist/station.py
Station.disaggregate_humidity
def disaggregate_humidity(self, method='equal', preserve_daily_mean=False): """ Disaggregate relative humidity. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Mean daily humidity is duplicated for the 24 hours of the day. (Default) ``minimal``: Calculates humidity from daily dew point temperature by setting the dew point temperature equal to the daily minimum temperature. ``dewpoint_regression``: Calculates humidity from daily dew point temperature by calculating dew point temperature using ``Tdew = a * Tmin + b``, where ``a`` and ``b`` are determined by calibration. ``linear_dewpoint_variation``: Calculates humidity from hourly dew point temperature by assuming a linear dew point temperature variation between consecutive days. ``min_max``: Calculates hourly humidity from observations of daily minimum and maximum humidity. ``month_hour_precip_mean``: Calculates hourly humidity from categorical [month, hour, precip(y/n)] mean values derived from observations. preserve_daily_mean : bool, optional If True, correct the daily mean values of the disaggregated data with the observed daily means. """ self.data_disagg.hum = melodist.disaggregate_humidity( self.data_daily, temp=self.data_disagg.temp, method=method, preserve_daily_mean=preserve_daily_mean, **self.statistics.hum )
python
def disaggregate_humidity(self, method='equal', preserve_daily_mean=False): """ Disaggregate relative humidity. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Mean daily humidity is duplicated for the 24 hours of the day. (Default) ``minimal``: Calculates humidity from daily dew point temperature by setting the dew point temperature equal to the daily minimum temperature. ``dewpoint_regression``: Calculates humidity from daily dew point temperature by calculating dew point temperature using ``Tdew = a * Tmin + b``, where ``a`` and ``b`` are determined by calibration. ``linear_dewpoint_variation``: Calculates humidity from hourly dew point temperature by assuming a linear dew point temperature variation between consecutive days. ``min_max``: Calculates hourly humidity from observations of daily minimum and maximum humidity. ``month_hour_precip_mean``: Calculates hourly humidity from categorical [month, hour, precip(y/n)] mean values derived from observations. preserve_daily_mean : bool, optional If True, correct the daily mean values of the disaggregated data with the observed daily means. """ self.data_disagg.hum = melodist.disaggregate_humidity( self.data_daily, temp=self.data_disagg.temp, method=method, preserve_daily_mean=preserve_daily_mean, **self.statistics.hum )
[ "def", "disaggregate_humidity", "(", "self", ",", "method", "=", "'equal'", ",", "preserve_daily_mean", "=", "False", ")", ":", "self", ".", "data_disagg", ".", "hum", "=", "melodist", ".", "disaggregate_humidity", "(", "self", ".", "data_daily", ",", "temp", "=", "self", ".", "data_disagg", ".", "temp", ",", "method", "=", "method", ",", "preserve_daily_mean", "=", "preserve_daily_mean", ",", "*", "*", "self", ".", "statistics", ".", "hum", ")" ]
Disaggregate relative humidity. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Mean daily humidity is duplicated for the 24 hours of the day. (Default) ``minimal``: Calculates humidity from daily dew point temperature by setting the dew point temperature equal to the daily minimum temperature. ``dewpoint_regression``: Calculates humidity from daily dew point temperature by calculating dew point temperature using ``Tdew = a * Tmin + b``, where ``a`` and ``b`` are determined by calibration. ``linear_dewpoint_variation``: Calculates humidity from hourly dew point temperature by assuming a linear dew point temperature variation between consecutive days. ``min_max``: Calculates hourly humidity from observations of daily minimum and maximum humidity. ``month_hour_precip_mean``: Calculates hourly humidity from categorical [month, hour, precip(y/n)] mean values derived from observations. preserve_daily_mean : bool, optional If True, correct the daily mean values of the disaggregated data with the observed daily means.
[ "Disaggregate", "relative", "humidity", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L203-L243
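Continuing the sketch; the code above reads `self.data_disagg.temp`, so temperature should be disaggregated before humidity:

```python
station.disaggregate_temperature()  # humidity methods use disaggregated temp
station.statistics.calc_humidity_stats()  # calibration call name assumed
station.disaggregate_humidity(method='dewpoint_regression',
                              preserve_daily_mean=True)
print(station.data_disagg.hum.head())
```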
kristianfoerster/melodist
melodist/station.py
Station.disaggregate_temperature
def disaggregate_temperature(self, method='sine_min_max', min_max_time='fix', mod_nighttime=False): """ Disaggregate air temperature. Parameters ---------- method : str, optional Disaggregation method. ``sine_min_max`` Hourly temperatures follow a sine function preserving daily minimum and maximum values. (Default) ``sine_mean`` Hourly temperatures follow a sine function preserving the daily mean value and the diurnal temperature range. ``sine`` Same as ``sine_min_max``. ``mean_course_min_max`` Hourly temperatures follow an observed average course (calculated for each month), preserving daily minimum and maximum values. ``mean_course_mean`` Hourly temperatures follow an observed average course (calculated for each month), preserving the daily mean value and the diurnal temperature range. min_max_time : str, optional Method to determine the time of minimum and maximum temperature. ``fix``: Minimum/maximum temperature are assumed to occur at 07:00/14:00 local time. ``sun_loc``: Minimum/maximum temperature are assumed to occur at sunrise / solar noon + 2 h. ``sun_loc_shift``: Minimum/maximum temperature are assumed to occur at sunrise / solar noon + monthly mean shift. mod_nighttime : bool, optional Use linear interpolation between minimum and maximum temperature. """ self.data_disagg.temp = melodist.disaggregate_temperature( self.data_daily, method=method, min_max_time=min_max_time, max_delta=self.statistics.temp.max_delta, mean_course=self.statistics.temp.mean_course, sun_times=self.sun_times, mod_nighttime=mod_nighttime )
python
def disaggregate_temperature(self, method='sine_min_max', min_max_time='fix', mod_nighttime=False): """ Disaggregate air temperature. Parameters ---------- method : str, optional Disaggregation method. ``sine_min_max`` Hourly temperatures follow a sine function preserving daily minimum and maximum values. (Default) ``sine_mean`` Hourly temperatures follow a sine function preserving the daily mean value and the diurnal temperature range. ``sine`` Same as ``sine_min_max``. ``mean_course_min_max`` Hourly temperatures follow an observed average course (calculated for each month), preserving daily minimum and maximum values. ``mean_course_mean`` Hourly temperatures follow an observed average course (calculated for each month), preserving the daily mean value and the diurnal temperature range. min_max_time : str, optional Method to determine the time of minimum and maximum temperature. ``fix``: Minimum/maximum temperature are assumed to occur at 07:00/14:00 local time. ``sun_loc``: Minimum/maximum temperature are assumed to occur at sunrise / solar noon + 2 h. ``sun_loc_shift``: Minimum/maximum temperature are assumed to occur at sunrise / solar noon + monthly mean shift. mod_nighttime : bool, optional Use linear interpolation between minimum and maximum temperature. """ self.data_disagg.temp = melodist.disaggregate_temperature( self.data_daily, method=method, min_max_time=min_max_time, max_delta=self.statistics.temp.max_delta, mean_course=self.statistics.temp.mean_course, sun_times=self.sun_times, mod_nighttime=mod_nighttime )
[ "def", "disaggregate_temperature", "(", "self", ",", "method", "=", "'sine_min_max'", ",", "min_max_time", "=", "'fix'", ",", "mod_nighttime", "=", "False", ")", ":", "self", ".", "data_disagg", ".", "temp", "=", "melodist", ".", "disaggregate_temperature", "(", "self", ".", "data_daily", ",", "method", "=", "method", ",", "min_max_time", "=", "min_max_time", ",", "max_delta", "=", "self", ".", "statistics", ".", "temp", ".", "max_delta", ",", "mean_course", "=", "self", ".", "statistics", ".", "temp", ".", "mean_course", ",", "sun_times", "=", "self", ".", "sun_times", ",", "mod_nighttime", "=", "mod_nighttime", ")" ]
Disaggregate air temperature. Parameters ---------- method : str, optional Disaggregation method. ``sine_min_max`` Hourly temperatures follow a sine function preserving daily minimum and maximum values. (Default) ``sine_mean`` Hourly temperatures follow a sine function preserving the daily mean value and the diurnal temperature range. ``sine`` Same as ``sine_min_max``. ``mean_course_min_max`` Hourly temperatures follow an observed average course (calculated for each month), preserving daily minimum and maximum values. ``mean_course_mean`` Hourly temperatures follow an observed average course (calculated for each month), preserving the daily mean value and the diurnal temperature range. min_max_time : str, optional Method to determine the time of minimum and maximum temperature. ``fix``: Minimum/maximum temperature are assumed to occur at 07:00/14:00 local time. ``sun_loc``: Minimum/maximum temperature are assumed to occur at sunrise / solar noon + 2 h. ``sun_loc_shift``: Minimum/maximum temperature are assumed to occur at sunrise / solar noon + monthly mean shift. mod_nighttime : bool, optional Use linear interpolation between minimum and maximum temperature.
[ "Disaggregate", "air", "temperature", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L245-L296
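Continuing the sketch; `min_max_time='sun_loc'` relies on `station.sun_times`, so `calc_sun_times()` must have been called:

```python
station.calc_sun_times()
station.disaggregate_temperature(method='sine_min_max', min_max_time='sun_loc')
print(station.data_disagg.temp.head())
```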
kristianfoerster/melodist
melodist/station.py
Station.disaggregate_precipitation
def disaggregate_precipitation(self, method='equal', zerodiv='uniform', shift=0, master_precip=None): """ Disaggregate precipitation. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Daily precipitation is distributed equally over the 24 hours of the day. (Default) ``cascade`` Hourly precipitation values are obtained using a cascade model set up using hourly observations. zerodiv : str, optional Method to deal with zero division, relevant for ``method='masterstation'``. ``uniform`` Use uniform distribution. (Default) master_precip : Series, optional Hourly precipitation records from a representative station (required for ``method='masterstation'``). """ if method == 'equal': precip_disagg = melodist.disagg_prec(self.data_daily, method=method, shift=shift) elif method == 'cascade': precip_disagg = pd.Series(index=self.data_disagg.index) for months, stats in zip(self.statistics.precip.months, self.statistics.precip.stats): precip_daily = melodist.seasonal_subset(self.data_daily.precip, months=months) if len(precip_daily) > 1: data = melodist.disagg_prec(precip_daily, method=method, cascade_options=stats, shift=shift, zerodiv=zerodiv) precip_disagg.loc[data.index] = data elif method == 'masterstation': precip_disagg = melodist.precip_master_station(self.data_daily.precip, master_precip, zerodiv) self.data_disagg.precip = precip_disagg
python
def disaggregate_precipitation(self, method='equal', zerodiv='uniform', shift=0, master_precip=None): """ Disaggregate precipitation. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Daily precipitation is distributed equally over the 24 hours of the day. (Default) ``cascade`` Hourly precipitation values are obtained using a cascade model set up using hourly observations. zerodiv : str, optional Method to deal with zero division, relevant for ``method='masterstation'``. ``uniform`` Use uniform distribution. (Default) master_precip : Series, optional Hourly precipitation records from a representative station (required for ``method='masterstation'``). """ if method == 'equal': precip_disagg = melodist.disagg_prec(self.data_daily, method=method, shift=shift) elif method == 'cascade': precip_disagg = pd.Series(index=self.data_disagg.index) for months, stats in zip(self.statistics.precip.months, self.statistics.precip.stats): precip_daily = melodist.seasonal_subset(self.data_daily.precip, months=months) if len(precip_daily) > 1: data = melodist.disagg_prec(precip_daily, method=method, cascade_options=stats, shift=shift, zerodiv=zerodiv) precip_disagg.loc[data.index] = data elif method == 'masterstation': precip_disagg = melodist.precip_master_station(self.data_daily.precip, master_precip, zerodiv) self.data_disagg.precip = precip_disagg
[ "def", "disaggregate_precipitation", "(", "self", ",", "method", "=", "'equal'", ",", "zerodiv", "=", "'uniform'", ",", "shift", "=", "0", ",", "master_precip", "=", "None", ")", ":", "if", "method", "==", "'equal'", ":", "precip_disagg", "=", "melodist", ".", "disagg_prec", "(", "self", ".", "data_daily", ",", "method", "=", "method", ",", "shift", "=", "shift", ")", "elif", "method", "==", "'cascade'", ":", "precip_disagg", "=", "pd", ".", "Series", "(", "index", "=", "self", ".", "data_disagg", ".", "index", ")", "for", "months", ",", "stats", "in", "zip", "(", "self", ".", "statistics", ".", "precip", ".", "months", ",", "self", ".", "statistics", ".", "precip", ".", "stats", ")", ":", "precip_daily", "=", "melodist", ".", "seasonal_subset", "(", "self", ".", "data_daily", ".", "precip", ",", "months", "=", "months", ")", "if", "len", "(", "precip_daily", ")", ">", "1", ":", "data", "=", "melodist", ".", "disagg_prec", "(", "precip_daily", ",", "method", "=", "method", ",", "cascade_options", "=", "stats", ",", "shift", "=", "shift", ",", "zerodiv", "=", "zerodiv", ")", "precip_disagg", ".", "loc", "[", "data", ".", "index", "]", "=", "data", "elif", "method", "==", "'masterstation'", ":", "precip_disagg", "=", "melodist", ".", "precip_master_station", "(", "self", ".", "data_daily", ".", "precip", ",", "master_precip", ",", "zerodiv", ")", "self", ".", "data_disagg", ".", "precip", "=", "precip_disagg" ]
Disaggregate precipitation. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Daily precipitation is distributed equally over the 24 hours of the day. (Default) ``cascade`` Hourly precipitation values are obtained using a cascade model set up using hourly observations. zerodiv : str, optional Method to deal with zero division, relevant for ``method='masterstation'``. ``uniform`` Use uniform distribution. (Default) master_precip : Series, optional Hourly precipitation records from a representative station (required for ``method='masterstation'``).
[ "Disaggregate", "precipitation", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L298-L338
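Continuing the sketch; the `cascade` branch iterates over calibrated seasonal statistics, so a calibration step (call name assumed) must precede it:

```python
station.disaggregate_precipitation(method='equal')  # needs no calibration

station.statistics.calc_precipitation_stats()  # calibration call name assumed
station.disaggregate_precipitation(method='cascade', shift=0)
print(station.data_disagg.precip.head())
```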
kristianfoerster/melodist
melodist/station.py
Station.disaggregate_radiation
def disaggregate_radiation(self, method='pot_rad', pot_rad=None): """ Disaggregate solar radiation. Parameters ---------- method : str, optional Disaggregation method. ``pot_rad`` Calculates potential clear-sky hourly radiation and scales it according to the mean daily radiation. (Default) ``pot_rad_via_ssd`` Calculates potential clear-sky hourly radiation and scales it according to the observed daily sunshine duration. ``pot_rad_via_bc`` Calculates potential clear-sky hourly radiation and scales it according to daily minimum and maximum temperature. ``mean_course`` Hourly radiation follows an observed average course (calculated for each month). pot_rad : Series, optional Hourly values of potential solar radiation. If ``None``, calculated internally. """ if self.sun_times is None: self.calc_sun_times() if pot_rad is None and method != 'mean_course': pot_rad = melodist.potential_radiation(self.data_disagg.index, self.lon, self.lat, self.timezone) self.data_disagg.glob = melodist.disaggregate_radiation( self.data_daily, sun_times=self.sun_times, pot_rad=pot_rad, method=method, angstr_a=self.statistics.glob.angstroem.a, angstr_b=self.statistics.glob.angstroem.b, bristcamp_a=self.statistics.glob.bristcamp.a, bristcamp_c=self.statistics.glob.bristcamp.c, mean_course=self.statistics.glob.mean_course )
python
def disaggregate_radiation(self, method='pot_rad', pot_rad=None): """ Disaggregate solar radiation. Parameters ---------- method : str, optional Disaggregation method. ``pot_rad`` Calculates potential clear-sky hourly radiation and scales it according to the mean daily radiation. (Default) ``pot_rad_via_ssd`` Calculates potential clear-sky hourly radiation and scales it according to the observed daily sunshine duration. ``pot_rad_via_bc`` Calculates potential clear-sky hourly radiation and scales it according to daily minimum and maximum temperature. ``mean_course`` Hourly radiation follows an observed average course (calculated for each month). pot_rad : Series, optional Hourly values of potential solar radiation. If ``None``, calculated internally. """ if self.sun_times is None: self.calc_sun_times() if pot_rad is None and method != 'mean_course': pot_rad = melodist.potential_radiation(self.data_disagg.index, self.lon, self.lat, self.timezone) self.data_disagg.glob = melodist.disaggregate_radiation( self.data_daily, sun_times=self.sun_times, pot_rad=pot_rad, method=method, angstr_a=self.statistics.glob.angstroem.a, angstr_b=self.statistics.glob.angstroem.b, bristcamp_a=self.statistics.glob.bristcamp.a, bristcamp_c=self.statistics.glob.bristcamp.c, mean_course=self.statistics.glob.mean_course )
[ "def", "disaggregate_radiation", "(", "self", ",", "method", "=", "'pot_rad'", ",", "pot_rad", "=", "None", ")", ":", "if", "self", ".", "sun_times", "is", "None", ":", "self", ".", "calc_sun_times", "(", ")", "if", "pot_rad", "is", "None", "and", "method", "!=", "'mean_course'", ":", "pot_rad", "=", "melodist", ".", "potential_radiation", "(", "self", ".", "data_disagg", ".", "index", ",", "self", ".", "lon", ",", "self", ".", "lat", ",", "self", ".", "timezone", ")", "self", ".", "data_disagg", ".", "glob", "=", "melodist", ".", "disaggregate_radiation", "(", "self", ".", "data_daily", ",", "sun_times", "=", "self", ".", "sun_times", ",", "pot_rad", "=", "pot_rad", ",", "method", "=", "method", ",", "angstr_a", "=", "self", ".", "statistics", ".", "glob", ".", "angstroem", ".", "a", ",", "angstr_b", "=", "self", ".", "statistics", ".", "glob", ".", "angstroem", ".", "b", ",", "bristcamp_a", "=", "self", ".", "statistics", ".", "glob", ".", "bristcamp", ".", "a", ",", "bristcamp_c", "=", "self", ".", "statistics", ".", "glob", ".", "bristcamp", ".", "c", ",", "mean_course", "=", "self", ".", "statistics", ".", "glob", ".", "mean_course", ")" ]
Disaggregate solar radiation. Parameters ---------- method : str, optional Disaggregation method. ``pot_rad`` Calculates potential clear-sky hourly radiation and scales it according to the mean daily radiation. (Default) ``pot_rad_via_ssd`` Calculates potential clear-sky hourly radiation and scales it according to the observed daily sunshine duration. ``pot_rad_via_bc`` Calculates potential clear-sky hourly radiation and scales it according to daily minimum and maximum temperature. ``mean_course`` Hourly radiation follows an observed average course (calculated for each month). pot_rad : Series, optional Hourly values of potential solar radiation. If ``None``, calculated internally.
[ "Disaggregate", "solar", "radiation", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L340-L383
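The 'pot_rad' option scales a clear-sky curve so that its daily mean matches the observed daily mean radiation. A minimal standalone sketch of that scaling step, using a synthetic half-sine clear-sky curve (melodist's actual potential_radiation derives this from solar geometry instead):

```python
import numpy as np
import pandas as pd

# Synthetic clear-sky hourly radiation for one day (assumption: a half-sine
# between 05:00 and 19:00 peaking at 800 W/m^2; melodist computes this from
# solar geometry instead).
hours = pd.date_range('2020-06-01', periods=24, freq='h')
pot_rad = pd.Series(np.fmax(0.0, np.sin(np.pi * (hours.hour - 5) / 14)) * 800.0,
                    index=hours)

daily_mean_obs = 250.0  # observed daily mean radiation in W/m^2
# Scale the clear-sky curve so its daily mean equals the observed mean.
glob_hourly = pot_rad * daily_mean_obs / pot_rad.mean()
assert np.isclose(glob_hourly.mean(), daily_mean_obs)
```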
kristianfoerster/melodist
melodist/station.py
Station.interpolate
def interpolate(self, column_hours, method='linear', limit=24, limit_direction='both', **kwargs): """ Wrapper function for ``pandas.Series.interpolate`` that can be used to "disaggregate" values using various interpolation methods. Parameters ---------- column_hours : dict Dictionary containing column names in ``data_daily`` and the hour values they should be associated to. method, limit, limit_direction, **kwargs These parameters are passed on to ``pandas.Series.interpolate``. Examples -------- Assume that ``mystation.data_daily.T7``, ``mystation.data_daily.T14``, and ``mystation.data_daily.T19`` contain air temperature measurements taken at 07:00, 14:00, and 19:00. We can use the interpolation functions provided by pandas/scipy to derive hourly values: >>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}) # linear interpolation (default) >>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}, method='cubic') # cubic spline """ kwargs = dict(kwargs, method=method, limit=limit, limit_direction=limit_direction) data = melodist.util.prepare_interpolation_data(self.data_daily, column_hours) return data.interpolate(**kwargs)
python
def interpolate(self, column_hours, method='linear', limit=24, limit_direction='both', **kwargs): """ Wrapper function for ``pandas.Series.interpolate`` that can be used to "disaggregate" values using various interpolation methods. Parameters ---------- column_hours : dict Dictionary containing column names in ``data_daily`` and the hour values they should be associated to. method, limit, limit_direction, **kwargs These parameters are passed on to ``pandas.Series.interpolate``. Examples -------- Assume that ``mystation.data_daily.T7``, ``mystation.data_daily.T14``, and ``mystation.data_daily.T19`` contain air temperature measurements taken at 07:00, 14:00, and 19:00. We can use the interpolation functions provided by pandas/scipy to derive hourly values: >>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}) # linear interpolation (default) >>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}, method='cubic') # cubic spline """ kwargs = dict(kwargs, method=method, limit=limit, limit_direction=limit_direction) data = melodist.util.prepare_interpolation_data(self.data_daily, column_hours) return data.interpolate(**kwargs)
[ "def", "interpolate", "(", "self", ",", "column_hours", ",", "method", "=", "'linear'", ",", "limit", "=", "24", ",", "limit_direction", "=", "'both'", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "dict", "(", "kwargs", ",", "method", "=", "method", ",", "limit", "=", "limit", ",", "limit_direction", "=", "limit_direction", ")", "data", "=", "melodist", ".", "util", ".", "prepare_interpolation_data", "(", "self", ".", "data_daily", ",", "column_hours", ")", "return", "data", ".", "interpolate", "(", "*", "*", "kwargs", ")" ]
Wrapper function for ``pandas.Series.interpolate`` that can be used to "disaggregate" values using various interpolation methods. Parameters ---------- column_hours : dict Dictionary containing column names in ``data_daily`` and the hour values they should be associated to. method, limit, limit_direction, **kwargs These parameters are passed on to ``pandas.Series.interpolate``. Examples -------- Assume that ``mystation.data_daily.T7``, ``mystation.data_daily.T14``, and ``mystation.data_daily.T19`` contain air temperature measurements taken at 07:00, 14:00, and 19:00. We can use the interpolation functions provided by pandas/scipy to derive hourly values: >>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}) # linear interpolation (default) >>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}, method='cubic') # cubic spline
[ "Wrapper", "function", "for", "pandas", ".", "Series", ".", "interpolate", "that", "can", "be", "used", "to", "disaggregate", "values", "using", "various", "interpolation", "methods", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L385-L412
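The wrapper relies on melodist.util.prepare_interpolation_data, which is not shown here; presumably it places each daily column at its associated hour on an hourly index. A plain-pandas sketch of that idea under this assumption:

```python
import pandas as pd

# Hypothetical daily data with fixed observation hours (assumed layout).
daily = pd.DataFrame({'T7': [2.0, 3.0], 'T14': [10.0, 12.0], 'T19': [6.0, 7.0]},
                     index=pd.date_range('2020-01-01', periods=2, freq='D'))
column_hours = {'T7': 7, 'T14': 14, 'T19': 19}

# Place each daily value at its hour on an hourly index, then interpolate.
hourly_index = pd.date_range(daily.index[0], periods=48, freq='h')
s = pd.Series(index=hourly_index, dtype=float)
for col, hour in column_hours.items():
    s[daily.index + pd.Timedelta(hours=hour)] = daily[col].values

hourly = s.interpolate(method='linear', limit=24, limit_direction='both')
```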
boydgreenfield/query
query/core.py
QueryDbOrm._query_helper
def _query_helper(self, by=None): """ Internal helper for preparing queries. """ if by is None: primary_keys = self.table.primary_key.columns.keys() if len(primary_keys) > 1: warnings.warn("WARNING: MORE THAN 1 PRIMARY KEY FOR TABLE %s. " "USING THE FIRST KEY %s." % (self.table.name, primary_keys[0])) if not primary_keys: raise NoPrimaryKeyException("Table %s needs a primary key for " "the .last() method to work properly. " "Alternatively, specify an ORDER BY " "column with the by= argument. " % self.table.name) id_col = primary_keys[0] else: id_col = by if self.column is None: col = "*" else: col = self.column.name return col, id_col
python
def _query_helper(self, by=None): """ Internal helper for preparing queries. """ if by is None: primary_keys = self.table.primary_key.columns.keys() if len(primary_keys) > 1: warnings.warn("WARNING: MORE THAN 1 PRIMARY KEY FOR TABLE %s. " "USING THE FIRST KEY %s." % (self.table.name, primary_keys[0])) if not primary_keys: raise NoPrimaryKeyException("Table %s needs a primary key for" "the .last() method to work properly. " "Alternatively, specify an ORDER BY " "column with the by= argument. " % self.table.name) id_col = primary_keys[0] else: id_col = by if self.column is None: col = "*" else: col = self.column.name return col, id_col
[ "def", "_query_helper", "(", "self", ",", "by", "=", "None", ")", ":", "if", "by", "is", "None", ":", "primary_keys", "=", "self", ".", "table", ".", "primary_key", ".", "columns", ".", "keys", "(", ")", "if", "len", "(", "primary_keys", ")", ">", "1", ":", "warnings", ".", "warn", "(", "\"WARNING: MORE THAN 1 PRIMARY KEY FOR TABLE %s. \"", "\"USING THE FIRST KEY %s.\"", "%", "(", "self", ".", "table", ".", "name", ",", "primary_keys", "[", "0", "]", ")", ")", "if", "not", "primary_keys", ":", "raise", "NoPrimaryKeyException", "(", "\"Table %s needs a primary key for\"", "\"the .last() method to work properly. \"", "\"Alternatively, specify an ORDER BY \"", "\"column with the by= argument. \"", "%", "self", ".", "table", ".", "name", ")", "id_col", "=", "primary_keys", "[", "0", "]", "else", ":", "id_col", "=", "by", "if", "self", ".", "column", "is", "None", ":", "col", "=", "\"*\"", "else", ":", "col", "=", "self", ".", "column", ".", "name", "return", "col", ",", "id_col" ]
Internal helper for preparing queries.
[ "Internal", "helper", "for", "preparing", "queries", "." ]
train
https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L72-L99
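The selection logic reduces to two choices: the ORDER BY column (the first primary key unless by= is given) and the projected column ("*" for a table, the column name otherwise). A standalone re-implementation of just that logic:

```python
# Re-implementation of the column/key selection, outside the class.
def pick_columns(primary_keys, by=None, column=None):
    id_col = by if by is not None else primary_keys[0]  # first PK by default
    col = column if column is not None else "*"
    return col, id_col

pick_columns(['id'])             # ('*', 'id')
pick_columns(['id'], by='ts')    # ('*', 'ts')
```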
boydgreenfield/query
query/core.py
QueryDbOrm.head
def head(self, n=10, by=None, **kwargs): """ Get the first n entries for a given Table/Column. Additional keywords passed to QueryDb.query(). Requires that the given table has a primary key specified. """ col, id_col = self._query_helper(by=by) select = ("SELECT %s FROM %s ORDER BY %s ASC LIMIT %d" % (col, self.table.name, id_col, n)) return self._db.query(select, **kwargs)
python
def head(self, n=10, by=None, **kwargs): """ Get the first n entries for a given Table/Column. Additional keywords passed to QueryDb.query(). Requires that the given table has a primary key specified. """ col, id_col = self._query_helper(by=by) select = ("SELECT %s FROM %s ORDER BY %s ASC LIMIT %d" % (col, self.table.name, id_col, n)) return self._db.query(select, **kwargs)
[ "def", "head", "(", "self", ",", "n", "=", "10", ",", "by", "=", "None", ",", "*", "*", "kwargs", ")", ":", "col", ",", "id_col", "=", "self", ".", "_query_helper", "(", "by", "=", "by", ")", "select", "=", "(", "\"SELECT %s FROM %s ORDER BY %s ASC LIMIT %d\"", "%", "(", "col", ",", "self", ".", "table", ".", "name", ",", "id_col", ",", "n", ")", ")", "return", "self", ".", "_db", ".", "query", "(", "select", ",", "*", "*", "kwargs", ")" ]
Get the first n entries for a given Table/Column. Additional keywords passed to QueryDb.query(). Requires that the given table has a primary key specified.
[ "Get", "the", "first", "n", "entries", "for", "a", "given", "Table", "/", "Column", ".", "Additional", "keywords", "passed", "to", "QueryDb", ".", "query", "()", "." ]
train
https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L101-L113
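The assembled statement is plain SQL, so its effect is easy to check directly against an in-memory SQLite database:

```python
import sqlite3
import pandas as pd

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE events (id INTEGER PRIMARY KEY, name TEXT)')
conn.executemany('INSERT INTO events (name) VALUES (?)',
                 [('a',), ('b',), ('c',)])

# The query .head(2) would build for this table:
df = pd.read_sql('SELECT * FROM events ORDER BY id ASC LIMIT 2', conn)
print(df)  # first two rows ordered by the primary key
```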
boydgreenfield/query
query/core.py
QueryDbOrm.last
def last(self, n=10, by=None, **kwargs): """ Alias for .tail(). """ return self.tail(n=n, by=by, **kwargs)
python
def last(self, n=10, by=None, **kwargs): """ Alias for .tail(). """ return self.tail(n=n, by=by, **kwargs)
[ "def", "last", "(", "self", ",", "n", "=", "10", ",", "by", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "tail", "(", "n", "=", "n", ",", "by", "=", "by", ",", "*", "*", "kwargs", ")" ]
Alias for .tail().
[ "Alias", "for", ".", "tail", "()", "." ]
train
https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L134-L138
boydgreenfield/query
query/core.py
QueryDbOrm.where
def where(self, where_string, **kwargs): """ Select from a given Table or Column with the specified WHERE clause string. Additional keywords are passed to ExploreSqlDB.query(). For convenience, if there is no '=', '>', '<', 'like', or 'LIKE' clause in the WHERE statement .where() tries to match the input string against the primary key column of the Table. Args: where_string (str): Where clause for the query against the Table or Column Kwargs: **kwargs: Optional **kwargs passed to the QueryDb.query() call Returns: result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame (default) or sqlalchemy result. """ col, id_col = self._query_helper(by=None) where_string = str(where_string) # Coerce here, for .__contains___ where_operators = ["=", ">", "<", "LIKE", "like"] if np.any([where_string.__contains__(w) for w in where_operators]): select = ("SELECT %s FROM %s WHERE %s" % (col, self.table.name, where_string)) else: select = ("SELECT %s FROM %s WHERE %s = %s" % (col, self.table.name, id_col, where_string)) return self._db.query(select, **kwargs)
python
def where(self, where_string, **kwargs): """ Select from a given Table or Column with the specified WHERE clause string. Additional keywords are passed to ExploreSqlDB.query(). For convenience, if there is no '=', '>', '<', 'like', or 'LIKE' clause in the WHERE statement .where() tries to match the input string against the primary key column of the Table. Args: where_string (str): Where clause for the query against the Table or Column Kwargs: **kwargs: Optional **kwargs passed to the QueryDb.query() call Returns: result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame (default) or sqlalchemy result. """ col, id_col = self._query_helper(by=None) where_string = str(where_string) # Coerce here, for .__contains___ where_operators = ["=", ">", "<", "LIKE", "like"] if np.any([where_string.__contains__(w) for w in where_operators]): select = ("SELECT %s FROM %s WHERE %s" % (col, self.table.name, where_string)) else: select = ("SELECT %s FROM %s WHERE %s = %s" % (col, self.table.name, id_col, where_string)) return self._db.query(select, **kwargs)
[ "def", "where", "(", "self", ",", "where_string", ",", "*", "*", "kwargs", ")", ":", "col", ",", "id_col", "=", "self", ".", "_query_helper", "(", "by", "=", "None", ")", "where_string", "=", "str", "(", "where_string", ")", "# Coerce here, for .__contains___", "where_operators", "=", "[", "\"=\"", ",", "\">\"", ",", "\"<\"", ",", "\"LIKE\"", ",", "\"like\"", "]", "if", "np", ".", "any", "(", "[", "where_string", ".", "__contains__", "(", "w", ")", "for", "w", "in", "where_operators", "]", ")", ":", "select", "=", "(", "\"SELECT %s FROM %s WHERE %s\"", "%", "(", "col", ",", "self", ".", "table", ".", "name", ",", "where_string", ")", ")", "else", ":", "select", "=", "(", "\"SELECT %s FROM %s WHERE %s = %s\"", "%", "(", "col", ",", "self", ".", "table", ".", "name", ",", "id_col", ",", "where_string", ")", ")", "return", "self", ".", "_db", ".", "query", "(", "select", ",", "*", "*", "kwargs", ")" ]
Select from a given Table or Column with the specified WHERE clause string. Additional keywords are passed to ExploreSqlDB.query(). For convenience, if there is no '=', '>', '<', 'like', or 'LIKE' clause in the WHERE statement .where() tries to match the input string against the primary key column of the Table. Args: where_string (str): Where clause for the query against the Table or Column Kwargs: **kwargs: Optional **kwargs passed to the QueryDb.query() call Returns: result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame (default) or sqlalchemy result.
[ "Select", "from", "a", "given", "Table", "or", "Column", "with", "the", "specified", "WHERE", "clause", "string", ".", "Additional", "keywords", "are", "passed", "to", "ExploreSqlDB", ".", "query", "()", ".", "For", "convenience", "if", "there", "is", "no", "=", ">", "<", "like", "or", "LIKE", "clause", "in", "the", "WHERE", "statement", ".", "where", "()", "tries", "to", "match", "the", "input", "string", "against", "the", "primary", "key", "column", "of", "the", "Table", "." ]
train
https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L140-L170
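The convenience behaviour hinges on a simple operator scan. A standalone re-implementation of that heuristic (note that, as in the original, the bare-value branch interpolates the value straight into the SQL string):

```python
# Re-implementation of the operator-detection heuristic in .where().
def build_where(table, col, id_col, where_string):
    where_string = str(where_string)
    if any(op in where_string for op in ('=', '>', '<', 'LIKE', 'like')):
        return "SELECT %s FROM %s WHERE %s" % (col, table, where_string)
    # No operator found: match the bare value against the primary key.
    return "SELECT %s FROM %s WHERE %s = %s" % (col, table, id_col, where_string)

build_where('events', '*', 'id', "name like 'a%'")  # clause used as given
build_where('events', '*', 'id', 42)                # becomes: WHERE id = 42
```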
boydgreenfield/query
query/core.py
QueryDb.query
def query(self, sql_query, return_as="dataframe"): """ Execute a raw SQL query against the SQL DB. Args: sql_query (str): A raw SQL query to execute. Kwargs: return_as (str): Specify what type of object should be returned. The following are acceptable types: - "dataframe": pandas.DataFrame or None if no matching query - "result": sqlalchemy.engine.result.ResultProxy Returns: result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame (default) or sqlalchemy result (specified with return_as="result") Raises: QueryDbError """ if isinstance(sql_query, str): pass elif isinstance(sql_query, unicode): sql_query = str(sql_query) else: raise QueryDbError("query() requires a str or unicode input.") query = sqlalchemy.sql.text(sql_query) if return_as.upper() in ["DF", "DATAFRAME"]: return self._to_df(query, self._engine) elif return_as.upper() in ["RESULT", "RESULTPROXY"]: with self._engine.connect() as conn: result = conn.execute(query) return result else: raise QueryDbError("Other return types not implemented.")
python
def query(self, sql_query, return_as="dataframe"): """ Execute a raw SQL query against the SQL DB. Args: sql_query (str): A raw SQL query to execute. Kwargs: return_as (str): Specify what type of object should be returned. The following are acceptable types: - "dataframe": pandas.DataFrame or None if no matching query - "result": sqlalchemy.engine.result.ResultProxy Returns: result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame (default) or sqlalchemy result (specified with return_as="result") Raises: QueryDbError """ if isinstance(sql_query, str): pass elif isinstance(sql_query, unicode): sql_query = str(sql_query) else: raise QueryDbError("query() requires a str or unicode input.") query = sqlalchemy.sql.text(sql_query) if return_as.upper() in ["DF", "DATAFRAME"]: return self._to_df(query, self._engine) elif return_as.upper() in ["RESULT", "RESULTPROXY"]: with self._engine.connect() as conn: result = conn.execute(query) return result else: raise QueryDbError("Other return types not implemented.")
[ "def", "query", "(", "self", ",", "sql_query", ",", "return_as", "=", "\"dataframe\"", ")", ":", "if", "isinstance", "(", "sql_query", ",", "str", ")", ":", "pass", "elif", "isinstance", "(", "sql_query", ",", "unicode", ")", ":", "sql_query", "=", "str", "(", "sql_query", ")", "else", ":", "raise", "QueryDbError", "(", "\"query() requires a str or unicode input.\"", ")", "query", "=", "sqlalchemy", ".", "sql", ".", "text", "(", "sql_query", ")", "if", "return_as", ".", "upper", "(", ")", "in", "[", "\"DF\"", ",", "\"DATAFRAME\"", "]", ":", "return", "self", ".", "_to_df", "(", "query", ",", "self", ".", "_engine", ")", "elif", "return_as", ".", "upper", "(", ")", "in", "[", "\"RESULT\"", ",", "\"RESULTPROXY\"", "]", ":", "with", "self", ".", "_engine", ".", "connect", "(", ")", "as", "conn", ":", "result", "=", "conn", ".", "execute", "(", "query", ")", "return", "result", "else", ":", "raise", "QueryDbError", "(", "\"Other return types not implemented.\"", ")" ]
Execute a raw SQL query against the SQL DB. Args: sql_query (str): A raw SQL query to execute. Kwargs: return_as (str): Specify what type of object should be returned. The following are acceptable types: - "dataframe": pandas.DataFrame or None if no matching query - "result": sqlalchemy.engine.result.ResultProxy Returns: result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame (default) or sqlalchemy result (specified with return_as="result") Raises: QueryDbError
[ "Execute", "a", "raw", "SQL", "query", "against", "the", "the", "SQL", "DB", "." ]
train
https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L298-L335
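For the default return_as="dataframe" path, the behaviour amounts to running a text query through pandas' SQL reader. A minimal sketch against an in-memory SQLite engine:

```python
import pandas as pd
import sqlalchemy

engine = sqlalchemy.create_engine('sqlite://')  # in-memory database
with engine.begin() as conn:  # begin() commits the inserts on exit
    conn.execute(sqlalchemy.text('CREATE TABLE t (x INTEGER)'))
    conn.execute(sqlalchemy.text('INSERT INTO t VALUES (1), (2)'))

df = pd.read_sql('SELECT * FROM t', engine)  # DataFrame with two rows
```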
boydgreenfield/query
query/core.py
QueryDb._set_metadata
def _set_metadata(self): """ Internal helper to set metadata attributes. """ meta = QueryDbMeta() with self._engine.connect() as conn: meta.bind = conn meta.reflect() self._meta = meta # Set an inspect attribute, whose subattributes # return individual tables / columns. Tables and columns # are special classes with .last() and other convenience methods self.inspect = QueryDbAttributes() for table in self._meta.tables: setattr(self.inspect, table, QueryDbOrm(self._meta.tables[table], self)) table_attr = getattr(self.inspect, table) table_cols = table_attr.table.columns for col in table_cols.keys(): setattr(table_attr, col, QueryDbOrm(table_cols[col], self)) # Finally add some summary info: # Table name # Primary Key item or list # N of Cols # Distinct Col Values (class so NVARCHAR(20) and NVARCHAR(30) are not different) primary_keys = table_attr.table.primary_key.columns.keys() self._summary_info.append(( table, primary_keys[0] if len(primary_keys) == 1 else primary_keys, len(table_cols), len(set([x.type.__class__ for x in table_cols.values()])), ))
python
def _set_metadata(self): """ Internal helper to set metadata attributes. """ meta = QueryDbMeta() with self._engine.connect() as conn: meta.bind = conn meta.reflect() self._meta = meta # Set an inspect attribute, whose subattributes # return individual tables / columns. Tables and columns # are special classes with .last() and other convenience methods self.inspect = QueryDbAttributes() for table in self._meta.tables: setattr(self.inspect, table, QueryDbOrm(self._meta.tables[table], self)) table_attr = getattr(self.inspect, table) table_cols = table_attr.table.columns for col in table_cols.keys(): setattr(table_attr, col, QueryDbOrm(table_cols[col], self)) # Finally add some summary info: # Table name # Primary Key item or list # N of Cols # Distinct Col Values (class so NVARCHAR(20) and NVARCHAR(30) are not different) primary_keys = table_attr.table.primary_key.columns.keys() self._summary_info.append(( table, primary_keys[0] if len(primary_keys) == 1 else primary_keys, len(table_cols), len(set([x.type.__class__ for x in table_cols.values()])), ))
[ "def", "_set_metadata", "(", "self", ")", ":", "meta", "=", "QueryDbMeta", "(", ")", "with", "self", ".", "_engine", ".", "connect", "(", ")", "as", "conn", ":", "meta", ".", "bind", "=", "conn", "meta", ".", "reflect", "(", ")", "self", ".", "_meta", "=", "meta", "# Set an inspect attribute, whose subattributes", "# return individual tables / columns. Tables and columns", "# are special classes with .last() and other convenience methods", "self", ".", "inspect", "=", "QueryDbAttributes", "(", ")", "for", "table", "in", "self", ".", "_meta", ".", "tables", ":", "setattr", "(", "self", ".", "inspect", ",", "table", ",", "QueryDbOrm", "(", "self", ".", "_meta", ".", "tables", "[", "table", "]", ",", "self", ")", ")", "table_attr", "=", "getattr", "(", "self", ".", "inspect", ",", "table", ")", "table_cols", "=", "table_attr", ".", "table", ".", "columns", "for", "col", "in", "table_cols", ".", "keys", "(", ")", ":", "setattr", "(", "table_attr", ",", "col", ",", "QueryDbOrm", "(", "table_cols", "[", "col", "]", ",", "self", ")", ")", "# Finally add some summary info:", "# Table name", "# Primary Key item or list", "# N of Cols", "# Distinct Col Values (class so NVARCHAR(20) and NVARCHAR(30) are not different)", "primary_keys", "=", "table_attr", ".", "table", ".", "primary_key", ".", "columns", ".", "keys", "(", ")", "self", ".", "_summary_info", ".", "append", "(", "(", "table", ",", "primary_keys", "[", "0", "]", "if", "len", "(", "primary_keys", ")", "==", "1", "else", "primary_keys", ",", "len", "(", "table_cols", ")", ",", "len", "(", "set", "(", "[", "x", ".", "type", ".", "__class__", "for", "x", "in", "table_cols", ".", "values", "(", ")", "]", ")", ")", ",", ")", ")" ]
Internal helper to set metadata attributes.
[ "Internal", "helper", "to", "set", "metadata", "attributes", "." ]
train
https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L337-L373
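The core of the method is SQLAlchemy schema reflection; everything else hangs attributes off the reflected tables. Reflection in isolation:

```python
import sqlalchemy

engine = sqlalchemy.create_engine('sqlite://')
with engine.begin() as conn:
    conn.execute(sqlalchemy.text(
        'CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)'))

meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)  # discover tables and columns from the DB
users = meta.tables['users']
print(users.primary_key.columns.keys())  # ['id']
```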
boydgreenfield/query
query/core.py
QueryDb._to_df
def _to_df(self, query, conn, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None): """ Internal convert-to-DataFrame convenience wrapper. """ return pd.io.sql.read_sql(str(query), conn, index_col=index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns)
python
def _to_df(self, query, conn, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None): """ Internal convert-to-DataFrame convenience wrapper. """ return pd.io.sql.read_sql(str(query), conn, index_col=index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns)
[ "def", "_to_df", "(", "self", ",", "query", ",", "conn", ",", "index_col", "=", "None", ",", "coerce_float", "=", "True", ",", "params", "=", "None", ",", "parse_dates", "=", "None", ",", "columns", "=", "None", ")", ":", "return", "pd", ".", "io", ".", "sql", ".", "read_sql", "(", "str", "(", "query", ")", ",", "conn", ",", "index_col", "=", "index_col", ",", "coerce_float", "=", "coerce_float", ",", "params", "=", "params", ",", "parse_dates", "=", "parse_dates", ",", "columns", "=", "columns", ")" ]
Internal convert-to-DataFrame convenience wrapper.
[ "Internal", "convert", "-", "to", "-", "DataFrame", "convenience", "wrapper", "." ]
train
https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L375-L382
kristianfoerster/melodist
melodist/temperature.py
disaggregate_temperature
def disaggregate_temperature(data_daily, method='sine_min_max', min_max_time='fix', mod_nighttime=False, max_delta=None, mean_course=None, sun_times=None): """The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times() """ if method not in ( 'sine_min_max', 'sine_mean', 'sine', 'mean_course_min_max', 'mean_course_mean', ): raise ValueError('Invalid option') temp_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index)) if method in ('sine_min_max', 'sine_mean', 'sine'): # for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures hours_per_day = 24 default_shift_hours = 2 daylength_thres = 3 # min / max hour during polar night assumption min_loc_polar = 6 max_loc_polar = 18 locdf = pd.DataFrame( index=data_daily.index, columns=[ 'min_loc', 'max_loc', 'min_val_before', 'min_val_cur', 'min_val_next', 'max_val_before', 'max_val_cur', 'max_val_next', 'mean_val_cur', ] ) if min_max_time == 'fix': # take fixed location for minimum and maximum locdf.min_loc = 7 locdf.max_loc = 14 elif min_max_time == 'sun_loc': # take location for minimum and maximum by sunrise / sunnoon + 2h locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = sun_times.sunnoon.round() + default_shift_hours # sun noon round to full hour + fix 2h elif min_max_time == 'sun_loc_shift': # take location for minimum and maximum by sunrise / sunnoon + monthly delta locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = (sun_times.sunnoon + max_delta[locdf.index.month].values).round() # sun noon + shift derived from observed hourly data, round to full hour pos = locdf.min_loc > locdf.max_loc locdf.loc[pos, 'max_loc'] = sun_times.sunnoon[pos].round() + default_shift_hours # standard shift in this case locdf.min_loc = locdf.min_loc.astype(int) locdf.max_loc = locdf.max_loc.astype(int) locdf.min_val_cur = data_daily.tmin locdf.max_val_cur = data_daily.tmax locdf.mean_val_cur = data_daily.temp locdf.min_val_next = data_daily.tmin.shift(-1, 'D') locdf.max_val_next = data_daily.tmax.shift(-1, 'D') locdf.loc[locdf.index[-1], 'min_val_next'] = locdf.min_val_cur.iloc[-1] locdf.loc[locdf.index[-1], 'max_val_next'] = locdf.max_val_cur.iloc[-1] locdf.min_val_before = data_daily.tmin.shift(1, 'D') locdf.max_val_before = data_daily.tmax.shift(1, 'D') locdf.loc[locdf.index[0], 'min_val_before'] = locdf.min_val_cur.iloc[0] locdf.loc[locdf.index[0], 'max_val_before'] = locdf.max_val_cur.iloc[0] locdf_day = locdf locdf = locdf.reindex(temp_disagg.index, method='ffill') # whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting # once we have passed the maximum value use the minimum for next day to ensure smooth transitions min_val = locdf.min_val_next.copy() min_val[min_val.index.hour < locdf.max_loc] = locdf.min_val_cur # whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting # once we have passed the minimum value use the maximum for the current day to ensure smooth transitions max_val = 
locdf.max_val_cur.copy() max_val[max_val.index.hour < locdf.min_loc] = locdf.max_val_before temp_disagg = pd.Series(index=min_val.index) if method in ('sine_min_max', 'sine'): delta_val = max_val - min_val v_trans = min_val + delta_val / 2. if mod_nighttime: before_min = locdf.index.hour <= locdf.min_loc between_min_max = (locdf.index.hour > locdf.min_loc) & (locdf.index.hour < locdf.max_loc) after_max = locdf.index.hour >= locdf.max_loc temp_disagg[before_min] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (hours_per_day - locdf.max_loc + locdf.index.hour)) temp_disagg[between_min_max] = v_trans + delta_val / 2. * np.cos(1.25 * np.pi + 0.75 * np.pi / (locdf.max_loc - locdf.min_loc) * (locdf.index.hour - locdf.min_loc)) temp_disagg[after_max] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (locdf.index.hour - locdf.max_loc)) else: temp_disagg[:] = v_trans + (delta_val / 2.) * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) elif method == 'sine_mean': dtr = locdf.max_val_cur - locdf.min_val_cur temp_disagg[:] = locdf.mean_val_cur + dtr / 2. * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) polars = sun_times.daylength < daylength_thres if polars.sum() > 0: # during polar night, no diurnal variation of temperature is applied # instead the daily average calculated using tmin and tmax is applied polars_index_hourly = melodist.util.hourly_index(polars[polars].index) temp_disagg.loc[polars_index_hourly] = np.nan avg_before = (locdf_day.min_val_before + locdf_day.max_val_before) / 2. avg_cur = (locdf_day.min_val_cur + locdf_day.max_val_cur) / 2. getting_warmers = polars & (avg_before <= avg_cur) getting_colders = polars & ~(avg_before <= avg_cur) getting_warmers_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_warmers[getting_warmers].index]) getting_warmers_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_warmers[getting_warmers].index]) temp_disagg[getting_warmers_min_loc] = locdf_day.min_val_cur[getting_warmers].values temp_disagg[getting_warmers_max_loc] = locdf_day.max_val_cur[getting_warmers].values getting_colders_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_colders[getting_colders].index]) getting_colders_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_colders[getting_colders].index]) temp_disagg[getting_colders_min_loc] = locdf_day.max_val_cur[getting_colders].values temp_disagg[getting_colders_max_loc] = locdf_day.min_val_cur[getting_colders].values temp_polars = temp_disagg.loc[polars_index_hourly].copy() transition_days = polars[polars.diff() == True].astype(int) # 0 where transition from polar to "normal" mode, 1 where transition from normal to polar if len(transition_days) > 0: polar_to_normal_days = transition_days.index[transition_days == 0] normal_to_polar_days = transition_days.index[transition_days == 1] - pd.Timedelta(days=1) add_days = polar_to_normal_days.union(normal_to_polar_days) temp_polars = temp_polars.append(temp_disagg[melodist.util.hourly_index(add_days)]).sort_index() for day in polar_to_normal_days: min_loc = int(locdf.loc[day].min_loc) temp_polars[day.replace(hour=0):day.replace(hour=min_loc) - pd.Timedelta(hours=1)] = np.nan temp_polars[day.replace(hour=min_loc)] = locdf.min_val_cur[day] for day in normal_to_polar_days: max_loc = int(locdf.loc[day].max_loc) temp_polars[day.replace(hour=max_loc) + 
pd.Timedelta(hours=1):day.replace(hour=23)] = np.nan temp_interp = temp_polars.interpolate(method='linear', limit=23) temp_disagg[temp_interp.index] = temp_interp elif method == 'mean_course_min_max': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) df = pd.DataFrame(index=temp_disagg.index) df['normval'] = mean_course.unstack().loc[list(zip(df.index.month, df.index.hour))].values df['tmin'] = data_daily_as_hourly.tmin df['tmax'] = data_daily_as_hourly.tmax temp_disagg[:] = df.normval * (df.tmax - df.tmin) + df.tmin elif method == 'mean_course_mean': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) dtr = data_daily_as_hourly.tmax - data_daily_as_hourly.tmin mc = pd.Series(index=temp_disagg.index) mean_course_zeromean = mean_course - mean_course.mean() # shift mean course so that the daily mean is 0 mc[:] = mean_course_zeromean.unstack().loc[list(zip(temp_disagg.index.month, temp_disagg.index.hour))].values temp_disagg[:] = data_daily_as_hourly.temp + dtr * mc return temp_disagg
python
def disaggregate_temperature(data_daily, method='sine_min_max', min_max_time='fix', mod_nighttime=False, max_delta=None, mean_course=None, sun_times=None): """The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times() """ if method not in ( 'sine_min_max', 'sine_mean', 'sine', 'mean_course_min_max', 'mean_course_mean', ): raise ValueError('Invalid option') temp_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index)) if method in ('sine_min_max', 'sine_mean', 'sine'): # for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures hours_per_day = 24 default_shift_hours = 2 daylength_thres = 3 # min / max hour during polar night assumption min_loc_polar = 6 max_loc_polar = 18 locdf = pd.DataFrame( index=data_daily.index, columns=[ 'min_loc', 'max_loc', 'min_val_before', 'min_val_cur', 'min_val_next', 'max_val_before', 'max_val_cur', 'max_val_next', 'mean_val_cur', ] ) if min_max_time == 'fix': # take fixed location for minimum and maximum locdf.min_loc = 7 locdf.max_loc = 14 elif min_max_time == 'sun_loc': # take location for minimum and maximum by sunrise / sunnoon + 2h locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = sun_times.sunnoon.round() + default_shift_hours # sun noon round to full hour + fix 2h elif min_max_time == 'sun_loc_shift': # take location for minimum and maximum by sunrise / sunnoon + monthly delta locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = (sun_times.sunnoon + max_delta[locdf.index.month].values).round() # sun noon + shift derived from observed hourly data, round to full hour pos = locdf.min_loc > locdf.max_loc locdf.loc[pos, 'max_loc'] = sun_times.sunnoon[pos].round() + default_shift_hours # standard shift in this case locdf.min_loc = locdf.min_loc.astype(int) locdf.max_loc = locdf.max_loc.astype(int) locdf.min_val_cur = data_daily.tmin locdf.max_val_cur = data_daily.tmax locdf.mean_val_cur = data_daily.temp locdf.min_val_next = data_daily.tmin.shift(-1, 'D') locdf.max_val_next = data_daily.tmax.shift(-1, 'D') locdf.loc[locdf.index[-1], 'min_val_next'] = locdf.min_val_cur.iloc[-1] locdf.loc[locdf.index[-1], 'max_val_next'] = locdf.max_val_cur.iloc[-1] locdf.min_val_before = data_daily.tmin.shift(1, 'D') locdf.max_val_before = data_daily.tmax.shift(1, 'D') locdf.loc[locdf.index[0], 'min_val_before'] = locdf.min_val_cur.iloc[0] locdf.loc[locdf.index[0], 'max_val_before'] = locdf.max_val_cur.iloc[0] locdf_day = locdf locdf = locdf.reindex(temp_disagg.index, method='ffill') # whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting # once we have passed the maximum value use the minimum for next day to ensure smooth transitions min_val = locdf.min_val_next.copy() min_val[min_val.index.hour < locdf.max_loc] = locdf.min_val_cur # whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting # once we have passed the minimum value use the maximum for the current day to ensure smooth transitions max_val = 
locdf.max_val_cur.copy() max_val[max_val.index.hour < locdf.min_loc] = locdf.max_val_before temp_disagg = pd.Series(index=min_val.index) if method in ('sine_min_max', 'sine'): delta_val = max_val - min_val v_trans = min_val + delta_val / 2. if mod_nighttime: before_min = locdf.index.hour <= locdf.min_loc between_min_max = (locdf.index.hour > locdf.min_loc) & (locdf.index.hour < locdf.max_loc) after_max = locdf.index.hour >= locdf.max_loc temp_disagg[before_min] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (hours_per_day - locdf.max_loc + locdf.index.hour)) temp_disagg[between_min_max] = v_trans + delta_val / 2. * np.cos(1.25 * np.pi + 0.75 * np.pi / (locdf.max_loc - locdf.min_loc) * (locdf.index.hour - locdf.min_loc)) temp_disagg[after_max] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (locdf.index.hour - locdf.max_loc)) else: temp_disagg[:] = v_trans + (delta_val / 2.) * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) elif method == 'sine_mean': dtr = locdf.max_val_cur - locdf.min_val_cur temp_disagg[:] = locdf.mean_val_cur + dtr / 2. * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) polars = sun_times.daylength < daylength_thres if polars.sum() > 0: # during polar night, no diurnal variation of temperature is applied # instead the daily average calculated using tmin and tmax is applied polars_index_hourly = melodist.util.hourly_index(polars[polars].index) temp_disagg.loc[polars_index_hourly] = np.nan avg_before = (locdf_day.min_val_before + locdf_day.max_val_before) / 2. avg_cur = (locdf_day.min_val_cur + locdf_day.max_val_cur) / 2. getting_warmers = polars & (avg_before <= avg_cur) getting_colders = polars & ~(avg_before <= avg_cur) getting_warmers_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_warmers[getting_warmers].index]) getting_warmers_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_warmers[getting_warmers].index]) temp_disagg[getting_warmers_min_loc] = locdf_day.min_val_cur[getting_warmers].values temp_disagg[getting_warmers_max_loc] = locdf_day.max_val_cur[getting_warmers].values getting_colders_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_colders[getting_colders].index]) getting_colders_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_colders[getting_colders].index]) temp_disagg[getting_colders_min_loc] = locdf_day.max_val_cur[getting_colders].values temp_disagg[getting_colders_max_loc] = locdf_day.min_val_cur[getting_colders].values temp_polars = temp_disagg.loc[polars_index_hourly].copy() transition_days = polars[polars.diff() == True].astype(int) # 0 where transition from polar to "normal" mode, 1 where transition from normal to polar if len(transition_days) > 0: polar_to_normal_days = transition_days.index[transition_days == 0] normal_to_polar_days = transition_days.index[transition_days == 1] - pd.Timedelta(days=1) add_days = polar_to_normal_days.union(normal_to_polar_days) temp_polars = temp_polars.append(temp_disagg[melodist.util.hourly_index(add_days)]).sort_index() for day in polar_to_normal_days: min_loc = int(locdf.loc[day].min_loc) temp_polars[day.replace(hour=0):day.replace(hour=min_loc) - pd.Timedelta(hours=1)] = np.nan temp_polars[day.replace(hour=min_loc)] = locdf.min_val_cur[day] for day in normal_to_polar_days: max_loc = int(locdf.loc[day].max_loc) temp_polars[day.replace(hour=max_loc) + 
pd.Timedelta(hours=1):day.replace(hour=23)] = np.nan temp_interp = temp_polars.interpolate(method='linear', limit=23) temp_disagg[temp_interp.index] = temp_interp elif method == 'mean_course_min_max': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) df = pd.DataFrame(index=temp_disagg.index) df['normval'] = mean_course.unstack().loc[list(zip(df.index.month, df.index.hour))].values df['tmin'] = data_daily_as_hourly.tmin df['tmax'] = data_daily_as_hourly.tmax temp_disagg[:] = df.normval * (df.tmax - df.tmin) + df.tmin elif method == 'mean_course_mean': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) dtr = data_daily_as_hourly.tmax - data_daily_as_hourly.tmin mc = pd.Series(index=temp_disagg.index) mean_course_zeromean = mean_course - mean_course.mean() # shift mean course so that the daily mean is 0 mc[:] = mean_course_zeromean.unstack().loc[list(zip(temp_disagg.index.month, temp_disagg.index.hour))].values temp_disagg[:] = data_daily_as_hourly.temp + dtr * mc return temp_disagg
[ "def", "disaggregate_temperature", "(", "data_daily", ",", "method", "=", "'sine_min_max'", ",", "min_max_time", "=", "'fix'", ",", "mod_nighttime", "=", "False", ",", "max_delta", "=", "None", ",", "mean_course", "=", "None", ",", "sun_times", "=", "None", ")", ":", "if", "method", "not", "in", "(", "'sine_min_max'", ",", "'sine_mean'", ",", "'sine'", ",", "'mean_course_min_max'", ",", "'mean_course_mean'", ",", ")", ":", "raise", "ValueError", "(", "'Invalid option'", ")", "temp_disagg", "=", "pd", ".", "Series", "(", "index", "=", "melodist", ".", "util", ".", "hourly_index", "(", "data_daily", ".", "index", ")", ")", "if", "method", "in", "(", "'sine_min_max'", ",", "'sine_mean'", ",", "'sine'", ")", ":", "# for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures", "hours_per_day", "=", "24", "default_shift_hours", "=", "2", "daylength_thres", "=", "3", "# min / max hour during polar night assumption", "min_loc_polar", "=", "6", "max_loc_polar", "=", "18", "locdf", "=", "pd", ".", "DataFrame", "(", "index", "=", "data_daily", ".", "index", ",", "columns", "=", "[", "'min_loc'", ",", "'max_loc'", ",", "'min_val_before'", ",", "'min_val_cur'", ",", "'min_val_next'", ",", "'max_val_before'", ",", "'max_val_cur'", ",", "'max_val_next'", ",", "'mean_val_cur'", ",", "]", ")", "if", "min_max_time", "==", "'fix'", ":", "# take fixed location for minimum and maximum", "locdf", ".", "min_loc", "=", "7", "locdf", ".", "max_loc", "=", "14", "elif", "min_max_time", "==", "'sun_loc'", ":", "# take location for minimum and maximum by sunrise / sunnoon + 2h", "locdf", ".", "min_loc", "=", "sun_times", ".", "sunrise", ".", "round", "(", ")", "# sun rise round to full hour", "locdf", ".", "max_loc", "=", "sun_times", ".", "sunnoon", ".", "round", "(", ")", "+", "default_shift_hours", "# sun noon round to full hour + fix 2h", "elif", "min_max_time", "==", "'sun_loc_shift'", ":", "# take location for minimum and maximum by sunrise / sunnoon + monthly delta", "locdf", ".", "min_loc", "=", "sun_times", ".", "sunrise", ".", "round", "(", ")", "# sun rise round to full hour", "locdf", ".", "max_loc", "=", "(", "sun_times", ".", "sunnoon", "+", "max_delta", "[", "locdf", ".", "index", ".", "month", "]", ".", "values", ")", ".", "round", "(", ")", "# sun noon + shift derived from observed hourly data, round to full hour", "pos", "=", "locdf", ".", "min_loc", ">", "locdf", ".", "max_loc", "locdf", ".", "loc", "[", "pos", ",", "'max_loc'", "]", "=", "sun_times", ".", "sunnoon", "[", "pos", "]", ".", "round", "(", ")", "+", "default_shift_hours", "# standard shift in this case", "locdf", ".", "min_loc", "=", "locdf", ".", "min_loc", ".", "astype", "(", "int", ")", "locdf", ".", "max_loc", "=", "locdf", ".", "max_loc", ".", "astype", "(", "int", ")", "locdf", ".", "min_val_cur", "=", "data_daily", ".", "tmin", "locdf", ".", "max_val_cur", "=", "data_daily", ".", "tmax", "locdf", ".", "mean_val_cur", "=", "data_daily", ".", "temp", "locdf", ".", "min_val_next", "=", "data_daily", ".", "tmin", ".", "shift", "(", "-", "1", ",", "'D'", ")", "locdf", ".", "max_val_next", "=", "data_daily", ".", "tmax", ".", "shift", "(", "-", "1", ",", "'D'", ")", "locdf", ".", "loc", "[", "locdf", ".", "index", "[", "-", "1", "]", ",", "'min_val_next'", "]", "=", "locdf", ".", "min_val_cur", ".", "iloc", "[", "-", "1", "]", "locdf", ".", "loc", "[", "locdf", ".", "index", "[", "-", "1", "]", ",", "'max_val_next'", "]", "=", "locdf", ".", "max_val_cur", ".", "iloc", "[", "-", 
"1", "]", "locdf", ".", "min_val_before", "=", "data_daily", ".", "tmin", ".", "shift", "(", "1", ",", "'D'", ")", "locdf", ".", "max_val_before", "=", "data_daily", ".", "tmax", ".", "shift", "(", "1", ",", "'D'", ")", "locdf", ".", "loc", "[", "locdf", ".", "index", "[", "0", "]", ",", "'min_val_before'", "]", "=", "locdf", ".", "min_val_cur", ".", "iloc", "[", "0", "]", "locdf", ".", "loc", "[", "locdf", ".", "index", "[", "0", "]", ",", "'max_val_before'", "]", "=", "locdf", ".", "max_val_cur", ".", "iloc", "[", "0", "]", "locdf_day", "=", "locdf", "locdf", "=", "locdf", ".", "reindex", "(", "temp_disagg", ".", "index", ",", "method", "=", "'ffill'", ")", "# whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting", "# once we have passed the maximum value use the minimum for next day to ensure smooth transitions", "min_val", "=", "locdf", ".", "min_val_next", ".", "copy", "(", ")", "min_val", "[", "min_val", ".", "index", ".", "hour", "<", "locdf", ".", "max_loc", "]", "=", "locdf", ".", "min_val_cur", "# whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting", "# once we have passed the minimum value use the maximum for the current day to ensure smooth transitions", "max_val", "=", "locdf", ".", "max_val_cur", ".", "copy", "(", ")", "max_val", "[", "max_val", ".", "index", ".", "hour", "<", "locdf", ".", "min_loc", "]", "=", "locdf", ".", "max_val_before", "temp_disagg", "=", "pd", ".", "Series", "(", "index", "=", "min_val", ".", "index", ")", "if", "method", "in", "(", "'sine_min_max'", ",", "'sine'", ")", ":", "delta_val", "=", "max_val", "-", "min_val", "v_trans", "=", "min_val", "+", "delta_val", "/", "2.", "if", "mod_nighttime", ":", "before_min", "=", "locdf", ".", "index", ".", "hour", "<=", "locdf", ".", "min_loc", "between_min_max", "=", "(", "locdf", ".", "index", ".", "hour", ">", "locdf", ".", "min_loc", ")", "&", "(", "locdf", ".", "index", ".", "hour", "<", "locdf", ".", "max_loc", ")", "after_max", "=", "locdf", ".", "index", ".", "hour", ">=", "locdf", ".", "max_loc", "temp_disagg", "[", "before_min", "]", "=", "v_trans", "+", "delta_val", "/", "2.", "*", "np", ".", "cos", "(", "np", ".", "pi", "/", "(", "hours_per_day", "-", "(", "locdf", ".", "max_loc", "-", "locdf", ".", "min_loc", ")", ")", "*", "(", "hours_per_day", "-", "locdf", ".", "max_loc", "+", "locdf", ".", "index", ".", "hour", ")", ")", "temp_disagg", "[", "between_min_max", "]", "=", "v_trans", "+", "delta_val", "/", "2.", "*", "np", ".", "cos", "(", "1.25", "*", "np", ".", "pi", "+", "0.75", "*", "np", ".", "pi", "/", "(", "locdf", ".", "max_loc", "-", "locdf", ".", "min_loc", ")", "*", "(", "locdf", ".", "index", ".", "hour", "-", "locdf", ".", "min_loc", ")", ")", "temp_disagg", "[", "after_max", "]", "=", "v_trans", "+", "delta_val", "/", "2.", "*", "np", ".", "cos", "(", "np", ".", "pi", "/", "(", "hours_per_day", "-", "(", "locdf", ".", "max_loc", "-", "locdf", ".", "min_loc", ")", ")", "*", "(", "locdf", ".", "index", ".", "hour", "-", "locdf", ".", "max_loc", ")", ")", "else", ":", "temp_disagg", "[", ":", "]", "=", "v_trans", "+", "(", "delta_val", "/", "2.", ")", "*", "np", ".", "cos", "(", "2", "*", "np", ".", "pi", "/", "hours_per_day", "*", "(", "locdf", ".", "index", ".", "hour", "-", "locdf", ".", "max_loc", ")", ")", "elif", "method", "==", "'sine_mean'", ":", "dtr", "=", "locdf", ".", "max_val_cur", "-", "locdf", ".", "min_val_cur", "temp_disagg", "[", ":", "]", "=", 
"locdf", ".", "mean_val_cur", "+", "dtr", "/", "2.", "*", "np", ".", "cos", "(", "2", "*", "np", ".", "pi", "/", "hours_per_day", "*", "(", "locdf", ".", "index", ".", "hour", "-", "locdf", ".", "max_loc", ")", ")", "polars", "=", "sun_times", ".", "daylength", "<", "daylength_thres", "if", "polars", ".", "sum", "(", ")", ">", "0", ":", "# during polar night, no diurnal variation of temperature is applied", "# instead the daily average calculated using tmin and tmax is applied", "polars_index_hourly", "=", "melodist", ".", "util", ".", "hourly_index", "(", "polars", "[", "polars", "]", ".", "index", ")", "temp_disagg", ".", "loc", "[", "polars_index_hourly", "]", "=", "np", ".", "nan", "avg_before", "=", "(", "locdf_day", ".", "min_val_before", "+", "locdf_day", ".", "max_val_before", ")", "/", "2.", "avg_cur", "=", "(", "locdf_day", ".", "min_val_cur", "+", "locdf_day", ".", "max_val_cur", ")", "/", "2.", "getting_warmers", "=", "polars", "&", "(", "avg_before", "<=", "avg_cur", ")", "getting_colders", "=", "polars", "&", "~", "(", "avg_before", "<=", "avg_cur", ")", "getting_warmers_min_loc", "=", "pd", ".", "DatetimeIndex", "(", "[", "ts", ".", "replace", "(", "hour", "=", "min_loc_polar", ")", "for", "ts", "in", "getting_warmers", "[", "getting_warmers", "]", ".", "index", "]", ")", "getting_warmers_max_loc", "=", "pd", ".", "DatetimeIndex", "(", "[", "ts", ".", "replace", "(", "hour", "=", "max_loc_polar", ")", "for", "ts", "in", "getting_warmers", "[", "getting_warmers", "]", ".", "index", "]", ")", "temp_disagg", "[", "getting_warmers_min_loc", "]", "=", "locdf_day", ".", "min_val_cur", "[", "getting_warmers", "]", ".", "values", "temp_disagg", "[", "getting_warmers_max_loc", "]", "=", "locdf_day", ".", "max_val_cur", "[", "getting_warmers", "]", ".", "values", "getting_colders_min_loc", "=", "pd", ".", "DatetimeIndex", "(", "[", "ts", ".", "replace", "(", "hour", "=", "min_loc_polar", ")", "for", "ts", "in", "getting_colders", "[", "getting_colders", "]", ".", "index", "]", ")", "getting_colders_max_loc", "=", "pd", ".", "DatetimeIndex", "(", "[", "ts", ".", "replace", "(", "hour", "=", "max_loc_polar", ")", "for", "ts", "in", "getting_colders", "[", "getting_colders", "]", ".", "index", "]", ")", "temp_disagg", "[", "getting_colders_min_loc", "]", "=", "locdf_day", ".", "max_val_cur", "[", "getting_colders", "]", ".", "values", "temp_disagg", "[", "getting_colders_max_loc", "]", "=", "locdf_day", ".", "min_val_cur", "[", "getting_colders", "]", ".", "values", "temp_polars", "=", "temp_disagg", ".", "loc", "[", "polars_index_hourly", "]", ".", "copy", "(", ")", "transition_days", "=", "polars", "[", "polars", ".", "diff", "(", ")", "==", "True", "]", ".", "astype", "(", "int", ")", "# 0 where transition from polar to \"normal\" mode, 1 where transition from normal to polar", "if", "len", "(", "transition_days", ")", ">", "0", ":", "polar_to_normal_days", "=", "transition_days", ".", "index", "[", "transition_days", "==", "0", "]", "normal_to_polar_days", "=", "transition_days", ".", "index", "[", "transition_days", "==", "1", "]", "-", "pd", ".", "Timedelta", "(", "days", "=", "1", ")", "add_days", "=", "polar_to_normal_days", ".", "union", "(", "normal_to_polar_days", ")", "temp_polars", "=", "temp_polars", ".", "append", "(", "temp_disagg", "[", "melodist", ".", "util", ".", "hourly_index", "(", "add_days", ")", "]", ")", ".", "sort_index", "(", ")", "for", "day", "in", "polar_to_normal_days", ":", "min_loc", "=", "int", "(", "locdf", ".", "loc", "[", "day", "]", ".", "min_loc", 
")", "temp_polars", "[", "day", ".", "replace", "(", "hour", "=", "0", ")", ":", "day", ".", "replace", "(", "hour", "=", "min_loc", ")", "-", "pd", ".", "Timedelta", "(", "hours", "=", "1", ")", "]", "=", "np", ".", "nan", "temp_polars", "[", "day", ".", "replace", "(", "hour", "=", "min_loc", ")", "]", "=", "locdf", ".", "min_val_cur", "[", "day", "]", "for", "day", "in", "normal_to_polar_days", ":", "max_loc", "=", "int", "(", "locdf", ".", "loc", "[", "day", "]", ".", "max_loc", ")", "temp_polars", "[", "day", ".", "replace", "(", "hour", "=", "max_loc", ")", "+", "pd", ".", "Timedelta", "(", "hours", "=", "1", ")", ":", "day", ".", "replace", "(", "hour", "=", "23", ")", "]", "=", "np", ".", "nan", "temp_interp", "=", "temp_polars", ".", "interpolate", "(", "method", "=", "'linear'", ",", "limit", "=", "23", ")", "temp_disagg", "[", "temp_interp", ".", "index", "]", "=", "temp_interp", "elif", "method", "==", "'mean_course_min_max'", ":", "data_daily_as_hourly", "=", "data_daily", ".", "reindex", "(", "temp_disagg", ".", "index", ",", "method", "=", "'ffill'", ",", "limit", "=", "23", ")", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "temp_disagg", ".", "index", ")", "df", "[", "'normval'", "]", "=", "mean_course", ".", "unstack", "(", ")", ".", "loc", "[", "list", "(", "zip", "(", "df", ".", "index", ".", "month", ",", "df", ".", "index", ".", "hour", ")", ")", "]", ".", "values", "df", "[", "'tmin'", "]", "=", "data_daily_as_hourly", ".", "tmin", "df", "[", "'tmax'", "]", "=", "data_daily_as_hourly", ".", "tmax", "temp_disagg", "[", ":", "]", "=", "df", ".", "normval", "*", "(", "df", ".", "tmax", "-", "df", ".", "tmin", ")", "+", "df", ".", "tmin", "elif", "method", "==", "'mean_course_mean'", ":", "data_daily_as_hourly", "=", "data_daily", ".", "reindex", "(", "temp_disagg", ".", "index", ",", "method", "=", "'ffill'", ",", "limit", "=", "23", ")", "dtr", "=", "data_daily_as_hourly", ".", "tmax", "-", "data_daily_as_hourly", ".", "tmin", "mc", "=", "pd", ".", "Series", "(", "index", "=", "temp_disagg", ".", "index", ")", "mean_course_zeromean", "=", "mean_course", "-", "mean_course", ".", "mean", "(", ")", "# shift mean course so that the daily mean is 0", "mc", "[", ":", "]", "=", "mean_course_zeromean", ".", "unstack", "(", ")", ".", "loc", "[", "list", "(", "zip", "(", "temp_disagg", ".", "index", ".", "month", ",", "temp_disagg", ".", "index", ".", "hour", ")", ")", "]", ".", "values", "temp_disagg", "[", ":", "]", "=", "data_daily_as_hourly", ".", "temp", "+", "dtr", "*", "mc", "return", "temp_disagg" ]
The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times()
[ "The", "disaggregation", "function", "for", "temperature" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/temperature.py#L33-L212
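At its simplest (the 'sine' variant without night-time modification or polar-night handling), the diurnal course is a cosine through the daily extremes with its maximum at max_loc. A standalone sketch of that shape:

```python
import numpy as np
import pandas as pd

tmin, tmax, max_loc = 2.0, 12.0, 14  # daily extremes and hour of maximum
hours = np.arange(24)
mean = (tmax + tmin) / 2.0
amplitude = (tmax - tmin) / 2.0

# Cosine peaking at max_loc; the minimum falls 12 hours away.
temp_hourly = pd.Series(
    mean + amplitude * np.cos(2 * np.pi / 24 * (hours - max_loc)),
    index=hours)
assert np.isclose(temp_hourly[max_loc], tmax)
```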
kristianfoerster/melodist
melodist/temperature.py
get_shift_by_data
def get_shift_by_data(temp_hourly, lon, lat, time_zone): '''function to get max temp shift (monthly) by hourly data Parameters ---- hourly_data_obs : observed hourly data lat : latitude in DezDeg lon : longitude in DezDeg time_zone: timezone ''' daily_index = temp_hourly.resample('D').mean().index sun_times = melodist.util.get_sun_times(daily_index, lon, lat, time_zone) idxmax = temp_hourly.groupby(temp_hourly.index.date).idxmax() idxmax.index = pd.to_datetime(idxmax.index) max_temp_hour_obs = idxmax.dropna().apply(lambda d: d.hour) max_temp_hour_pot = sun_times.sunnoon max_delta = max_temp_hour_obs - max_temp_hour_pot mean_monthly_delta = max_delta.groupby(max_delta.index.month).mean() return mean_monthly_delta
python
def get_shift_by_data(temp_hourly, lon, lat, time_zone): '''function to get max temp shift (monthly) by hourly data Parameters ---- hourly_data_obs : observed hourly data lat : latitude in DezDeg lon : longitude in DezDeg time_zone: timezone ''' daily_index = temp_hourly.resample('D').mean().index sun_times = melodist.util.get_sun_times(daily_index, lon, lat, time_zone) idxmax = temp_hourly.groupby(temp_hourly.index.date).idxmax() idxmax.index = pd.to_datetime(idxmax.index) max_temp_hour_obs = idxmax.dropna().apply(lambda d: d.hour) max_temp_hour_pot = sun_times.sunnoon max_delta = max_temp_hour_obs - max_temp_hour_pot mean_monthly_delta = max_delta.groupby(max_delta.index.month).mean() return mean_monthly_delta
[ "def", "get_shift_by_data", "(", "temp_hourly", ",", "lon", ",", "lat", ",", "time_zone", ")", ":", "daily_index", "=", "temp_hourly", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", ".", "index", "sun_times", "=", "melodist", ".", "util", ".", "get_sun_times", "(", "daily_index", ",", "lon", ",", "lat", ",", "time_zone", ")", "idxmax", "=", "temp_hourly", ".", "groupby", "(", "temp_hourly", ".", "index", ".", "date", ")", ".", "idxmax", "(", ")", "idxmax", ".", "index", "=", "pd", ".", "to_datetime", "(", "idxmax", ".", "index", ")", "max_temp_hour_obs", "=", "idxmax", ".", "dropna", "(", ")", ".", "apply", "(", "lambda", "d", ":", "d", ".", "hour", ")", "max_temp_hour_pot", "=", "sun_times", ".", "sunnoon", "max_delta", "=", "max_temp_hour_obs", "-", "max_temp_hour_pot", "mean_monthly_delta", "=", "max_delta", ".", "groupby", "(", "max_delta", ".", "index", ".", "month", ")", ".", "mean", "(", ")", "return", "mean_monthly_delta" ]
function to get max temp shift (monthly) by hourly data Parameters ---- hourly_data_obs : observed hourly data lat : latitude in DezDeg lon : longitude in DezDeg time_zone: timezone
[ "function", "to", "get", "max", "temp", "shift", "(", "monthly", ")", "by", "hourly", "data", "Parameters", "----", "hourly_data_obs", ":", "observed", "hourly", "data", "lat", ":", "latitude", "in", "DezDeg", "lon", ":", "longitude", "in", "DezDeg", "time_zone", ":", "timezone" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/temperature.py#L215-L235
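Stripped of the solar geometry, the function averages the hour of the daily temperature maximum per month. The same mechanics on synthetic data:

```python
import numpy as np
import pandas as pd

# Synthetic hourly temperatures peaking at 15:00 every day.
idx = pd.date_range('2020-01-01', '2020-02-29 23:00', freq='h')
temp = pd.Series(10 + 5 * np.cos(2 * np.pi * (idx.hour - 15) / 24), index=idx)

idxmax = temp.groupby(temp.index.date).idxmax()  # timestamp of daily maximum
idxmax.index = pd.to_datetime(idxmax.index)
max_hour = idxmax.apply(lambda t: t.hour)
monthly_mean_hour = max_hour.groupby(max_hour.index.month).mean()  # 15.0 each
```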
kristianfoerster/melodist
melodist/util/util.py
distribute_equally
def distribute_equally(daily_data, divide=False): """Obtains hourly values by equally distributing the daily values. Args: daily_data: daily values divide: if True, divide resulting values by the number of hours in order to preserve the daily sum (required e.g. for precipitation). Returns: Equally distributed hourly values. """ index = hourly_index(daily_data.index) hourly_data = daily_data.reindex(index) hourly_data = hourly_data.groupby(hourly_data.index.day).transform( lambda x: x.fillna(method='ffill', limit=23)) if divide: hourly_data /= 24 return hourly_data
python
def distribute_equally(daily_data, divide=False):
    """Obtains hourly values by equally distributing the daily values.

    Args:
        daily_data: daily values
        divide: if True, divide resulting values by the number of hours in
            order to preserve the daily sum (required e.g. for precipitation).

    Returns:
        Equally distributed hourly values.
    """
    index = hourly_index(daily_data.index)
    hourly_data = daily_data.reindex(index)
    hourly_data = hourly_data.groupby(hourly_data.index.day).transform(
        lambda x: x.fillna(method='ffill', limit=23))

    if divide:
        hourly_data /= 24

    return hourly_data
[ "def", "distribute_equally", "(", "daily_data", ",", "divide", "=", "False", ")", ":", "index", "=", "hourly_index", "(", "daily_data", ".", "index", ")", "hourly_data", "=", "daily_data", ".", "reindex", "(", "index", ")", "hourly_data", "=", "hourly_data", ".", "groupby", "(", "hourly_data", ".", "index", ".", "day", ")", ".", "transform", "(", "lambda", "x", ":", "x", ".", "fillna", "(", "method", "=", "'ffill'", ",", "limit", "=", "23", ")", ")", "if", "divide", ":", "hourly_data", "/=", "24", "return", "hourly_data" ]
Obtains hourly values by equally distributing the daily values.

Args:
    daily_data: daily values
    divide: if True, divide resulting values by the number of hours in
        order to preserve the daily sum (required e.g. for precipitation).

Returns:
    Equally distributed hourly values.
[ "Obtains", "hourly", "values", "by", "equally", "distributing", "the", "daily", "values", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L50-L70
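A short usage sketch; the package-level name `melodist.distribute_equally` is taken from the `disagg_prec` record further below, which calls it under that name:

```python
import pandas as pd
import melodist

daily_precip = pd.Series([12.0, 0.0, 4.8],
                         index=pd.date_range('2019-01-01', periods=3, freq='D'))

# divide=True spreads each daily total over 24 hours and preserves the sum
hourly = melodist.distribute_equally(daily_precip, divide=True)
print(hourly['2019-01-01'].sum())  # 12.0
```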
kristianfoerster/melodist
melodist/util/util.py
vapor_pressure
def vapor_pressure(temp, hum):
    """
    Calculates vapor pressure from temperature and humidity after Sonntag (1990).

    Args:
        temp: temperature values
        hum: humidity value(s). Can be scalar (e.g. for calculating saturation
            vapor pressure).

    Returns:
        Vapor pressure in hPa.
    """
    if np.isscalar(hum):
        hum = np.zeros(temp.shape) + hum

    assert(temp.shape == hum.shape)

    positives = np.array(temp >= 273.15)
    vap_press = np.zeros(temp.shape) * np.nan

    vap_press[positives] = 6.112 * np.exp((17.62 * (temp[positives] - 273.15)) / (243.12 + (temp[positives] - 273.15))) * hum[positives] / 100.
    vap_press[~positives] = 6.112 * np.exp((22.46 * (temp[~positives] - 273.15)) / (272.62 + (temp[~positives] - 273.15))) * hum[~positives] / 100.

    return vap_press
python
def vapor_pressure(temp, hum):
    """
    Calculates vapor pressure from temperature and humidity after Sonntag (1990).

    Args:
        temp: temperature values
        hum: humidity value(s). Can be scalar (e.g. for calculating saturation
            vapor pressure).

    Returns:
        Vapor pressure in hPa.
    """
    if np.isscalar(hum):
        hum = np.zeros(temp.shape) + hum

    assert(temp.shape == hum.shape)

    positives = np.array(temp >= 273.15)
    vap_press = np.zeros(temp.shape) * np.nan

    vap_press[positives] = 6.112 * np.exp((17.62 * (temp[positives] - 273.15)) / (243.12 + (temp[positives] - 273.15))) * hum[positives] / 100.
    vap_press[~positives] = 6.112 * np.exp((22.46 * (temp[~positives] - 273.15)) / (272.62 + (temp[~positives] - 273.15))) * hum[~positives] / 100.

    return vap_press
[ "def", "vapor_pressure", "(", "temp", ",", "hum", ")", ":", "if", "np", ".", "isscalar", "(", "hum", ")", ":", "hum", "=", "np", ".", "zeros", "(", "temp", ".", "shape", ")", "+", "hum", "assert", "(", "temp", ".", "shape", "==", "hum", ".", "shape", ")", "positives", "=", "np", ".", "array", "(", "temp", ">=", "273.15", ")", "vap_press", "=", "np", ".", "zeros", "(", "temp", ".", "shape", ")", "*", "np", ".", "nan", "vap_press", "[", "positives", "]", "=", "6.112", "*", "np", ".", "exp", "(", "(", "17.62", "*", "(", "temp", "[", "positives", "]", "-", "273.15", ")", ")", "/", "(", "243.12", "+", "(", "temp", "[", "positives", "]", "-", "273.15", ")", ")", ")", "*", "hum", "[", "positives", "]", "/", "100.", "vap_press", "[", "~", "positives", "]", "=", "6.112", "*", "np", ".", "exp", "(", "(", "22.46", "*", "(", "temp", "[", "~", "positives", "]", "-", "273.15", ")", ")", "/", "(", "272.62", "+", "(", "temp", "[", "~", "positives", "]", "-", "273.15", ")", ")", ")", "*", "hum", "[", "~", "positives", "]", "/", "100.", "return", "vap_press" ]
Calculates vapor pressure from temperature and humidity after Sonntag (1990).

Args:
    temp: temperature values
    hum: humidity value(s). Can be scalar (e.g. for calculating saturation
        vapor pressure).

Returns:
    Vapor pressure in hPa.
[ "Calculates", "vapor", "pressure", "from", "temperature", "and", "humidity", "after", "Sonntag", "(", "1990", ")", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L73-L95
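A quick sketch with NumPy arrays (the function indexes by boolean masks, so inputs need a `.shape`); temperatures are in Kelvin, humidity in percent, and the import path is assumed from the record's file path:

```python
import numpy as np
from melodist.util.util import vapor_pressure  # import path assumed from the record

temp = np.array([263.15, 283.15, 293.15])  # -10, +10 and +20 degC in Kelvin
hum = np.array([80., 60., 50.])            # relative humidity in %

print(vapor_pressure(temp, hum))   # vapor pressure in hPa
print(vapor_pressure(temp, 100.))  # scalar humidity: saturation vapor pressure
```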
kristianfoerster/melodist
melodist/util/util.py
dewpoint_temperature
def dewpoint_temperature(temp, hum):
    """computes the dewpoint temperature

    Parameters
    ----
    temp : temperature [K]
    hum : relative humidity

    Returns
    dewpoint temperature in K
    """
    assert(temp.shape == hum.shape)
    vap_press = vapor_pressure(temp, hum)

    positives = np.array(temp >= 273.15)
    dewpoint_temp = temp.copy() * np.nan

    dewpoint_temp[positives] = 243.12 * np.log(vap_press[positives] / 6.112) / (17.62 - np.log(vap_press[positives] / 6.112))
    dewpoint_temp[~positives] = 272.62 * np.log(vap_press[~positives] / 6.112) / (22.46 - np.log(vap_press[~positives] / 6.112))

    return dewpoint_temp + 273.15
python
def dewpoint_temperature(temp, hum):
    """computes the dewpoint temperature

    Parameters
    ----
    temp : temperature [K]
    hum : relative humidity

    Returns
    dewpoint temperature in K
    """
    assert(temp.shape == hum.shape)
    vap_press = vapor_pressure(temp, hum)

    positives = np.array(temp >= 273.15)
    dewpoint_temp = temp.copy() * np.nan

    dewpoint_temp[positives] = 243.12 * np.log(vap_press[positives] / 6.112) / (17.62 - np.log(vap_press[positives] / 6.112))
    dewpoint_temp[~positives] = 272.62 * np.log(vap_press[~positives] / 6.112) / (22.46 - np.log(vap_press[~positives] / 6.112))

    return dewpoint_temp + 273.15
[ "def", "dewpoint_temperature", "(", "temp", ",", "hum", ")", ":", "assert", "(", "temp", ".", "shape", "==", "hum", ".", "shape", ")", "vap_press", "=", "vapor_pressure", "(", "temp", ",", "hum", ")", "positives", "=", "np", ".", "array", "(", "temp", ">=", "273.15", ")", "dewpoint_temp", "=", "temp", ".", "copy", "(", ")", "*", "np", ".", "nan", "dewpoint_temp", "[", "positives", "]", "=", "243.12", "*", "np", ".", "log", "(", "vap_press", "[", "positives", "]", "/", "6.112", ")", "/", "(", "17.62", "-", "np", ".", "log", "(", "vap_press", "[", "positives", "]", "/", "6.112", ")", ")", "dewpoint_temp", "[", "~", "positives", "]", "=", "272.62", "*", "np", ".", "log", "(", "vap_press", "[", "~", "positives", "]", "/", "6.112", ")", "/", "(", "22.46", "-", "np", ".", "log", "(", "vap_press", "[", "~", "positives", "]", "/", "6.112", ")", ")", "return", "dewpoint_temp", "+", "273.15" ]
computes the dewpoint temperature

Parameters
----
temp : temperature [K]
hum : relative humidity

Returns
dewpoint temperature in K
[ "computes", "the", "dewpoint", "temperature" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L98-L119
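A sketch reusing the Kelvin/percent conventions of `vapor_pressure` above, with the same assumed import path:

```python
import numpy as np
from melodist.util.util import dewpoint_temperature  # import path assumed

temp = np.array([283.15, 293.15])  # air temperature in K
hum = np.array([70., 50.])         # relative humidity in %

dew = dewpoint_temperature(temp, hum)
print(dew)                  # dewpoint in K
print(np.all(dew <= temp))  # True: the dewpoint never exceeds air temperature
```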
kristianfoerster/melodist
melodist/util/util.py
linregress
def linregress(x, y, return_stats=False):
    """linear regression calculation

    Parameters
    ----
    x : independent variable (series)
    y : dependent variable (series)
    return_stats : returns statistical values as well if required (bool)

    Returns
    ----
    list of parameters (and statistics)
    """
    a1, a0, r_value, p_value, stderr = scipy.stats.linregress(x, y)

    retval = a1, a0
    if return_stats:
        retval += r_value, p_value, stderr

    return retval
python
def linregress(x, y, return_stats=False):
    """linear regression calculation

    Parameters
    ----
    x : independent variable (series)
    y : dependent variable (series)
    return_stats : returns statistical values as well if required (bool)

    Returns
    ----
    list of parameters (and statistics)
    """
    a1, a0, r_value, p_value, stderr = scipy.stats.linregress(x, y)

    retval = a1, a0
    if return_stats:
        retval += r_value, p_value, stderr

    return retval
[ "def", "linregress", "(", "x", ",", "y", ",", "return_stats", "=", "False", ")", ":", "a1", ",", "a0", ",", "r_value", ",", "p_value", ",", "stderr", "=", "scipy", ".", "stats", ".", "linregress", "(", "x", ",", "y", ")", "retval", "=", "a1", ",", "a0", "if", "return_stats", ":", "retval", "+=", "r_value", ",", "p_value", ",", "stderr", "return", "retval" ]
linear regression calculation

Parameters
----
x : independent variable (series)
y : dependent variable (series)
return_stats : returns statistical values as well if required (bool)

Returns
----
list of parameters (and statistics)
[ "linear", "regression", "calculation" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L122-L142
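A minimal sketch of the wrapper's two return modes (import path assumed as above):

```python
import numpy as np
from melodist.util.util import linregress  # import path assumed

x = np.arange(10.)
y = 2.0 * x + 1.0  # perfectly linear test data

a1, a0 = linregress(x, y)                                   # slope, intercept
a1, a0, r, p, stderr = linregress(x, y, return_stats=True)  # plus statistics
print(a1, a0, r)  # approx. 2.0, 1.0, 1.0
```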
kristianfoerster/melodist
melodist/util/util.py
get_sun_times
def get_sun_times(dates, lon, lat, time_zone):
    """Computes the times of sunrise, solar noon, and sunset for each day.

    Parameters
    ----
    dates: datetime
    lat : latitude in DecDeg
    lon : longitude in DecDeg
    time_zone : timezone

    Returns
    ----
    DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
    """
    df = pd.DataFrame(index=dates, columns=['sunrise', 'sunnoon', 'sunset', 'daylength'])

    doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index])  # day of year

    # Day angle and declination after Bourges (1985):
    day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346))
    declination = np.deg2rad(
        0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b) +
        0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b) -
        0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b)
    )

    # Equation of time with day angle after Spencer (1971):
    day_angle_s = 2 * np.pi * (doy - 1) / 365.
    eq_time = 12. / np.pi * (
        0.000075 +
        0.001868 * np.cos(  day_angle_s) - 0.032077 * np.sin(  day_angle_s) -
        0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s)
    )

    standard_meridian = time_zone * 15.
    delta_lat_time = (lon - standard_meridian) * 24. / 360.

    omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination)
    omega_nul = np.arccos(omega_nul_arg)

    sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time
    sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time
    # as an approximation, solar noon is independent of the below mentioned cases:
    sunnoon = 12. * (1.) - delta_lat_time - eq_time

    # $kf 2015-11-13: special case midnight sun and polar night

    # CASE 1: MIDNIGHT SUN
    # set sunrise and sunset to values that would yield the maximum day
    # length even though this is a crude assumption
    pos = omega_nul_arg < -1
    sunrise[pos] = sunnoon[pos] - 12
    sunset[pos] = sunnoon[pos] + 12

    # CASE 2: POLAR NIGHT
    # set sunrise and sunset to values that would yield the minimum day
    # length even though this is a crude assumption
    pos = omega_nul_arg > 1
    sunrise[pos] = sunnoon[pos]
    sunset[pos] = sunnoon[pos]

    daylength = sunset - sunrise

    # adjust if required
    sunrise[sunrise < 0] += 24
    sunset[sunset > 24] -= 24

    df.sunrise = sunrise
    df.sunnoon = sunnoon
    df.sunset = sunset
    df.daylength = daylength

    return df
python
def get_sun_times(dates, lon, lat, time_zone):
    """Computes the times of sunrise, solar noon, and sunset for each day.

    Parameters
    ----
    dates: datetime
    lat : latitude in DecDeg
    lon : longitude in DecDeg
    time_zone : timezone

    Returns
    ----
    DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
    """
    df = pd.DataFrame(index=dates, columns=['sunrise', 'sunnoon', 'sunset', 'daylength'])

    doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index])  # day of year

    # Day angle and declination after Bourges (1985):
    day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346))
    declination = np.deg2rad(
        0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b) +
        0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b) -
        0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b)
    )

    # Equation of time with day angle after Spencer (1971):
    day_angle_s = 2 * np.pi * (doy - 1) / 365.
    eq_time = 12. / np.pi * (
        0.000075 +
        0.001868 * np.cos(  day_angle_s) - 0.032077 * np.sin(  day_angle_s) -
        0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s)
    )

    standard_meridian = time_zone * 15.
    delta_lat_time = (lon - standard_meridian) * 24. / 360.

    omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination)
    omega_nul = np.arccos(omega_nul_arg)

    sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time
    sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time
    # as an approximation, solar noon is independent of the below mentioned cases:
    sunnoon = 12. * (1.) - delta_lat_time - eq_time

    # $kf 2015-11-13: special case midnight sun and polar night

    # CASE 1: MIDNIGHT SUN
    # set sunrise and sunset to values that would yield the maximum day
    # length even though this is a crude assumption
    pos = omega_nul_arg < -1
    sunrise[pos] = sunnoon[pos] - 12
    sunset[pos] = sunnoon[pos] + 12

    # CASE 2: POLAR NIGHT
    # set sunrise and sunset to values that would yield the minimum day
    # length even though this is a crude assumption
    pos = omega_nul_arg > 1
    sunrise[pos] = sunnoon[pos]
    sunset[pos] = sunnoon[pos]

    daylength = sunset - sunrise

    # adjust if required
    sunrise[sunrise < 0] += 24
    sunset[sunset > 24] -= 24

    df.sunrise = sunrise
    df.sunnoon = sunnoon
    df.sunset = sunset
    df.daylength = daylength

    return df
[ "def", "get_sun_times", "(", "dates", ",", "lon", ",", "lat", ",", "time_zone", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "dates", ",", "columns", "=", "[", "'sunrise'", ",", "'sunnoon'", ",", "'sunset'", ",", "'daylength'", "]", ")", "doy", "=", "np", ".", "array", "(", "[", "(", "d", "-", "d", ".", "replace", "(", "day", "=", "1", ",", "month", "=", "1", ")", ")", ".", "days", "+", "1", "for", "d", "in", "df", ".", "index", "]", ")", "# day of year", "# Day angle and declination after Bourges (1985):", "day_angle_b", "=", "np", ".", "deg2rad", "(", "(", "360.", "/", "365.25", ")", "*", "(", "doy", "-", "79.346", ")", ")", "declination", "=", "np", ".", "deg2rad", "(", "0.3723", "+", "23.2567", "*", "np", ".", "sin", "(", "day_angle_b", ")", "-", "0.7580", "*", "np", ".", "cos", "(", "day_angle_b", ")", "+", "0.1149", "*", "np", ".", "sin", "(", "2", "*", "day_angle_b", ")", "+", "0.3656", "*", "np", ".", "cos", "(", "2", "*", "day_angle_b", ")", "-", "0.1712", "*", "np", ".", "sin", "(", "3", "*", "day_angle_b", ")", "+", "0.0201", "*", "np", ".", "cos", "(", "3", "*", "day_angle_b", ")", ")", "# Equation of time with day angle after Spencer (1971):", "day_angle_s", "=", "2", "*", "np", ".", "pi", "*", "(", "doy", "-", "1", ")", "/", "365.", "eq_time", "=", "12.", "/", "np", ".", "pi", "*", "(", "0.000075", "+", "0.001868", "*", "np", ".", "cos", "(", "day_angle_s", ")", "-", "0.032077", "*", "np", ".", "sin", "(", "day_angle_s", ")", "-", "0.014615", "*", "np", ".", "cos", "(", "2", "*", "day_angle_s", ")", "-", "0.040849", "*", "np", ".", "sin", "(", "2", "*", "day_angle_s", ")", ")", "#", "standard_meridian", "=", "time_zone", "*", "15.", "delta_lat_time", "=", "(", "lon", "-", "standard_meridian", ")", "*", "24.", "/", "360.", "omega_nul_arg", "=", "-", "np", ".", "tan", "(", "np", ".", "deg2rad", "(", "lat", ")", ")", "*", "np", ".", "tan", "(", "declination", ")", "omega_nul", "=", "np", ".", "arccos", "(", "omega_nul_arg", ")", "sunrise", "=", "12.", "*", "(", "1.", "-", "(", "omega_nul", ")", "/", "np", ".", "pi", ")", "-", "delta_lat_time", "-", "eq_time", "sunset", "=", "12.", "*", "(", "1.", "+", "(", "omega_nul", ")", "/", "np", ".", "pi", ")", "-", "delta_lat_time", "-", "eq_time", "# as an approximation, solar noon is independent of the below mentioned", "# cases:", "sunnoon", "=", "12.", "*", "(", "1.", ")", "-", "delta_lat_time", "-", "eq_time", "# $kf 2015-11-13: special case midnight sun and polar night", "# CASE 1: MIDNIGHT SUN", "# set sunrise and sunset to values that would yield the maximum day", "# length even though this a crude assumption", "pos", "=", "omega_nul_arg", "<", "-", "1", "sunrise", "[", "pos", "]", "=", "sunnoon", "[", "pos", "]", "-", "12", "sunset", "[", "pos", "]", "=", "sunnoon", "[", "pos", "]", "+", "12", "# CASE 2: POLAR NIGHT", "# set sunrise and sunset to values that would yield the minmum day", "# length even though this a crude assumption", "pos", "=", "omega_nul_arg", ">", "1", "sunrise", "[", "pos", "]", "=", "sunnoon", "[", "pos", "]", "sunset", "[", "pos", "]", "=", "sunnoon", "[", "pos", "]", "daylength", "=", "sunset", "-", "sunrise", "# adjust if required", "sunrise", "[", "sunrise", "<", "0", "]", "+=", "24", "sunset", "[", "sunset", ">", "24", "]", "-=", "24", "df", ".", "sunrise", "=", "sunrise", "df", ".", "sunnoon", "=", "sunnoon", "df", ".", "sunset", "=", "sunset", "df", ".", "daylength", "=", "daylength", "return", "df" ]
Computes the times of sunrise, solar noon, and sunset for each day.

Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone

Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
[ "Computes", "the", "times", "of", "sunrise", "solar", "noon", "and", "sunset", "for", "each", "day", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L145-L221
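A sketch with illustrative mid-European coordinates; `time_zone` is the UTC offset in hours, so the standard meridian used internally is `time_zone * 15` degrees:

```python
import pandas as pd
from melodist.util.util import get_sun_times  # import path assumed

dates = pd.date_range('2019-06-20', '2019-06-22', freq='D')
sun = get_sun_times(dates, lon=11.0, lat=47.3, time_zone=1)
print(sun)  # sunrise, sunnoon, sunset and daylength in decimal hours
```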
kristianfoerster/melodist
melodist/util/util.py
detect_gaps
def detect_gaps(dataframe, timestep, print_all=False, print_max=5, verbose=True):
    """checks if a given dataframe contains gaps and returns the number of gaps

    This function checks if a dataframe contains any gaps for a given temporal
    resolution that needs to be specified in seconds. The number of gaps
    detected in the dataframe is returned.

    Args:
        dataframe: A pandas dataframe object with index defined as datetime
        timestep (int): The temporal resolution of the time series in seconds
            (e.g., 86400 for daily values)
        print_all (bool, opt): Lists every gap on the screen
        print_max (int, opt): The maximum number of gaps listed on the screen
            in order to avoid a decrease in performance if numerous gaps occur
        verbose (bool, opt): Enables/disables output to the screen

    Returns:
        The number of gaps as integer. Negative values indicate errors.
    """
    gcount = 0
    msg_counter = 0
    warning_printed = False

    try:
        n = len(dataframe.index)
    except:
        print('Error: Invalid dataframe.')
        return -1

    for i in range(0, n):
        if(i > 0):
            time_diff = dataframe.index[i] - dataframe.index[i-1]
            if(time_diff.delta/1E9 != timestep):
                gcount += 1
                if print_all or (msg_counter <= print_max - 1):
                    if verbose:
                        print('Warning: Gap in time series found between %s and %s' % (dataframe.index[i-1], dataframe.index[i]))
                    msg_counter += 1
                if msg_counter == print_max and verbose and not warning_printed:
                    print('Warning: Only the first %i gaps have been listed. Try to increase print_max parameter to show more details.' % msg_counter)
                    warning_printed = True

    if verbose:
        print('%i gaps found in total.' % (gcount))

    return gcount
python
def detect_gaps(dataframe, timestep, print_all=False, print_max=5, verbose=True):
    """checks if a given dataframe contains gaps and returns the number of gaps

    This function checks if a dataframe contains any gaps for a given temporal
    resolution that needs to be specified in seconds. The number of gaps
    detected in the dataframe is returned.

    Args:
        dataframe: A pandas dataframe object with index defined as datetime
        timestep (int): The temporal resolution of the time series in seconds
            (e.g., 86400 for daily values)
        print_all (bool, opt): Lists every gap on the screen
        print_max (int, opt): The maximum number of gaps listed on the screen
            in order to avoid a decrease in performance if numerous gaps occur
        verbose (bool, opt): Enables/disables output to the screen

    Returns:
        The number of gaps as integer. Negative values indicate errors.
    """
    gcount = 0
    msg_counter = 0
    warning_printed = False

    try:
        n = len(dataframe.index)
    except:
        print('Error: Invalid dataframe.')
        return -1

    for i in range(0, n):
        if(i > 0):
            time_diff = dataframe.index[i] - dataframe.index[i-1]
            if(time_diff.delta/1E9 != timestep):
                gcount += 1
                if print_all or (msg_counter <= print_max - 1):
                    if verbose:
                        print('Warning: Gap in time series found between %s and %s' % (dataframe.index[i-1], dataframe.index[i]))
                    msg_counter += 1
                if msg_counter == print_max and verbose and not warning_printed:
                    print('Warning: Only the first %i gaps have been listed. Try to increase print_max parameter to show more details.' % msg_counter)
                    warning_printed = True

    if verbose:
        print('%i gaps found in total.' % (gcount))

    return gcount
[ "def", "detect_gaps", "(", "dataframe", ",", "timestep", ",", "print_all", "=", "False", ",", "print_max", "=", "5", ",", "verbose", "=", "True", ")", ":", "gcount", "=", "0", "msg_counter", "=", "0", "warning_printed", "=", "False", "try", ":", "n", "=", "len", "(", "dataframe", ".", "index", ")", "except", ":", "print", "(", "'Error: Invalid dataframe.'", ")", "return", "-", "1", "for", "i", "in", "range", "(", "0", ",", "n", ")", ":", "if", "(", "i", ">", "0", ")", ":", "time_diff", "=", "dataframe", ".", "index", "[", "i", "]", "-", "dataframe", ".", "index", "[", "i", "-", "1", "]", "if", "(", "time_diff", ".", "delta", "/", "1E9", "!=", "timestep", ")", ":", "gcount", "+=", "1", "if", "print_all", "or", "(", "msg_counter", "<=", "print_max", "-", "1", ")", ":", "if", "verbose", ":", "print", "(", "'Warning: Gap in time series found between %s and %s'", "%", "(", "dataframe", ".", "index", "[", "i", "-", "1", "]", ",", "dataframe", ".", "index", "[", "i", "]", ")", ")", "msg_counter", "+=", "1", "if", "msg_counter", "==", "print_max", "and", "verbose", "and", "not", "warning_printed", ":", "print", "(", "'Waring: Only the first %i gaps have been listed. Try to increase print_max parameter to show more details.'", "%", "msg_counter", ")", "warning_printed", "=", "True", "if", "verbose", ":", "print", "(", "'%i gaps found in total.'", "%", "(", "gcount", ")", ")", "return", "gcount" ]
checks if a given dataframe contains gaps and returns the number of gaps

This function checks if a dataframe contains any gaps for a given temporal
resolution that needs to be specified in seconds. The number of gaps
detected in the dataframe is returned.

Args:
    dataframe: A pandas dataframe object with index defined as datetime
    timestep (int): The temporal resolution of the time series in seconds
        (e.g., 86400 for daily values)
    print_all (bool, opt): Lists every gap on the screen
    print_max (int, opt): The maximum number of gaps listed on the screen
        in order to avoid a decrease in performance if numerous gaps occur
    verbose (bool, opt): Enables/disables output to the screen

Returns:
    The number of gaps as integer. Negative values indicate errors.
[ "checks", "if", "a", "given", "dataframe", "contains", "gaps", "and", "returns", "the", "number", "of", "gaps" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L224-L265
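A sketch in which two hours are removed from an otherwise continuous hourly index, showing up as one gap. Note that the function reads `Timedelta.delta`, which only exists in older pandas releases (it was removed in pandas 2.0):

```python
import pandas as pd
from melodist.util.util import detect_gaps  # import path assumed

# 48 hourly steps with positions 10 and 11 deleted -> one jump of 3 hours
index = pd.date_range('2019-01-01', periods=48, freq='H').delete([10, 11])
df = pd.DataFrame({'temp': range(len(index))}, index=index)

n_gaps = detect_gaps(df, timestep=3600)  # hourly resolution -> 3600 s
print(n_gaps)  # 1: a single gap between 09:00 and 12:00
```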
kristianfoerster/melodist
melodist/util/util.py
drop_incomplete_days
def drop_incomplete_days(dataframe, shift=0):
    """truncates a given dataframe to full days only

    This function truncates a given pandas dataframe (time series) to full days
    only, thus dropping leading and tailing hours of incomplete days. Please
    note that this methodology only applies to hourly time series.

    Args:
        dataframe: A pandas dataframe object with index defined as datetime
        shift (unsigned int, opt): First hour of daily recordings. For daily
            recordings of precipitation gages, 8 would be the first hour of the
            subsequent day of recordings since daily totals are usually recorded
            at 7. Omit this parameter if recordings are aligned to 0-23h.

    Returns:
        A dataframe with full days only.
    """
    dropped = 0
    if shift > 23 or shift < 0:
        print("Invalid shift parameter setting! Using defaults.")
        shift = 0

    first = shift
    last = first - 1
    if last < 0:
        last += 24

    try:  # todo: move these checks to a separate function
        n = len(dataframe.index)
    except:
        print('Error: Invalid dataframe.')
        return dataframe

    delete = list()

    # drop heading lines if required
    for i in range(0, n):
        if dataframe.index.hour[i] == first and dataframe.index.minute[i] == 0:
            break
        else:
            delete.append(i)
            dropped += 1

    # drop tailing lines if required
    for i in range(n-1, 0, -1):
        if dataframe.index.hour[i] == last and dataframe.index.minute[i] == 0:
            break
        else:
            delete.append(i)
            dropped += 1

    # print("The following rows have been dropped (%i in total):" % dropped)
    # print(delete)
    return dataframe.drop(dataframe.index[delete])
python
def drop_incomplete_days(dataframe, shift=0):
    """truncates a given dataframe to full days only

    This function truncates a given pandas dataframe (time series) to full days
    only, thus dropping leading and tailing hours of incomplete days. Please
    note that this methodology only applies to hourly time series.

    Args:
        dataframe: A pandas dataframe object with index defined as datetime
        shift (unsigned int, opt): First hour of daily recordings. For daily
            recordings of precipitation gages, 8 would be the first hour of the
            subsequent day of recordings since daily totals are usually recorded
            at 7. Omit this parameter if recordings are aligned to 0-23h.

    Returns:
        A dataframe with full days only.
    """
    dropped = 0
    if shift > 23 or shift < 0:
        print("Invalid shift parameter setting! Using defaults.")
        shift = 0

    first = shift
    last = first - 1
    if last < 0:
        last += 24

    try:  # todo: move these checks to a separate function
        n = len(dataframe.index)
    except:
        print('Error: Invalid dataframe.')
        return dataframe

    delete = list()

    # drop heading lines if required
    for i in range(0, n):
        if dataframe.index.hour[i] == first and dataframe.index.minute[i] == 0:
            break
        else:
            delete.append(i)
            dropped += 1

    # drop tailing lines if required
    for i in range(n-1, 0, -1):
        if dataframe.index.hour[i] == last and dataframe.index.minute[i] == 0:
            break
        else:
            delete.append(i)
            dropped += 1

    # print("The following rows have been dropped (%i in total):" % dropped)
    # print(delete)
    return dataframe.drop(dataframe.index[delete])
[ "def", "drop_incomplete_days", "(", "dataframe", ",", "shift", "=", "0", ")", ":", "dropped", "=", "0", "if", "shift", ">", "23", "or", "shift", "<", "0", ":", "print", "(", "\"Invalid shift parameter setting! Using defaults.\"", ")", "shift", "=", "0", "first", "=", "shift", "last", "=", "first", "-", "1", "if", "last", "<", "0", ":", "last", "+=", "24", "try", ":", "# todo: move this checks to a separate function", "n", "=", "len", "(", "dataframe", ".", "index", ")", "except", ":", "print", "(", "'Error: Invalid dataframe.'", ")", "return", "dataframe", "delete", "=", "list", "(", ")", "# drop heading lines if required", "for", "i", "in", "range", "(", "0", ",", "n", ")", ":", "if", "dataframe", ".", "index", ".", "hour", "[", "i", "]", "==", "first", "and", "dataframe", ".", "index", ".", "minute", "[", "i", "]", "==", "0", ":", "break", "else", ":", "delete", ".", "append", "(", "i", ")", "dropped", "+=", "1", "# drop tailing lines if required", "for", "i", "in", "range", "(", "n", "-", "1", ",", "0", ",", "-", "1", ")", ":", "if", "dataframe", ".", "index", ".", "hour", "[", "i", "]", "==", "last", "and", "dataframe", ".", "index", ".", "minute", "[", "i", "]", "==", "0", ":", "break", "else", ":", "delete", ".", "append", "(", "i", ")", "dropped", "+=", "1", "# print(\"The following rows have been dropped (%i in total):\" % dropped)", "# print(delete)", "return", "dataframe", ".", "drop", "(", "dataframe", ".", "index", "[", "[", "delete", "]", "]", ")" ]
truncates a given dataframe to full days only

This function truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.

Args:
    dataframe: A pandas dataframe object with index defined as datetime
    shift (unsigned int, opt): First hour of daily recordings. For daily
        recordings of precipitation gages, 8 would be the first hour of the
        subsequent day of recordings since daily totals are usually recorded
        at 7. Omit this parameter if recordings are aligned to 0-23h.

Returns:
    A dataframe with full days only.
[ "truncates", "a", "given", "dataframe", "to", "full", "days", "only" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L268-L320
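A sketch: an hourly series that starts at 05:00 and ends at 20:00 loses both ragged edges, keeping only the two complete days in between:

```python
import pandas as pd
from melodist.util.util import drop_incomplete_days  # import path assumed

index = pd.date_range('2019-01-01 05:00', '2019-01-04 20:00', freq='H')
df = pd.DataFrame({'precip': 0.1}, index=index)

full = drop_incomplete_days(df)
print(full.index[0], full.index[-1])  # 2019-01-02 00:00:00, 2019-01-03 23:00:00
```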
kristianfoerster/melodist
melodist/util/util.py
daily_from_hourly
def daily_from_hourly(df):
    """Aggregates data (hourly to daily values) according to the characteristics
    of each variable (e.g., average for temperature, sum for precipitation)

    Args:
        df: dataframe including time series with one hour time steps

    Returns:
        dataframe (daily)
    """
    df_daily = pd.DataFrame()

    if 'temp' in df:
        df_daily['temp'] = df.temp.resample('D').mean()
        df_daily['tmin'] = df.temp.groupby(df.temp.index.date).min()
        df_daily['tmax'] = df.temp.groupby(df.temp.index.date).max()

    if 'precip' in df:
        df_daily['precip'] = df.precip.resample('D').sum()

    if 'glob' in df:
        df_daily['glob'] = df.glob.resample('D').mean()

    if 'hum' in df:
        df_daily['hum'] = df.hum.resample('D').mean()

    if 'hum' in df:
        df_daily['hum_min'] = df.hum.groupby(df.hum.index.date).min()

    if 'hum' in df:
        df_daily['hum_max'] = df.hum.groupby(df.hum.index.date).max()

    if 'wind' in df:
        df_daily['wind'] = df.wind.resample('D').mean()

    if 'ssd' in df:
        df_daily['ssd'] = df.ssd.resample('D').sum() / 60  # minutes to hours

    df_daily.index.name = None

    return df_daily
python
def daily_from_hourly(df):
    """Aggregates data (hourly to daily values) according to the characteristics
    of each variable (e.g., average for temperature, sum for precipitation)

    Args:
        df: dataframe including time series with one hour time steps

    Returns:
        dataframe (daily)
    """
    df_daily = pd.DataFrame()

    if 'temp' in df:
        df_daily['temp'] = df.temp.resample('D').mean()
        df_daily['tmin'] = df.temp.groupby(df.temp.index.date).min()
        df_daily['tmax'] = df.temp.groupby(df.temp.index.date).max()

    if 'precip' in df:
        df_daily['precip'] = df.precip.resample('D').sum()

    if 'glob' in df:
        df_daily['glob'] = df.glob.resample('D').mean()

    if 'hum' in df:
        df_daily['hum'] = df.hum.resample('D').mean()

    if 'hum' in df:
        df_daily['hum_min'] = df.hum.groupby(df.hum.index.date).min()

    if 'hum' in df:
        df_daily['hum_max'] = df.hum.groupby(df.hum.index.date).max()

    if 'wind' in df:
        df_daily['wind'] = df.wind.resample('D').mean()

    if 'ssd' in df:
        df_daily['ssd'] = df.ssd.resample('D').sum() / 60  # minutes to hours

    df_daily.index.name = None

    return df_daily
[ "def", "daily_from_hourly", "(", "df", ")", ":", "df_daily", "=", "pd", ".", "DataFrame", "(", ")", "if", "'temp'", "in", "df", ":", "df_daily", "[", "'temp'", "]", "=", "df", ".", "temp", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", "df_daily", "[", "'tmin'", "]", "=", "df", ".", "temp", ".", "groupby", "(", "df", ".", "temp", ".", "index", ".", "date", ")", ".", "min", "(", ")", "df_daily", "[", "'tmax'", "]", "=", "df", ".", "temp", ".", "groupby", "(", "df", ".", "temp", ".", "index", ".", "date", ")", ".", "max", "(", ")", "if", "'precip'", "in", "df", ":", "df_daily", "[", "'precip'", "]", "=", "df", ".", "precip", ".", "resample", "(", "'D'", ")", ".", "sum", "(", ")", "if", "'glob'", "in", "df", ":", "df_daily", "[", "'glob'", "]", "=", "df", ".", "glob", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", "if", "'hum'", "in", "df", ":", "df_daily", "[", "'hum'", "]", "=", "df", ".", "hum", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", "if", "'hum'", "in", "df", ":", "df_daily", "[", "'hum_min'", "]", "=", "df", ".", "hum", ".", "groupby", "(", "df", ".", "hum", ".", "index", ".", "date", ")", ".", "min", "(", ")", "if", "'hum'", "in", "df", ":", "df_daily", "[", "'hum_max'", "]", "=", "df", ".", "hum", ".", "groupby", "(", "df", ".", "hum", ".", "index", ".", "date", ")", ".", "max", "(", ")", "if", "'wind'", "in", "df", ":", "df_daily", "[", "'wind'", "]", "=", "df", ".", "wind", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", "if", "'ssd'", "in", "df", ":", "df_daily", "[", "'ssd'", "]", "=", "df", ".", "ssd", ".", "resample", "(", "'D'", ")", ".", "sum", "(", ")", "/", "60", "# minutes to hours", "df_daily", ".", "index", ".", "name", "=", "None", "return", "df_daily" ]
Aggregates data (hourly to daily values) according to the characteristics
of each variable (e.g., average for temperature, sum for precipitation)

Args:
    df: dataframe including time series with one hour time steps

Returns:
    dataframe (daily)
[ "Aggregates", "data", "(", "hourly", "to", "daily", "values", ")", "according", "to", "the", "characteristics", "of", "each", "variable", "(", "e", ".", "g", ".", "average", "for", "temperature", "sum", "for", "precipitation", ")" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L340-L380
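A sketch aggregating a small hourly frame; recognized columns are picked up by name, so temperature is averaged (with daily extremes) and precipitation is summed. Written against the older pandas idioms the function itself uses:

```python
import numpy as np
import pandas as pd
from melodist.util.util import daily_from_hourly  # import path assumed

index = pd.date_range('2019-01-01', periods=48, freq='H')
df = pd.DataFrame({'temp': 280. + np.sin(np.arange(48.)),
                   'precip': 0.2}, index=index)

daily = daily_from_hourly(df)
print(daily)  # two daily rows; daily['precip'] is 4.8 per day
```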
kristianfoerster/melodist
melodist/precipitation.py
disagg_prec
def disagg_prec(dailyData, method='equal', cascade_options=None, hourly_data_obs=None, zerodiv="uniform", shift=0):
    """The disaggregation function for precipitation.

    Parameters
    ----------
    dailyData : pd.Series
        daily data
    method : str
        method to disaggregate
    cascade_options : cascade object
        including statistical parameters for the cascade model
    hourly_data_obs : pd.Series
        observed hourly data of master station
    zerodiv : str
        method to deal with zero division by key "uniform" --> uniform distribution
    shift : int
        shifts the precipitation data by shift (int) steps (eg +7 for 7:00 to 6:00)
    """
    if method not in ('equal', 'cascade', 'masterstation'):
        raise ValueError('Invalid option')

    if method == 'equal':
        precip_disagg = melodist.distribute_equally(dailyData.precip, divide=True)
    elif method == 'masterstation':
        precip_disagg = precip_master_station(dailyData, hourly_data_obs, zerodiv)
    elif method == 'cascade':
        assert cascade_options is not None
        precip_disagg = disagg_prec_cascade(dailyData, cascade_options, shift=shift)

    return precip_disagg
python
def disagg_prec(dailyData, method='equal', cascade_options=None, hourly_data_obs=None, zerodiv="uniform", shift=0):
    """The disaggregation function for precipitation.

    Parameters
    ----------
    dailyData : pd.Series
        daily data
    method : str
        method to disaggregate
    cascade_options : cascade object
        including statistical parameters for the cascade model
    hourly_data_obs : pd.Series
        observed hourly data of master station
    zerodiv : str
        method to deal with zero division by key "uniform" --> uniform distribution
    shift : int
        shifts the precipitation data by shift (int) steps (eg +7 for 7:00 to 6:00)
    """
    if method not in ('equal', 'cascade', 'masterstation'):
        raise ValueError('Invalid option')

    if method == 'equal':
        precip_disagg = melodist.distribute_equally(dailyData.precip, divide=True)
    elif method == 'masterstation':
        precip_disagg = precip_master_station(dailyData, hourly_data_obs, zerodiv)
    elif method == 'cascade':
        assert cascade_options is not None
        precip_disagg = disagg_prec_cascade(dailyData, cascade_options, shift=shift)

    return precip_disagg
[ "def", "disagg_prec", "(", "dailyData", ",", "method", "=", "'equal'", ",", "cascade_options", "=", "None", ",", "hourly_data_obs", "=", "None", ",", "zerodiv", "=", "\"uniform\"", ",", "shift", "=", "0", ")", ":", "if", "method", "not", "in", "(", "'equal'", ",", "'cascade'", ",", "'masterstation'", ")", ":", "raise", "ValueError", "(", "'Invalid option'", ")", "if", "method", "==", "'equal'", ":", "precip_disagg", "=", "melodist", ".", "distribute_equally", "(", "dailyData", ".", "precip", ",", "divide", "=", "True", ")", "elif", "method", "==", "'masterstation'", ":", "precip_disagg", "=", "precip_master_station", "(", "dailyData", ",", "hourly_data_obs", ",", "zerodiv", ")", "elif", "method", "==", "'cascade'", ":", "assert", "cascade_options", "is", "not", "None", "precip_disagg", "=", "disagg_prec_cascade", "(", "dailyData", ",", "cascade_options", ",", "shift", "=", "shift", ")", "return", "precip_disagg" ]
The disaggregation function for precipitation.

Parameters
----------
dailyData : pd.Series
    daily data
method : str
    method to disaggregate
cascade_options : cascade object
    including statistical parameters for the cascade model
hourly_data_obs : pd.Series
    observed hourly data of master station
zerodiv : str
    method to deal with zero division by key "uniform" --> uniform distribution
shift : int
    shifts the precipitation data by shift (int) steps (eg +7 for 7:00 to 6:00)
[ "The", "disaggregation", "function", "for", "precipitation", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L45-L87
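A sketch of the simplest dispatch path, `method='equal'`, which just forwards to `distribute_equally`; the input needs a `precip` column, and the import path is assumed from the record:

```python
import pandas as pd
from melodist.precipitation import disagg_prec  # import path assumed

daily = pd.DataFrame({'precip': [12.0, 0.0, 4.8]},
                     index=pd.date_range('2019-01-01', periods=3, freq='D'))

hourly = disagg_prec(daily, method='equal')
print(hourly.resample('D').sum())  # daily totals are preserved
```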
kristianfoerster/melodist
melodist/precipitation.py
disagg_prec_cascade
def disagg_prec_cascade(precip_daily, cascade_options, hourly=True, level=9, shift=0, test=False):
    """Precipitation disaggregation with cascade model (Olsson, 1998)

    Parameters
    ----------
    precip_daily : pd.Series
        daily data
    hourly : bool
        if True, disaggregate to hourly resolution; if False, return 5-min
        disaggregated precipitation (disaggregation level depending on the
        "level" variable)
    cascade_options : cascade object
        including statistical parameters for the cascade model
    shift : int
        shifts the precipitation data by shift steps (eg +7 for 7:00 to 6:00)
    test : bool
        test mode, returns time series of each cascade level
    """
    if len(precip_daily) < 2:
        raise ValueError('Input data must have at least two elements.')

    # set missing values to zero:
    precip_daily = precip_daily.copy()
    missing_days = precip_daily.index[precip_daily.isnull()]
    precip_daily[missing_days] = 0

    if hourly:
        si = 5  # index of first level
    else:
        si = level

    # statistics for branching into two bins
    wxxcum = np.zeros((7, 2, 4))

    if isinstance(cascade_options, melodist.cascade.CascadeStatistics):
        # this is the standard case considering one data set for all levels
        # get cumulative probabilities for branching
        overwrite_stats = False
        for k in range(0, 7):
            wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
            if k > 0:
                wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
    elif isinstance(cascade_options, list):
        if len(cascade_options) == si:
            overwrite_stats = True
            list_casc = cascade_options
        else:
            raise ValueError('Cascade statistics list must have %s elements!' % si)
    else:
        raise TypeError('cascade_options has invalid type')

    # arrays for each level
    n = len(precip_daily)
    vdn1 = np.zeros(n*2)
    vdn2 = np.zeros(n*4)
    vdn3 = np.zeros(n*8)
    vdn4 = np.zeros(n*16)
    vdn5 = np.zeros(n*32)
    if not hourly:
        vdn6 = np.zeros(n*64)
        vdn7 = np.zeros(n*128)
        vdn8 = np.zeros(n*256)
        vdn9 = np.zeros(n*512)
        if level == 10 or level == 11:
            vdn10 = np.zeros(n*1024)
            if level == 11:
                vdn11 = np.zeros(n*2048)

    # class boundaries for histograms
    wclassbounds = np.array([0.0, 0.1429, 0.2857, 0.4286, 0.5714, 0.7143, 0.8571, 1.0])

    # disaggregation for each level
    for l in range(1, si+1):
        if l == 1:
            vdn_in = precip_daily
            vdn_out = vdn1
        elif l == 2:
            vdn_in = vdn_out
            vdn_out = vdn2
        elif l == 3:
            vdn_in = vdn_out
            vdn_out = vdn3
        elif l == 4:
            vdn_in = vdn_out
            vdn_out = vdn4
        elif l == 5:
            vdn_in = vdn_out
            vdn_out = vdn5
        elif l == 6:
            vdn_in = vdn_out
            vdn_out = vdn6
        elif l == 7:
            vdn_in = vdn_out
            vdn_out = vdn7
        elif l == 8:
            vdn_in = vdn_out
            vdn_out = vdn8
        elif l == 9:
            vdn_in = vdn_out
            vdn_out = vdn9
        elif l == 10:
            vdn_in = vdn_out
            vdn_out = vdn10
        elif l == 11:
            vdn_in = vdn_out
            vdn_out = vdn11

        si -= 1
        if overwrite_stats:
            cascade_options = list_casc[si]
            for k in range(0, 7):
                wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
                if k > 0:
                    wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
            meanvol = cascade_options.threshold[0]
        else:
            meanvol = cascade_options.threshold[si]

        # evaluate mean rainfall intensity for wet boxes
        # these values should be determined during the aggregation phase!!!!!
        # mean volume threshold
        # meanvol = np.mean(vdn_in[vdn_in>0.])
        # use values derived by parameter estimation instead, see above

        j = 0
        for i in range(0, len(vdn_in)):
            # it's raining now?
            if vdn_in[i] > 0:
                # determine type of box
                if i == 0:  # only starting or isolated
                    if vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.starting
                    else:
                        vbtype = cascade.BoxTypes.isolated
                elif i == len(vdn_in)-1:  # only ending or isolated
                    if vdn_in[i-1] > 0:
                        vbtype = cascade.BoxTypes.ending
                    else:
                        vbtype = cascade.BoxTypes.isolated
                else:  # neither at the end nor at the beginning
                    if vdn_in[i-1] == 0 and vdn_in[i+1] == 0:
                        vbtype = cascade.BoxTypes.isolated
                    if vdn_in[i-1] == 0 and vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.starting
                    if vdn_in[i-1] > 0 and vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.enclosed
                    if vdn_in[i-1] > 0 and vdn_in[i+1] == 0:
                        vbtype = cascade.BoxTypes.ending

                # above or below mean?
                if vdn_in[i] > meanvol:
                    belowabove = 1  # above mean
                else:
                    belowabove = 0  # below mean

                p = np.zeros((3, 1))
                p[0] = cascade_options.p01[belowabove, vbtype-1]  # index changed!
                p[1] = cascade_options.p10[belowabove, vbtype-1]
                p[2] = cascade_options.pxx[belowabove, vbtype-1]

                # draw a random number to determine the branching type
                rndp = np.random.random()

                if rndp <= p[0]:
                    # first box 0, second box: 1  P(0/1)
                    vdn_out[j] = 0.0
                    j = j + 1
                    vdn_out[j] = vdn_in[i]
                    j = j + 1
                elif rndp > p[0] and rndp <= p[0] + p[1]:
                    # first box 1, second box: 0  P(1/0)
                    vdn_out[j] = vdn_in[i]
                    j = j + 1
                    vdn_out[j] = 0.0
                    j = j + 1
                else:
                    # both boxes wet, we need a new random number
                    rndw = np.random.random()
                    # guess w1:
                    for k in range(0, 7):
                        if rndw <= wxxcum[k, belowabove, vbtype-1]:
                            w1 = wclassbounds[k+1] - 1./14.  # class center
                            break
                    vdn_out[j] = w1 * vdn_in[i]
                    j = j + 1
                    vdn_out[j] = (1. - w1) * vdn_in[i]
                    j = j + 1

                    # check results (in the previous version this error has never been observed)
                    if w1 < 0 or w1 > 1:
                        print('error')
                        return
            else:
                # add two dry boxes
                vdn_out[j] = 0.0
                j = j + 1
                vdn_out[j] = 0.0
                j = j + 1

    if hourly:
        # uniformly disaggregate 0.75 h values to 0.25 h values
        vdn_025 = np.zeros(len(vdn_out)*3)
        j = 0
        for i in range(0, len(vdn_out)):
            for m in range(0, 3):
                vdn_025[j+m] = vdn_out[i] / 3.
            j = j + 3

        # aggregate to hourly time steps
        vdn_025cs = np.cumsum(vdn_025)
        vdn = np.zeros(int(len(vdn_025)/4))
        for i in range(0, len(vdn)+1):
            if i == 0:
                # for first hour take 4th item
                vdn[i] = vdn_025cs[3]
            elif i == 1:
                pass
            else:
                # >1 (starting with 2-1 = 1 item)
                vdn[i-1] = vdn_025cs[(i*4)-1] - vdn_025cs[(i*4)-5]

        disagg_precip = pd.Series(index=melodist.util.hourly_index(precip_daily.index), data=vdn)
    else:
        precip_sn = pd.Series(index=sub_level_index(precip_daily.index, level=level, fill_gaps=False), data=vdn_out)
        disagg_precip = precip_sn.resample('5min').sum()

    # set missing days to nan again:
    for date in missing_days:
        disagg_precip[disagg_precip.index.date == date.date()] = np.nan

    # shifts the data by shift steps (fills with nan / cuts edge data)
    if shift != 0:
        disagg_precip = disagg_precip.shift(shift)  # ? freq='1U')

    # return time series
    if test:
        if hourly:
            return vdn1, vdn2, vdn3, vdn4, vdn5, vdn_025, disagg_precip
        else:
            if level == 9:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, precip_sn, disagg_precip
            elif level == 10:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, precip_sn, disagg_precip
            else:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, vdn11, precip_sn, disagg_precip
    else:
        return disagg_precip
python
def disagg_prec_cascade(precip_daily, cascade_options, hourly=True, level=9, shift=0, test=False):
    """Precipitation disaggregation with cascade model (Olsson, 1998)

    Parameters
    ----------
    precip_daily : pd.Series
        daily data
    hourly : bool
        if True, disaggregate to hourly resolution; if False, return 5-min
        disaggregated precipitation (disaggregation level depending on the
        "level" variable)
    cascade_options : cascade object
        including statistical parameters for the cascade model
    shift : int
        shifts the precipitation data by shift steps (eg +7 for 7:00 to 6:00)
    test : bool
        test mode, returns time series of each cascade level
    """
    if len(precip_daily) < 2:
        raise ValueError('Input data must have at least two elements.')

    # set missing values to zero:
    precip_daily = precip_daily.copy()
    missing_days = precip_daily.index[precip_daily.isnull()]
    precip_daily[missing_days] = 0

    if hourly:
        si = 5  # index of first level
    else:
        si = level

    # statistics for branching into two bins
    wxxcum = np.zeros((7, 2, 4))

    if isinstance(cascade_options, melodist.cascade.CascadeStatistics):
        # this is the standard case considering one data set for all levels
        # get cumulative probabilities for branching
        overwrite_stats = False
        for k in range(0, 7):
            wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
            if k > 0:
                wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
    elif isinstance(cascade_options, list):
        if len(cascade_options) == si:
            overwrite_stats = True
            list_casc = cascade_options
        else:
            raise ValueError('Cascade statistics list must have %s elements!' % si)
    else:
        raise TypeError('cascade_options has invalid type')

    # arrays for each level
    n = len(precip_daily)
    vdn1 = np.zeros(n*2)
    vdn2 = np.zeros(n*4)
    vdn3 = np.zeros(n*8)
    vdn4 = np.zeros(n*16)
    vdn5 = np.zeros(n*32)
    if not hourly:
        vdn6 = np.zeros(n*64)
        vdn7 = np.zeros(n*128)
        vdn8 = np.zeros(n*256)
        vdn9 = np.zeros(n*512)
        if level == 10 or level == 11:
            vdn10 = np.zeros(n*1024)
            if level == 11:
                vdn11 = np.zeros(n*2048)

    # class boundaries for histograms
    wclassbounds = np.array([0.0, 0.1429, 0.2857, 0.4286, 0.5714, 0.7143, 0.8571, 1.0])

    # disaggregation for each level
    for l in range(1, si+1):
        if l == 1:
            vdn_in = precip_daily
            vdn_out = vdn1
        elif l == 2:
            vdn_in = vdn_out
            vdn_out = vdn2
        elif l == 3:
            vdn_in = vdn_out
            vdn_out = vdn3
        elif l == 4:
            vdn_in = vdn_out
            vdn_out = vdn4
        elif l == 5:
            vdn_in = vdn_out
            vdn_out = vdn5
        elif l == 6:
            vdn_in = vdn_out
            vdn_out = vdn6
        elif l == 7:
            vdn_in = vdn_out
            vdn_out = vdn7
        elif l == 8:
            vdn_in = vdn_out
            vdn_out = vdn8
        elif l == 9:
            vdn_in = vdn_out
            vdn_out = vdn9
        elif l == 10:
            vdn_in = vdn_out
            vdn_out = vdn10
        elif l == 11:
            vdn_in = vdn_out
            vdn_out = vdn11

        si -= 1
        if overwrite_stats:
            cascade_options = list_casc[si]
            for k in range(0, 7):
                wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
                if k > 0:
                    wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
            meanvol = cascade_options.threshold[0]
        else:
            meanvol = cascade_options.threshold[si]

        # evaluate mean rainfall intensity for wet boxes
        # these values should be determined during the aggregation phase!!!!!
        # mean volume threshold
        # meanvol = np.mean(vdn_in[vdn_in>0.])
        # use values derived by parameter estimation instead, see above

        j = 0
        for i in range(0, len(vdn_in)):
            # it's raining now?
            if vdn_in[i] > 0:
                # determine type of box
                if i == 0:  # only starting or isolated
                    if vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.starting
                    else:
                        vbtype = cascade.BoxTypes.isolated
                elif i == len(vdn_in)-1:  # only ending or isolated
                    if vdn_in[i-1] > 0:
                        vbtype = cascade.BoxTypes.ending
                    else:
                        vbtype = cascade.BoxTypes.isolated
                else:  # neither at the end nor at the beginning
                    if vdn_in[i-1] == 0 and vdn_in[i+1] == 0:
                        vbtype = cascade.BoxTypes.isolated
                    if vdn_in[i-1] == 0 and vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.starting
                    if vdn_in[i-1] > 0 and vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.enclosed
                    if vdn_in[i-1] > 0 and vdn_in[i+1] == 0:
                        vbtype = cascade.BoxTypes.ending

                # above or below mean?
                if vdn_in[i] > meanvol:
                    belowabove = 1  # above mean
                else:
                    belowabove = 0  # below mean

                p = np.zeros((3, 1))
                p[0] = cascade_options.p01[belowabove, vbtype-1]  # index changed!
                p[1] = cascade_options.p10[belowabove, vbtype-1]
                p[2] = cascade_options.pxx[belowabove, vbtype-1]

                # draw a random number to determine the branching type
                rndp = np.random.random()

                if rndp <= p[0]:
                    # first box 0, second box: 1  P(0/1)
                    vdn_out[j] = 0.0
                    j = j + 1
                    vdn_out[j] = vdn_in[i]
                    j = j + 1
                elif rndp > p[0] and rndp <= p[0] + p[1]:
                    # first box 1, second box: 0  P(1/0)
                    vdn_out[j] = vdn_in[i]
                    j = j + 1
                    vdn_out[j] = 0.0
                    j = j + 1
                else:
                    # both boxes wet, we need a new random number
                    rndw = np.random.random()
                    # guess w1:
                    for k in range(0, 7):
                        if rndw <= wxxcum[k, belowabove, vbtype-1]:
                            w1 = wclassbounds[k+1] - 1./14.  # class center
                            break
                    vdn_out[j] = w1 * vdn_in[i]
                    j = j + 1
                    vdn_out[j] = (1. - w1) * vdn_in[i]
                    j = j + 1

                    # check results (in the previous version this error has never been observed)
                    if w1 < 0 or w1 > 1:
                        print('error')
                        return
            else:
                # add two dry boxes
                vdn_out[j] = 0.0
                j = j + 1
                vdn_out[j] = 0.0
                j = j + 1

    if hourly:
        # uniformly disaggregate 0.75 h values to 0.25 h values
        vdn_025 = np.zeros(len(vdn_out)*3)
        j = 0
        for i in range(0, len(vdn_out)):
            for m in range(0, 3):
                vdn_025[j+m] = vdn_out[i] / 3.
            j = j + 3

        # aggregate to hourly time steps
        vdn_025cs = np.cumsum(vdn_025)
        vdn = np.zeros(int(len(vdn_025)/4))
        for i in range(0, len(vdn)+1):
            if i == 0:
                # for first hour take 4th item
                vdn[i] = vdn_025cs[3]
            elif i == 1:
                pass
            else:
                # >1 (starting with 2-1 = 1 item)
                vdn[i-1] = vdn_025cs[(i*4)-1] - vdn_025cs[(i*4)-5]

        disagg_precip = pd.Series(index=melodist.util.hourly_index(precip_daily.index), data=vdn)
    else:
        precip_sn = pd.Series(index=sub_level_index(precip_daily.index, level=level, fill_gaps=False), data=vdn_out)
        disagg_precip = precip_sn.resample('5min').sum()

    # set missing days to nan again:
    for date in missing_days:
        disagg_precip[disagg_precip.index.date == date.date()] = np.nan

    # shifts the data by shift steps (fills with nan / cuts edge data)
    if shift != 0:
        disagg_precip = disagg_precip.shift(shift)  # ? freq='1U')

    # return time series
    if test:
        if hourly:
            return vdn1, vdn2, vdn3, vdn4, vdn5, vdn_025, disagg_precip
        else:
            if level == 9:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, precip_sn, disagg_precip
            elif level == 10:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, precip_sn, disagg_precip
            else:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, vdn11, precip_sn, disagg_precip
    else:
        return disagg_precip
[ "def", "disagg_prec_cascade", "(", "precip_daily", ",", "cascade_options", ",", "hourly", "=", "True", ",", "level", "=", "9", ",", "shift", "=", "0", ",", "test", "=", "False", ")", ":", "if", "len", "(", "precip_daily", ")", "<", "2", ":", "raise", "ValueError", "(", "'Input data must have at least two elements.'", ")", "# set missing values to zero:", "precip_daily", "=", "precip_daily", ".", "copy", "(", ")", "missing_days", "=", "precip_daily", ".", "index", "[", "precip_daily", ".", "isnull", "(", ")", "]", "precip_daily", "[", "missing_days", "]", "=", "0", "if", "hourly", ":", "si", "=", "5", "# index of first level", "else", ":", "si", "=", "level", "# statistics for branching into two bins", "wxxcum", "=", "np", ".", "zeros", "(", "(", "7", ",", "2", ",", "4", ")", ")", "if", "isinstance", "(", "cascade_options", ",", "melodist", ".", "cascade", ".", "CascadeStatistics", ")", ":", "# this is the standard case considering one data set for all levels", "# get cumulative probabilities for branching", "overwrite_stats", "=", "False", "for", "k", "in", "range", "(", "0", ",", "7", ")", ":", "wxxcum", "[", "k", ",", ":", ",", ":", "]", "=", "cascade_options", ".", "wxx", "[", "k", ",", ":", ",", ":", "]", "if", "k", ">", "0", ":", "wxxcum", "[", "k", ",", ":", ",", ":", "]", "=", "wxxcum", "[", "k", "-", "1", ",", ":", ",", ":", "]", "+", "wxxcum", "[", "k", ",", ":", ",", ":", "]", "elif", "isinstance", "(", "cascade_options", ",", "list", ")", ":", "if", "len", "(", "cascade_options", ")", "==", "si", ":", "#5", "overwrite_stats", "=", "True", "list_casc", "=", "cascade_options", "else", ":", "raise", "ValueError", "(", "'Cascade statistics list must have %s elements!'", "%", "si", ")", "else", ":", "raise", "TypeError", "(", "'cascade_options has invalid type'", ")", "# arrays for each level", "n", "=", "len", "(", "precip_daily", ")", "vdn1", "=", "np", ".", "zeros", "(", "n", "*", "2", ")", "vdn2", "=", "np", ".", "zeros", "(", "n", "*", "4", ")", "vdn3", "=", "np", ".", "zeros", "(", "n", "*", "8", ")", "vdn4", "=", "np", ".", "zeros", "(", "n", "*", "16", ")", "vdn5", "=", "np", ".", "zeros", "(", "n", "*", "32", ")", "if", "not", "hourly", ":", "vdn6", "=", "np", ".", "zeros", "(", "n", "*", "64", ")", "vdn7", "=", "np", ".", "zeros", "(", "n", "*", "128", ")", "vdn8", "=", "np", ".", "zeros", "(", "n", "*", "256", ")", "vdn9", "=", "np", ".", "zeros", "(", "n", "*", "512", ")", "if", "level", "==", "10", "or", "level", "==", "11", ":", "vdn10", "=", "np", ".", "zeros", "(", "n", "*", "1024", ")", "if", "level", "==", "11", ":", "vdn11", "=", "np", ".", "zeros", "(", "n", "*", "2048", ")", "# class boundaries for histograms", "wclassbounds", "=", "np", ".", "array", "(", "[", "0.0", ",", "0.1429", ",", "0.2857", ",", "0.4286", ",", "0.5714", ",", "0.7143", ",", "0.8571", ",", "1.0", "]", ")", "# disaggregation for each level", "for", "l", "in", "range", "(", "1", ",", "si", "+", "1", ")", ":", "if", "l", "==", "1", ":", "vdn_in", "=", "precip_daily", "vdn_out", "=", "vdn1", "elif", "l", "==", "2", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn2", "elif", "l", "==", "3", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn3", "elif", "l", "==", "4", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn4", "elif", "l", "==", "5", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn5", "elif", "l", "==", "6", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn6", "elif", "l", "==", "7", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn7", "elif", "l", "==", "8", ":", 
"vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn8", "elif", "l", "==", "9", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn9", "elif", "l", "==", "10", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn10", "elif", "l", "==", "11", ":", "vdn_in", "=", "vdn_out", "vdn_out", "=", "vdn11", "si", "-=", "1", "if", "overwrite_stats", ":", "cascade_options", "=", "list_casc", "[", "si", "]", "for", "k", "in", "range", "(", "0", ",", "7", ")", ":", "wxxcum", "[", "k", ",", ":", ",", ":", "]", "=", "cascade_options", ".", "wxx", "[", "k", ",", ":", ",", ":", "]", "if", "k", ">", "0", ":", "wxxcum", "[", "k", ",", ":", ",", ":", "]", "=", "wxxcum", "[", "k", "-", "1", ",", ":", ",", ":", "]", "+", "wxxcum", "[", "k", ",", ":", ",", ":", "]", "meanvol", "=", "cascade_options", ".", "threshold", "[", "0", "]", "else", ":", "meanvol", "=", "cascade_options", ".", "threshold", "[", "si", "]", "# evaluate mean rainfall intensity for wet boxes", "# these values should be determined during the aggregation phase!!!!!", "# mean volume threshold", "# meanvol = np.mean(vdn_in[vdn_in>0.])", "# use values derived parameter by parameter estimation instead", "# see above", "j", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "vdn_in", ")", ")", ":", "# it's raining now?", "if", "vdn_in", "[", "i", "]", ">", "0", ":", "# determine type of box", "if", "i", "==", "0", ":", "# only starting or isolated", "if", "vdn_in", "[", "i", "+", "1", "]", ">", "0", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "starting", "else", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "isolated", "elif", "i", "==", "len", "(", "vdn_in", ")", "-", "1", ":", "# only ending or isolated", "if", "vdn_in", "[", "i", "-", "1", "]", ">", "0", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "ending", "else", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "isolated", "else", ":", "# neither at at the end nor at the beginning", "if", "vdn_in", "[", "i", "-", "1", "]", "==", "0", "and", "vdn_in", "[", "i", "+", "1", "]", "==", "0", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "isolated", "if", "vdn_in", "[", "i", "-", "1", "]", "==", "0", "and", "vdn_in", "[", "i", "+", "1", "]", ">", "0", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "starting", "if", "vdn_in", "[", "i", "-", "1", "]", ">", "0", "and", "vdn_in", "[", "i", "+", "1", "]", ">", "0", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "enclosed", "if", "vdn_in", "[", "i", "-", "1", "]", ">", "0", "and", "vdn_in", "[", "i", "+", "1", "]", "==", "0", ":", "vbtype", "=", "cascade", ".", "BoxTypes", ".", "ending", "# above or below mean?", "if", "vdn_in", "[", "i", "]", ">", "meanvol", ":", "belowabove", "=", "1", "# above mean", "else", ":", "belowabove", "=", "0", "# below mean", "#", "p", "=", "np", ".", "zeros", "(", "(", "3", ",", "1", ")", ")", "p", "[", "0", "]", "=", "cascade_options", ".", "p01", "[", "belowabove", ",", "vbtype", "-", "1", "]", "# index changed!", "p", "[", "1", "]", "=", "cascade_options", ".", "p10", "[", "belowabove", ",", "vbtype", "-", "1", "]", "p", "[", "2", "]", "=", "cascade_options", ".", "pxx", "[", "belowabove", ",", "vbtype", "-", "1", "]", "# draw a random number to determine the braching type", "rndp", "=", "np", ".", "random", ".", "random", "(", ")", "if", "rndp", "<=", "p", "[", "0", "]", ":", "# first box 0, second box: 1 P(0/1)", "vdn_out", "[", "j", "]", "=", "0.0", "j", "=", "j", "+", "1", "vdn_out", "[", "j", "]", "=", "vdn_in", "[", "i", "]", "j", "=", "j", "+", "1", "elif", "rndp", 
">", "p", "[", "0", "]", "and", "rndp", "<=", "p", "[", "0", "]", "+", "p", "[", "1", "]", ":", "# first box 1, second box: 0 P(1/0)", "vdn_out", "[", "j", "]", "=", "vdn_in", "[", "i", "]", "j", "=", "j", "+", "1", "vdn_out", "[", "j", "]", "=", "0.0", "j", "=", "j", "+", "1", "else", ":", "# both boxes wet", "# we need a new random number", "rndw", "=", "np", ".", "random", ".", "random", "(", ")", "# guess w1:", "for", "k", "in", "range", "(", "0", ",", "7", ")", ":", "if", "rndw", "<=", "wxxcum", "[", "k", ",", "belowabove", ",", "vbtype", "-", "1", "]", ":", "w1", "=", "wclassbounds", "[", "k", "+", "1", "]", "-", "1.", "/", "14.", "# class center", "break", "vdn_out", "[", "j", "]", "=", "w1", "*", "vdn_in", "[", "i", "]", "j", "=", "j", "+", "1", "vdn_out", "[", "j", "]", "=", "(", "1.", "-", "w1", ")", "*", "vdn_in", "[", "i", "]", "j", "=", "j", "+", "1", "# check results (in the previous version this error has never been observed)", "if", "w1", "<", "0", "or", "w1", ">", "1", ":", "print", "(", "'error'", ")", "return", "else", ":", "# add two dry boxes", "vdn_out", "[", "j", "]", "=", "0.0", "j", "=", "j", "+", "1", "vdn_out", "[", "j", "]", "=", "0.0", "j", "=", "j", "+", "1", "if", "hourly", ":", "# uniformly disaggregate 0.75 h values to 0.25 h values", "vdn_025", "=", "np", ".", "zeros", "(", "len", "(", "vdn_out", ")", "*", "3", ")", "j", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "vdn_out", ")", ")", ":", "for", "m", "in", "range", "(", "0", ",", "3", ")", ":", "vdn_025", "[", "j", "+", "m", "]", "=", "vdn_out", "[", "i", "]", "/", "3.", "j", "=", "j", "+", "3", "# aggregate to hourly time steps", "vdn_025cs", "=", "np", ".", "cumsum", "(", "vdn_025", ")", "vdn", "=", "np", ".", "zeros", "(", "int", "(", "len", "(", "vdn_025", ")", "/", "4", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "vdn", ")", "+", "1", ")", ":", "# for first hour take 4th item", "if", "i", "==", "0", ":", "vdn", "[", "i", "]", "=", "vdn_025cs", "[", "3", "]", "elif", "i", "==", "1", ":", "pass", "else", ":", "# >1 (starting with 2-1 = 1 item)", "vdn", "[", "i", "-", "1", "]", "=", "vdn_025cs", "[", "(", "i", "*", "4", ")", "-", "1", "]", "-", "vdn_025cs", "[", "(", "i", "*", "4", ")", "-", "5", "]", "disagg_precip", "=", "pd", ".", "Series", "(", "index", "=", "melodist", ".", "util", ".", "hourly_index", "(", "precip_daily", ".", "index", ")", ",", "data", "=", "vdn", ")", "else", ":", "precip_sn", "=", "pd", ".", "Series", "(", "index", "=", "sub_level_index", "(", "precip_daily", ".", "index", ",", "level", "=", "level", ",", "fill_gaps", "=", "False", ")", ",", "data", "=", "vdn_out", ")", "disagg_precip", "=", "precip_sn", ".", "resample", "(", "'5min'", ")", ".", "sum", "(", ")", "# set missing days to nan again:", "for", "date", "in", "missing_days", ":", "disagg_precip", "[", "disagg_precip", ".", "index", ".", "date", "==", "date", ".", "date", "(", ")", "]", "=", "np", ".", "nan", "# shifts the data by shift steps (fills with nan/cuts edge data )", "if", "shift", "!=", "0", ":", "disagg_precip", "=", "disagg_precip", ".", "shift", "(", "shift", ")", "#? 
freq='1U')", "# return time series", "if", "test", ":", "if", "hourly", ":", "return", "vdn1", ",", "vdn2", ",", "vdn3", ",", "vdn4", ",", "vdn5", ",", "vdn_025", ",", "disagg_precip", "else", ":", "if", "level", "==", "9", ":", "return", "vdn1", ",", "vdn2", ",", "vdn3", ",", "vdn4", ",", "vdn5", ",", "vdn6", ",", "vdn7", ",", "vdn8", ",", "vdn9", ",", "precip_sn", ",", "disagg_precip", "elif", "level", "==", "10", ":", "return", "vdn1", ",", "vdn2", ",", "vdn3", ",", "vdn4", ",", "vdn5", ",", "vdn6", ",", "vdn7", ",", "vdn8", ",", "vdn9", ",", "vdn10", ",", "precip_sn", ",", "disagg_precip", "else", ":", "return", "vdn1", ",", "vdn2", ",", "vdn3", ",", "vdn4", ",", "vdn5", ",", "vdn6", ",", "vdn7", ",", "vdn8", ",", "vdn9", ",", "vdn10", ",", "vdn11", ",", "precip_sn", ",", "disagg_precip", "else", ":", "return", "disagg_precip" ]
Precipitation disaggregation with cascade model (Olsson, 1998)

Parameters
----------
precip_daily : pd.Series
    daily data
hourly : bool
    if True, disaggregates to hourly resolution; if False, returns 5-min
    disaggregated precipitation (disaggregation level depending on the
    "level" variable)
cascade_options : cascade object
    including statistical parameters for the cascade model
shift : int
    shifts the precipitation data by shift steps (e.g. +7 for 7:00 to 6:00)
test : bool
    test mode, returns time series of each cascade level
[ "Precipitation", "disaggregation", "with", "cascade", "model", "(", "Olsson", "1998", ")" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L90-L357
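The record above only shows the tokenized body of melodist's cascade disaggregation routine, so its exact entry-point name is not visible in this excerpt. A minimal usage sketch, assuming the routine is exposed as disagg_prec_cascade (a hypothetical name) with the signature given in the docstring:

    import numpy as np
    import pandas as pd
    import melodist

    # synthetic daily precipitation with a DatetimeIndex (illustrative input)
    days = pd.date_range('2020-01-01', periods=30, freq='D')
    precip_daily = pd.Series(np.random.gamma(0.5, 4.0, size=30), index=days)

    # cascade statistics, e.g. derived from observations or sample defaults
    casc = melodist.cascade.CascadeStatistics()
    casc.fill_with_sample_data()

    # hypothetical call to the routine documented above
    precip_hourly = disagg_prec_cascade(precip_daily, cascade_options=casc, hourly=True)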
kristianfoerster/melodist
melodist/precipitation.py
precip_master_station
def precip_master_station(precip_daily, master_precip_hourly, zerodiv):
    """Disaggregate precipitation based on the patterns of a master station

    Parameters
    ----------
    precip_daily : pd.Series
        daily data
    master_precip_hourly : pd.Series
        observed hourly data of the master station
    zerodiv : str
        method to deal with zero division by key "uniform" --> uniform distribution
    """
    precip_hourly = pd.Series(index=melodist.util.hourly_index(precip_daily.index))

    # disaggregate each day based on the master station's hourly pattern
    for index_d, precip in precip_daily.iteritems():
        # get hourly data of the day
        index = index_d.date().isoformat()
        precip_h = master_precip_hourly[index]

        # calc rel values and multiply by daily sums
        # check for zero division and missing data
        if precip_h.sum() != 0 and not np.isnan(precip_h.sum()):
            precip_h_rel = (precip_h / precip_h.sum()) * precip
        else:
            # uniform option will preserve daily data by uniform distr
            if zerodiv == 'uniform':
                precip_h_rel = (1 / 24) * precip
            else:
                precip_h_rel = 0

        # write the disaggregated day to data
        precip_hourly[index] = precip_h_rel

    return precip_hourly
python
def precip_master_station(precip_daily, master_precip_hourly, zerodiv):
    """Disaggregate precipitation based on the patterns of a master station

    Parameters
    ----------
    precip_daily : pd.Series
        daily data
    master_precip_hourly : pd.Series
        observed hourly data of the master station
    zerodiv : str
        method to deal with zero division by key "uniform" --> uniform distribution
    """
    precip_hourly = pd.Series(index=melodist.util.hourly_index(precip_daily.index))

    # disaggregate each day based on the master station's hourly pattern
    for index_d, precip in precip_daily.iteritems():
        # get hourly data of the day
        index = index_d.date().isoformat()
        precip_h = master_precip_hourly[index]

        # calc rel values and multiply by daily sums
        # check for zero division and missing data
        if precip_h.sum() != 0 and not np.isnan(precip_h.sum()):
            precip_h_rel = (precip_h / precip_h.sum()) * precip
        else:
            # uniform option will preserve daily data by uniform distr
            if zerodiv == 'uniform':
                precip_h_rel = (1 / 24) * precip
            else:
                precip_h_rel = 0

        # write the disaggregated day to data
        precip_hourly[index] = precip_h_rel

    return precip_hourly
[ "def", "precip_master_station", "(", "precip_daily", ",", "master_precip_hourly", ",", "zerodiv", ")", ":", "precip_hourly", "=", "pd", ".", "Series", "(", "index", "=", "melodist", ".", "util", ".", "hourly_index", "(", "precip_daily", ".", "index", ")", ")", "# disaggregate each day based on the master station's hourly pattern", "for", "index_d", ",", "precip", "in", "precip_daily", ".", "iteritems", "(", ")", ":", "# get hourly data of the day", "index", "=", "index_d", ".", "date", "(", ")", ".", "isoformat", "(", ")", "precip_h", "=", "master_precip_hourly", "[", "index", "]", "# calc rel values and multiply by daily sums", "# check for zero division and missing data", "if", "precip_h", ".", "sum", "(", ")", "!=", "0", "and", "not", "np", ".", "isnan", "(", "precip_h", ".", "sum", "(", ")", ")", ":", "precip_h_rel", "=", "(", "precip_h", "/", "precip_h", ".", "sum", "(", ")", ")", "*", "precip", "else", ":", "# uniform option will preserve daily data by uniform distr", "if", "zerodiv", "==", "'uniform'", ":", "precip_h_rel", "=", "(", "1", "/", "24", ")", "*", "precip", "else", ":", "precip_h_rel", "=", "0", "# write the disaggregated day to data", "precip_hourly", "[", "index", "]", "=", "precip_h_rel", "return", "precip_hourly" ]
Disaggregate precipitation based on the patterns of a master station

Parameters
----------
precip_daily : pd.Series
    daily data
master_precip_hourly : pd.Series
    observed hourly data of the master station
zerodiv : str
    method to deal with zero division by key "uniform" --> uniform distribution
[ "Disaggregate", "precipitation", "based", "on", "the", "patterns", "of", "a", "master", "station" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L359-L400
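A short usage sketch for precip_master_station; the series are synthetic and assume both stations share an hourly DatetimeIndex:

    import numpy as np
    import pandas as pd

    hours = pd.date_range('2020-01-01', periods=48, freq='H')
    master_hourly = pd.Series(np.random.rand(48), index=hours)

    # daily totals of the station to be disaggregated (here: scaled master sums)
    precip_daily = master_hourly.resample('D').sum() * 1.2

    disagg = precip_master_station(precip_daily, master_hourly, zerodiv='uniform')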
kristianfoerster/melodist
melodist/precipitation.py
aggregate_precipitation
def aggregate_precipitation(vec_data, hourly=True, percentile=50):
    """Aggregates highly resolved precipitation data and creates statistics

    Parameters
    ----------
    vec_data : pd.DataFrame
        hourly (hourly=True) OR 5-min values in a 'precip' column

    Returns
    -------
    output : cascade object representing statistics of the cascade model
    """
    cascade_opt = cascade.CascadeStatistics()
    cascade_opt.percentile = percentile

    # length of input time series
    n_in = len(vec_data)
    n_out = np.floor(n_in / 2)

    # alternative:
    # 1st step: new time series
    vec_time = vec_data.index
    vdn0 = []
    vtn0 = []
    j = 0
    for i in range(0, n_in):
        if np.mod(i, 2) != 0:
            vdn0.append(vec_data.precip.values[i-1] + vec_data.precip.values[i])
            vtn0.append(vec_time[i])
            j = j + 1
    vdn = pd.DataFrame(index=vtn0, data={'precip': vdn0})

    # length of new time series
    n_out = len(vdn)

    # series of box types:
    vbtype = np.zeros((n_out, ), dtype=np.int)

    # fields for empirical probabilities
    # counts
    nb = np.zeros((2, 4))
    nbxx = np.zeros((2, 4))

    # class boundaries for histograms
    # wclassbounds = np.linspace(0, 1, num=8)
    wlower = np.array([0, 0.1429, 0.2857, 0.4286, 0.5714, 0.7143, 0.8571])  # wclassbounds[0:7]
    wupper = np.array([0.1429, 0.2857, 0.4286, 0.5714, 0.7143, 0.8571, 1.0])  # wclassbounds[1:8]

    # evaluate mean rainfall intensity for wet boxes
    # these values should be determined during the aggregation phase!!!!!
    # mean volume threshold
    meanvol = np.percentile(vdn.precip[vdn.precip > 0.],
                            cascade_opt.percentile)  # np.mean(vdn.precip[vdn.precip>0.])
    cascade_opt.threshold = np.array([meanvol])

    # 2nd step: classify boxes at the upper level
    for i in range(0, n_out):
        if vdn.precip.values[i] > 0.:  # rain?
            if i == 0:
                # only starting or isolated
                if vdn.precip.values[i+1] > 0.:
                    vbtype[i] = cascade.BoxTypes.starting
                else:
                    vbtype[i] = cascade.BoxTypes.isolated
            elif i == n_out - 1:
                # only ending or isolated
                if vdn.precip.values[i-1] > 0.:
                    vbtype[i] = cascade.BoxTypes.ending
                else:
                    vbtype[i] = cascade.BoxTypes.isolated
            else:
                # neither at the end nor at the beginning
                if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] == 0.:
                    vbtype[i] = cascade.BoxTypes.isolated
                if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] > 0.:
                    vbtype[i] = cascade.BoxTypes.starting
                if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] > 0.:
                    vbtype[i] = cascade.BoxTypes.enclosed
                if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] == 0.:
                    vbtype[i] = cascade.BoxTypes.ending
        else:
            vbtype[i] = cascade.BoxTypes.dry  # no rain

    # 3rd step: examine branching
    j = 0
    for i in range(0, n_in):
        if np.mod(i, 2) != 0:
            if vdn.precip.values[j] > 0:
                if vdn.precip.values[j] > meanvol:
                    belowabove = 1  # above mean
                else:
                    belowabove = 0  # below mean

                nb[belowabove, vbtype[j]-1] += 1

                if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] == 0:
                    # P(1/0)
                    cascade_opt.p10[belowabove, vbtype[j]-1] += 1
                if vec_data.precip.values[i-1] == 0 and vec_data.precip.values[i] > 0:
                    # P(0/1)
                    cascade_opt.p01[belowabove, vbtype[j]-1] += 1
                if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] > 0:
                    # P(x/x)
                    cascade_opt.pxx[belowabove, vbtype[j]-1] += 1
                    nbxx[belowabove, vbtype[j]-1] += 1

                    # weights
                    r1 = vec_data.precip.values[i-1]
                    r2 = vec_data.precip.values[i]
                    wxxval = r1 / (r1 + r2)

                    # Test
                    if abs(r1 + r2 - vdn.precip.values[j]) > 1.E-3:
                        print('i=' + str(i) + ', j=' + str(j) +
                              ', r1=' + str(r1) + ", r2=" + str(r2) +
                              ", sum=" + str(vdn.precip.values[j]))
                        print(vec_data.index[i])
                        print(vdn.index[j])
                        print('error')
                        return cascade_opt, vdn

                    for k in range(0, 7):
                        if wxxval > wlower[k] and wxxval <= wupper[k]:
                            cascade_opt.wxx[k, belowabove, vbtype[j]-1] += 1
                            break
            j = j + 1

    # 4th step: transform counts to percentages
    cascade_opt.p01 = cascade_opt.p01 / nb
    cascade_opt.p10 = cascade_opt.p10 / nb
    cascade_opt.pxx = cascade_opt.pxx / nb

    with np.errstate(divide='ignore', invalid='ignore'):
        # do not issue warnings here when dividing by zero, this is handled below
        for k in range(0, 7):
            cascade_opt.wxx[k, :, :] = cascade_opt.wxx[k, :, :] / nbxx[:, :]

    # In some cases, the time series are too short for deriving statistics.
    if (np.isnan(cascade_opt.p01).any() or
            np.isnan(cascade_opt.p10).any() or
            np.isnan(cascade_opt.pxx).any()):
        print("ERROR (branching probabilities):")
        print("Invalid statistics. Default values will be returned. "
              "Try to use longer time series or apply statistics "
              "derived for another station.")
        cascade_opt.fill_with_sample_data()

    # For some box types, the corresponding probabilities might yield nan.
    # If this happens, nan values will be replaced by 1/7 in order to provide
    # valid values for disaggregation.
    if np.isnan(cascade_opt.wxx).any():
        print("Warning (weighting probabilities):")
        print("The derived cascade statistics are not valid as some "
              "probabilities are undefined! ", end="")
        print("Try to use longer time series that might be more "
              "appropriate for deriving statistics. ", end="")
        print("As a workaround, default values according to equally "
              "distributed probabilities ", end="")
        print("will be applied...", end="")

        cascade_opt.wxx[np.isnan(cascade_opt.wxx)] = 1.0 / 7.0

        wxx = np.zeros((2, 4))
        for k in range(0, 7):
            wxx[:, :] += cascade_opt.wxx[k, :, :]
        # note: the original compared wxx.any() (a bool) against the bounds,
        # which could never trigger; the elementwise check below is intended
        if (wxx > 1.001).any() or (wxx < 0.999).any():
            print("failed! Using default values!")
            cascade_opt.fill_with_sample_data()
        else:
            print("OK!")

    return cascade_opt, vdn
python
def aggregate_precipitation(vec_data, hourly=True, percentile=50):
    """Aggregates highly resolved precipitation data and creates statistics

    Parameters
    ----------
    vec_data : pd.DataFrame
        hourly (hourly=True) OR 5-min values in a 'precip' column

    Returns
    -------
    output : cascade object representing statistics of the cascade model
    """
    cascade_opt = cascade.CascadeStatistics()
    cascade_opt.percentile = percentile

    # length of input time series
    n_in = len(vec_data)
    n_out = np.floor(n_in / 2)

    # alternative:
    # 1st step: new time series
    vec_time = vec_data.index
    vdn0 = []
    vtn0 = []
    j = 0
    for i in range(0, n_in):
        if np.mod(i, 2) != 0:
            vdn0.append(vec_data.precip.values[i-1] + vec_data.precip.values[i])
            vtn0.append(vec_time[i])
            j = j + 1
    vdn = pd.DataFrame(index=vtn0, data={'precip': vdn0})

    # length of new time series
    n_out = len(vdn)

    # series of box types:
    vbtype = np.zeros((n_out, ), dtype=np.int)

    # fields for empirical probabilities
    # counts
    nb = np.zeros((2, 4))
    nbxx = np.zeros((2, 4))

    # class boundaries for histograms
    # wclassbounds = np.linspace(0, 1, num=8)
    wlower = np.array([0, 0.1429, 0.2857, 0.4286, 0.5714, 0.7143, 0.8571])  # wclassbounds[0:7]
    wupper = np.array([0.1429, 0.2857, 0.4286, 0.5714, 0.7143, 0.8571, 1.0])  # wclassbounds[1:8]

    # evaluate mean rainfall intensity for wet boxes
    # these values should be determined during the aggregation phase!!!!!
    # mean volume threshold
    meanvol = np.percentile(vdn.precip[vdn.precip > 0.],
                            cascade_opt.percentile)  # np.mean(vdn.precip[vdn.precip>0.])
    cascade_opt.threshold = np.array([meanvol])

    # 2nd step: classify boxes at the upper level
    for i in range(0, n_out):
        if vdn.precip.values[i] > 0.:  # rain?
            if i == 0:
                # only starting or isolated
                if vdn.precip.values[i+1] > 0.:
                    vbtype[i] = cascade.BoxTypes.starting
                else:
                    vbtype[i] = cascade.BoxTypes.isolated
            elif i == n_out - 1:
                # only ending or isolated
                if vdn.precip.values[i-1] > 0.:
                    vbtype[i] = cascade.BoxTypes.ending
                else:
                    vbtype[i] = cascade.BoxTypes.isolated
            else:
                # neither at the end nor at the beginning
                if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] == 0.:
                    vbtype[i] = cascade.BoxTypes.isolated
                if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] > 0.:
                    vbtype[i] = cascade.BoxTypes.starting
                if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] > 0.:
                    vbtype[i] = cascade.BoxTypes.enclosed
                if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] == 0.:
                    vbtype[i] = cascade.BoxTypes.ending
        else:
            vbtype[i] = cascade.BoxTypes.dry  # no rain

    # 3rd step: examine branching
    j = 0
    for i in range(0, n_in):
        if np.mod(i, 2) != 0:
            if vdn.precip.values[j] > 0:
                if vdn.precip.values[j] > meanvol:
                    belowabove = 1  # above mean
                else:
                    belowabove = 0  # below mean

                nb[belowabove, vbtype[j]-1] += 1

                if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] == 0:
                    # P(1/0)
                    cascade_opt.p10[belowabove, vbtype[j]-1] += 1
                if vec_data.precip.values[i-1] == 0 and vec_data.precip.values[i] > 0:
                    # P(0/1)
                    cascade_opt.p01[belowabove, vbtype[j]-1] += 1
                if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] > 0:
                    # P(x/x)
                    cascade_opt.pxx[belowabove, vbtype[j]-1] += 1
                    nbxx[belowabove, vbtype[j]-1] += 1

                    # weights
                    r1 = vec_data.precip.values[i-1]
                    r2 = vec_data.precip.values[i]
                    wxxval = r1 / (r1 + r2)

                    # Test
                    if abs(r1 + r2 - vdn.precip.values[j]) > 1.E-3:
                        print('i=' + str(i) + ', j=' + str(j) +
                              ', r1=' + str(r1) + ", r2=" + str(r2) +
                              ", sum=" + str(vdn.precip.values[j]))
                        print(vec_data.index[i])
                        print(vdn.index[j])
                        print('error')
                        return cascade_opt, vdn

                    for k in range(0, 7):
                        if wxxval > wlower[k] and wxxval <= wupper[k]:
                            cascade_opt.wxx[k, belowabove, vbtype[j]-1] += 1
                            break
            j = j + 1

    # 4th step: transform counts to percentages
    cascade_opt.p01 = cascade_opt.p01 / nb
    cascade_opt.p10 = cascade_opt.p10 / nb
    cascade_opt.pxx = cascade_opt.pxx / nb

    with np.errstate(divide='ignore', invalid='ignore'):
        # do not issue warnings here when dividing by zero, this is handled below
        for k in range(0, 7):
            cascade_opt.wxx[k, :, :] = cascade_opt.wxx[k, :, :] / nbxx[:, :]

    # In some cases, the time series are too short for deriving statistics.
    if (np.isnan(cascade_opt.p01).any() or
            np.isnan(cascade_opt.p10).any() or
            np.isnan(cascade_opt.pxx).any()):
        print("ERROR (branching probabilities):")
        print("Invalid statistics. Default values will be returned. "
              "Try to use longer time series or apply statistics "
              "derived for another station.")
        cascade_opt.fill_with_sample_data()

    # For some box types, the corresponding probabilities might yield nan.
    # If this happens, nan values will be replaced by 1/7 in order to provide
    # valid values for disaggregation.
    if np.isnan(cascade_opt.wxx).any():
        print("Warning (weighting probabilities):")
        print("The derived cascade statistics are not valid as some "
              "probabilities are undefined! ", end="")
        print("Try to use longer time series that might be more "
              "appropriate for deriving statistics. ", end="")
        print("As a workaround, default values according to equally "
              "distributed probabilities ", end="")
        print("will be applied...", end="")

        cascade_opt.wxx[np.isnan(cascade_opt.wxx)] = 1.0 / 7.0

        wxx = np.zeros((2, 4))
        for k in range(0, 7):
            wxx[:, :] += cascade_opt.wxx[k, :, :]
        # note: the original compared wxx.any() (a bool) against the bounds,
        # which could never trigger; the elementwise check below is intended
        if (wxx > 1.001).any() or (wxx < 0.999).any():
            print("failed! Using default values!")
            cascade_opt.fill_with_sample_data()
        else:
            print("OK!")

    return cascade_opt, vdn
[ "def", "aggregate_precipitation", "(", "vec_data", ",", "hourly", "=", "True", ",", "percentile", "=", "50", ")", ":", "cascade_opt", "=", "cascade", ".", "CascadeStatistics", "(", ")", "cascade_opt", ".", "percentile", "=", "percentile", "# length of input time series", "n_in", "=", "len", "(", "vec_data", ")", "n_out", "=", "np", ".", "floor", "(", "n_in", "/", "2", ")", "# alternative:", "# 1st step: new time series", "vec_time", "=", "vec_data", ".", "index", "vdn0", "=", "[", "]", "vtn0", "=", "[", "]", "j", "=", "0", "for", "i", "in", "range", "(", "0", ",", "n_in", ")", ":", "if", "np", ".", "mod", "(", "i", ",", "2", ")", "!=", "0", ":", "vdn0", ".", "append", "(", "vec_data", ".", "precip", ".", "values", "[", "i", "-", "1", "]", "+", "vec_data", ".", "precip", ".", "values", "[", "i", "]", ")", "vtn0", ".", "append", "(", "vec_time", "[", "i", "]", ")", "j", "=", "j", "+", "1", "vdn", "=", "pd", ".", "DataFrame", "(", "index", "=", "vtn0", ",", "data", "=", "{", "'precip'", ":", "vdn0", "}", ")", "# length of new time series", "n_out", "=", "len", "(", "vdn", ")", "# series of box types:", "vbtype", "=", "np", ".", "zeros", "(", "(", "n_out", ",", ")", ",", "dtype", "=", "np", ".", "int", ")", "# fields for empirical probabilities", "# counts", "nb", "=", "np", ".", "zeros", "(", "(", "2", ",", "4", ")", ")", "nbxx", "=", "np", ".", "zeros", "(", "(", "2", ",", "4", ")", ")", "# class boundaries for histograms", "# wclassbounds = np.linspace(0, 1, num=8)", "wlower", "=", "np", ".", "array", "(", "[", "0", ",", "0.1429", ",", "0.2857", ",", "0.4286", ",", "0.5714", ",", "0.7143", ",", "0.8571", "]", ")", "# wclassbounds[0:7]", "wupper", "=", "np", ".", "array", "(", "[", "0.1429", ",", "0.2857", ",", "0.4286", ",", "0.5714", ",", "0.7143", ",", "0.8571", ",", "1.0", "]", ")", "# wclassbounds[1:8]", "# evaluate mean rainfall intensity for wet boxes", "# these values should be determined during the aggregation phase!!!!!", "# mean volume threshold", "meanvol", "=", "np", ".", "percentile", "(", "vdn", ".", "precip", "[", "vdn", ".", "precip", ">", "0.", "]", ",", "cascade_opt", ".", "percentile", ")", "# np.mean(vdn.precip[vdn.precip>0.])", "cascade_opt", ".", "threshold", "=", "np", ".", "array", "(", "[", "meanvol", "]", ")", "# 2nd step: classify boxes at the upper level", "for", "i", "in", "range", "(", "0", ",", "n_out", ")", ":", "if", "vdn", ".", "precip", ".", "values", "[", "i", "]", ">", "0.", ":", "# rain?", "if", "i", "==", "0", ":", "# only starting or isolated", "if", "vdn", ".", "precip", ".", "values", "[", "i", "+", "1", "]", ">", "0.", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "starting", "else", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "isolated", "elif", "i", "==", "n_out", "-", "1", ":", "# only ending or isolated", "if", "vdn", ".", "precip", ".", "values", "[", "i", "-", "1", "]", ">", "0.", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "ending", "else", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "isolated", "else", ":", "# neither at at the end nor at the beginning", "if", "vdn", ".", "precip", ".", "values", "[", "i", "-", "1", "]", "==", "0.", "and", "vdn", ".", "precip", ".", "values", "[", "i", "+", "1", "]", "==", "0.", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "isolated", "if", "vdn", ".", "precip", ".", "values", "[", "i", "-", "1", "]", "==", "0.", "and", "vdn", ".", "precip", ".", "values", "[", "i", "+", "1", "]", ">", 
"0.", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "starting", "if", "vdn", ".", "precip", ".", "values", "[", "i", "-", "1", "]", ">", "0.", "and", "vdn", ".", "precip", ".", "values", "[", "i", "+", "1", "]", ">", "0.", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "enclosed", "if", "vdn", ".", "precip", ".", "values", "[", "i", "-", "1", "]", ">", "0.", "and", "vdn", ".", "precip", ".", "values", "[", "i", "+", "1", "]", "==", "0.", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "ending", "else", ":", "vbtype", "[", "i", "]", "=", "cascade", ".", "BoxTypes", ".", "dry", "# no rain", "# 3rd step: examine branching", "j", "=", "0", "for", "i", "in", "range", "(", "0", ",", "n_in", ")", ":", "if", "np", ".", "mod", "(", "i", ",", "2", ")", "!=", "0", ":", "if", "vdn", ".", "precip", ".", "values", "[", "j", "]", ">", "0", ":", "if", "vdn", ".", "precip", ".", "values", "[", "j", "]", ">", "meanvol", ":", "belowabove", "=", "1", "# above mean", "else", ":", "belowabove", "=", "0", "# below mean", "nb", "[", "belowabove", ",", "vbtype", "[", "j", "]", "-", "1", "]", "+=", "1", "if", "vec_data", ".", "precip", ".", "values", "[", "i", "-", "1", "]", ">", "0", "and", "vec_data", ".", "precip", ".", "values", "[", "i", "]", "==", "0", ":", "# P(1/0)", "cascade_opt", ".", "p10", "[", "belowabove", ",", "vbtype", "[", "j", "]", "-", "1", "]", "+=", "1", "if", "vec_data", ".", "precip", ".", "values", "[", "i", "-", "1", "]", "==", "0", "and", "vec_data", ".", "precip", ".", "values", "[", "i", "]", ">", "0", ":", "# P(0/1)", "cascade_opt", ".", "p01", "[", "belowabove", ",", "vbtype", "[", "j", "]", "-", "1", "]", "+=", "1", "if", "vec_data", ".", "precip", ".", "values", "[", "i", "-", "1", "]", ">", "0", "and", "vec_data", ".", "precip", ".", "values", "[", "i", "]", ">", "0", ":", "# P(x/x)", "cascade_opt", ".", "pxx", "[", "belowabove", ",", "vbtype", "[", "j", "]", "-", "1", "]", "+=", "1", "nbxx", "[", "belowabove", ",", "vbtype", "[", "j", "]", "-", "1", "]", "+=", "1", "# weights", "r1", "=", "vec_data", ".", "precip", ".", "values", "[", "i", "-", "1", "]", "r2", "=", "vec_data", ".", "precip", ".", "values", "[", "i", "]", "wxxval", "=", "r1", "/", "(", "r1", "+", "r2", ")", "# Test", "if", "abs", "(", "r1", "+", "r2", "-", "vdn", ".", "precip", ".", "values", "[", "j", "]", ")", ">", "1.E-3", ":", "print", "(", "'i='", "+", "str", "(", "i", ")", "+", "', j='", "+", "str", "(", "j", ")", "+", "', r1='", "+", "str", "(", "r1", ")", "+", "\", r2=\"", "+", "str", "(", "r2", ")", "+", "\", Summe=\"", "+", "str", "(", "vdn", ".", "precip", ".", "values", "[", "j", "]", ")", ")", "print", "(", "vec_data", ".", "index", "[", "i", "]", ")", "print", "(", "vdn", ".", "index", "[", "j", "]", ")", "print", "(", "'error'", ")", "return", "cascade_opt", ",", "vdn", "for", "k", "in", "range", "(", "0", ",", "7", ")", ":", "if", "wxxval", ">", "wlower", "[", "k", "]", "and", "wxxval", "<=", "wupper", "[", "k", "]", ":", "cascade_opt", ".", "wxx", "[", "k", ",", "belowabove", ",", "vbtype", "[", "j", "]", "-", "1", "]", "+=", "1", "break", "j", "=", "j", "+", "1", "# 4th step: transform counts to percentages", "cascade_opt", ".", "p01", "=", "cascade_opt", ".", "p01", "/", "nb", "cascade_opt", ".", "p10", "=", "cascade_opt", ".", "p10", "/", "nb", "cascade_opt", ".", "pxx", "=", "cascade_opt", ".", "pxx", "/", "nb", "with", "np", ".", "errstate", "(", "divide", "=", "'ignore'", ",", "invalid", "=", "'ignore'", ")", ":", "# do not issue 
warnings here when dividing by zero, this is handled below", "for", "k", "in", "range", "(", "0", ",", "7", ")", ":", "cascade_opt", ".", "wxx", "[", "k", ",", ":", ",", ":", "]", "=", "cascade_opt", ".", "wxx", "[", "k", ",", ":", ",", ":", "]", "/", "nbxx", "[", ":", ",", ":", "]", "# In some cases, the time series are too short for deriving statistics.", "if", "(", "np", ".", "isnan", "(", "cascade_opt", ".", "p01", ")", ".", "any", "(", ")", "or", "np", ".", "isnan", "(", "cascade_opt", ".", "p10", ")", ".", "any", "(", ")", "or", "np", ".", "isnan", "(", "cascade_opt", ".", "pxx", ")", ".", "any", "(", ")", ")", ":", "print", "(", "\"ERROR (branching probabilities):\"", ")", "print", "(", "\"Invalid statistics. Default values will be returned. \"", "\"Try to use longer time series or apply statistics \"", "\"derived for another station.\"", ")", "cascade_opt", ".", "fill_with_sample_data", "(", ")", "# For some box types, the corresponding probabilities might yield nan.", "# If this happens, nan values will be replaced by 1/7 in order to provide", "# valid values for disaggregation.", "if", "np", ".", "isnan", "(", "cascade_opt", ".", "wxx", ")", ".", "any", "(", ")", ":", "print", "(", "\"Warning (weighting probabilities):\"", ")", "print", "(", "\"The derived cascade statistics are not valid as some \"", "\"probabilities are undefined! \"", ",", "end", "=", "\"\"", ")", "print", "(", "\"Try to use longer time series that might be more \"", "\"appropriate for deriving statistics. \"", ",", "end", "=", "\"\"", ")", "print", "(", "\"As a workaround, default values according to equally \"", "\"distributed probabilities \"", ",", "end", "=", "\"\"", ")", "print", "(", "\"will be applied...\"", ",", "end", "=", "\"\"", ")", "cascade_opt", ".", "wxx", "[", "np", ".", "isnan", "(", "cascade_opt", ".", "wxx", ")", "]", "=", "1.0", "/", "7.0", "wxx", "=", "np", ".", "zeros", "(", "(", "2", ",", "4", ")", ")", "for", "k", "in", "range", "(", "0", ",", "7", ")", ":", "wxx", "[", ":", ",", ":", "]", "+=", "cascade_opt", ".", "wxx", "[", "k", ",", ":", ",", ":", "]", "if", "wxx", ".", "any", "(", ")", ">", "1.001", "or", "wxx", ".", "any", "(", ")", "<", "0.999", ":", "print", "(", "\"failed! Using default values!\"", ")", "cascade_opt", ".", "fill_with_sample_data", "(", ")", "else", ":", "print", "(", "\"OK!\"", ")", "return", "cascade_opt", ",", "vdn" ]
Aggregates highly resolved precipitation data and creates statistics

Parameters
----------
vec_data : pd.DataFrame
    hourly (hourly=True) OR 5-min values in a 'precip' column

Returns
-------
output : cascade object representing statistics of the cascade model
[ "Aggregates", "highly", "resolved", "precipitation", "data", "and", "creates", "statistics" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L403-L586
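A usage sketch for aggregate_precipitation; note that the function reads a 'precip' column, so a DataFrame is passed. The gamma-distributed rainfall is synthetic and purely illustrative:

    import numpy as np
    import pandas as pd

    idx = pd.date_range('2020-06-01', periods=2048, freq='H')
    obs = pd.DataFrame({'precip': np.random.gamma(0.3, 0.5, size=len(idx))}, index=idx)

    stats, aggregated = aggregate_precipitation(obs, hourly=True, percentile=50)
    print(stats.threshold)  # volume threshold separating low/high intensities
    print(stats.p01)        # branching probabilities P(0/1) per box type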
kristianfoerster/melodist
melodist/precipitation.py
seasonal_subset
def seasonal_subset(dataframe, months='all'):
    '''Get the seasonal data.

    Parameters
    ----------
    dataframe : pd.DataFrame
    months : list of int, or str
        Months to use for statistics, or 'all' for 1-12 (default='all')
    '''
    if isinstance(months, str) and months == 'all':
        months = np.arange(12) + 1

    for month_num, month in enumerate(months):
        df_cur = dataframe[dataframe.index.month == month]

        if month_num == 0:
            df = df_cur
        else:
            df = df.append(df_cur)

    return df.sort_index()
python
def seasonal_subset(dataframe, months='all'):
    '''Get the seasonal data.

    Parameters
    ----------
    dataframe : pd.DataFrame
    months : list of int, or str
        Months to use for statistics, or 'all' for 1-12 (default='all')
    '''
    if isinstance(months, str) and months == 'all':
        months = np.arange(12) + 1

    for month_num, month in enumerate(months):
        df_cur = dataframe[dataframe.index.month == month]

        if month_num == 0:
            df = df_cur
        else:
            df = df.append(df_cur)

    return df.sort_index()
[ "def", "seasonal_subset", "(", "dataframe", ",", "months", "=", "'all'", ")", ":", "if", "isinstance", "(", "months", ",", "str", ")", "and", "months", "==", "'all'", ":", "months", "=", "np", ".", "arange", "(", "12", ")", "+", "1", "for", "month_num", ",", "month", "in", "enumerate", "(", "months", ")", ":", "df_cur", "=", "dataframe", "[", "dataframe", ".", "index", ".", "month", "==", "month", "]", "if", "month_num", "==", "0", ":", "df", "=", "df_cur", "else", ":", "df", "=", "df", ".", "append", "(", "df_cur", ")", "return", "df", ".", "sort_index", "(", ")" ]
Get the seasonal data.

Parameters
----------
dataframe : pd.DataFrame
months : list of int, or str
    Months to use for statistics, or 'all' for 1-12 (default='all')
[ "Get", "the", "seasonal", "data", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L589-L611
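A quick sketch of seasonal_subset, selecting the summer months from a synthetic hourly DataFrame:

    import numpy as np
    import pandas as pd

    idx = pd.date_range('2019-01-01', '2020-12-31', freq='H')
    df = pd.DataFrame({'precip': np.random.rand(len(idx))}, index=idx)

    jja = seasonal_subset(df, months=[6, 7, 8])   # June-August only
    full = seasonal_subset(df)                    # months='all' keeps everything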
kristianfoerster/melodist
melodist/precipitation.py
build_casc
def build_casc(ObsData, hourly=True, level=9, months=None,
               avg_stats=True, percentile=50):
    '''Builds the cascade statistics of observed data for disaggregation

    Parameters
    ----------
    ObsData : pd.Series
        hourly=True -> hourly obs data
        else -> 5min data (disaggregation level=9 (default), 10, 11)
    months : numpy array of ints
        Months for each season to be used for statistics (array of numpy
        array, default=1-12, e.g., [np.arange(12) + 1])
    avg_stats : bool
        average statistics for all levels True/False (default=True)
    percentile : int, float
        percentile for splitting the dataset into small and high
        intensities (default=50)

    Returns
    -------
    list_seasonal_casc : list
        holding the results
    '''
    list_seasonal_casc = list()

    if months is None:
        months = [np.arange(12) + 1]

    # Parameter estimation for each season
    for cur_months in months:
        vdn = seasonal_subset(ObsData, cur_months)
        if len(ObsData.precip[np.isnan(ObsData.precip)]) > 0:
            ObsData.precip[np.isnan(ObsData.precip)] = 0

        casc_opt = melodist.cascade.CascadeStatistics()
        casc_opt.percentile = percentile
        list_casc_opt = list()
        count = 0

        if hourly:
            aggre_level = 5
        else:
            aggre_level = level

        thresholds = np.zeros(aggre_level)  # np.array([0., 0., 0., 0., 0.])
        for i in range(0, aggre_level):
            # aggregate the data
            casc_opt_i, vdn = aggregate_precipitation(vdn, hourly,
                                                      percentile=percentile)
            thresholds[i] = casc_opt_i.threshold
            copy_of_casc_opt_i = copy.copy(casc_opt_i)
            list_casc_opt.append(copy_of_casc_opt_i)
            n_vdn = len(vdn)
            casc_opt_i * n_vdn  # level related weighting
            casc_opt + casc_opt_i  # add to total statistics
            count = count + n_vdn
        casc_opt * (1. / count)  # transfer weighted matrices to probabilities
        casc_opt.threshold = thresholds

        # statistics object
        if avg_stats:
            # in this case, the average statistics will be applied
            # for all levels likewise
            stat_obj = casc_opt
        else:
            # for longer time series, separate statistics might be more
            # appropriate; level dependent statistics will be assumed
            stat_obj = list_casc_opt

        list_seasonal_casc.append(stat_obj)

    return list_seasonal_casc
python
def build_casc(ObsData, hourly=True, level=9, months=None,
               avg_stats=True, percentile=50):
    '''Builds the cascade statistics of observed data for disaggregation

    Parameters
    ----------
    ObsData : pd.Series
        hourly=True -> hourly obs data
        else -> 5min data (disaggregation level=9 (default), 10, 11)
    months : numpy array of ints
        Months for each season to be used for statistics (array of numpy
        array, default=1-12, e.g., [np.arange(12) + 1])
    avg_stats : bool
        average statistics for all levels True/False (default=True)
    percentile : int, float
        percentile for splitting the dataset into small and high
        intensities (default=50)

    Returns
    -------
    list_seasonal_casc : list
        holding the results
    '''
    list_seasonal_casc = list()

    if months is None:
        months = [np.arange(12) + 1]

    # Parameter estimation for each season
    for cur_months in months:
        vdn = seasonal_subset(ObsData, cur_months)
        if len(ObsData.precip[np.isnan(ObsData.precip)]) > 0:
            ObsData.precip[np.isnan(ObsData.precip)] = 0

        casc_opt = melodist.cascade.CascadeStatistics()
        casc_opt.percentile = percentile
        list_casc_opt = list()
        count = 0

        if hourly:
            aggre_level = 5
        else:
            aggre_level = level

        thresholds = np.zeros(aggre_level)  # np.array([0., 0., 0., 0., 0.])
        for i in range(0, aggre_level):
            # aggregate the data
            casc_opt_i, vdn = aggregate_precipitation(vdn, hourly,
                                                      percentile=percentile)
            thresholds[i] = casc_opt_i.threshold
            copy_of_casc_opt_i = copy.copy(casc_opt_i)
            list_casc_opt.append(copy_of_casc_opt_i)
            n_vdn = len(vdn)
            casc_opt_i * n_vdn  # level related weighting
            casc_opt + casc_opt_i  # add to total statistics
            count = count + n_vdn
        casc_opt * (1. / count)  # transfer weighted matrices to probabilities
        casc_opt.threshold = thresholds

        # statistics object
        if avg_stats:
            # in this case, the average statistics will be applied
            # for all levels likewise
            stat_obj = casc_opt
        else:
            # for longer time series, separate statistics might be more
            # appropriate; level dependent statistics will be assumed
            stat_obj = list_casc_opt

        list_seasonal_casc.append(stat_obj)

    return list_seasonal_casc
[ "def", "build_casc", "(", "ObsData", ",", "hourly", "=", "True", ",", "level", "=", "9", ",", "months", "=", "None", ",", "avg_stats", "=", "True", ",", "percentile", "=", "50", ")", ":", "list_seasonal_casc", "=", "list", "(", ")", "if", "months", "is", "None", ":", "months", "=", "[", "np", ".", "arange", "(", "12", ")", "+", "1", "]", "# Parameter estimation for each season", "for", "cur_months", "in", "months", ":", "vdn", "=", "seasonal_subset", "(", "ObsData", ",", "cur_months", ")", "if", "len", "(", "ObsData", ".", "precip", "[", "np", ".", "isnan", "(", "ObsData", ".", "precip", ")", "]", ")", ">", "0", ":", "ObsData", ".", "precip", "[", "np", ".", "isnan", "(", "ObsData", ".", "precip", ")", "]", "=", "0", "casc_opt", "=", "melodist", ".", "cascade", ".", "CascadeStatistics", "(", ")", "casc_opt", ".", "percentile", "=", "percentile", "list_casc_opt", "=", "list", "(", ")", "count", "=", "0", "if", "hourly", ":", "aggre_level", "=", "5", "else", ":", "aggre_level", "=", "level", "thresholds", "=", "np", ".", "zeros", "(", "aggre_level", ")", "#np.array([0., 0., 0., 0., 0.])", "for", "i", "in", "range", "(", "0", ",", "aggre_level", ")", ":", "# aggregate the data", "casc_opt_i", ",", "vdn", "=", "aggregate_precipitation", "(", "vdn", ",", "hourly", ",", "percentile", "=", "percentile", ")", "thresholds", "[", "i", "]", "=", "casc_opt_i", ".", "threshold", "copy_of_casc_opt_i", "=", "copy", ".", "copy", "(", "casc_opt_i", ")", "list_casc_opt", ".", "append", "(", "copy_of_casc_opt_i", ")", "n_vdn", "=", "len", "(", "vdn", ")", "casc_opt_i", "*", "n_vdn", "# level related weighting", "casc_opt", "+", "casc_opt_i", "# add to total statistics", "count", "=", "count", "+", "n_vdn", "casc_opt", "*", "(", "1.", "/", "count", ")", "# transfer weighted matrices to probabilities", "casc_opt", ".", "threshold", "=", "thresholds", "# statistics object", "if", "avg_stats", ":", "# in this case, the average statistics will be applied for all levels likewise", "stat_obj", "=", "casc_opt", "else", ":", "# for longer time series, separate statistics might be more appropriate", "# level dependent statistics will be assumed", "stat_obj", "=", "list_casc_opt", "list_seasonal_casc", ".", "append", "(", "stat_obj", ")", "return", "list_seasonal_casc" ]
Builds the cascade statistics of observed data for disaggregation

Parameters
----------
ObsData : pd.Series
    hourly=True -> hourly obs data
    else -> 5min data (disaggregation level=9 (default), 10, 11)
months : numpy array of ints
    Months for each season to be used for statistics (array of numpy array,
    default=1-12, e.g., [np.arange(12) + 1])
avg_stats : bool
    average statistics for all levels True/False (default=True)
percentile : int, float
    percentile for splitting the dataset into small and high intensities
    (default=50)

Returns
-------
list_seasonal_casc : list
    holding the results
[ "Builds", "the", "cascade", "statistics", "of", "observed", "data", "for", "disaggregation" ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L614-L690
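A sketch of build_casc with two seasons; the observation DataFrame is synthetic and needs a 'precip' column, as assumed by the functions above:

    import numpy as np
    import pandas as pd

    idx = pd.date_range('2015-01-01', '2019-12-31', freq='H')
    obs = pd.DataFrame({'precip': np.random.gamma(0.3, 0.5, size=len(idx))}, index=idx)

    seasons = [np.array([10, 11, 12, 1, 2, 3]),  # winter half-year
               np.array([4, 5, 6, 7, 8, 9])]     # summer half-year
    stats_per_season = build_casc(obs, hourly=True, months=seasons, avg_stats=True)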
kristianfoerster/melodist
melodist/cascade.py
CascadeStatistics.fill_with_sample_data
def fill_with_sample_data(self):
    """This function fills the corresponding object with sample data."""
    # replace these sample data with another dataset later
    # this function is deprecated as soon as a common file format for this
    # type of data will be available
    self.p01 = np.array([[0.576724636119866, 0.238722774405744,
                          0.166532122130638, 0.393474644666218],
                         [0.303345245644811, 0.0490956843857575,
                          0.0392403031072856, 0.228441890034704]])
    self.p10 = np.array([[0.158217002255554, 0.256581140990052,
                          0.557852226779526, 0.422638238585814],
                         [0.0439831163244427, 0.0474928027621488,
                          0.303675296728195, 0.217512052135178]])
    self.pxx = np.array([[0.265058361624580, 0.504696084604205,
                          0.275615651089836, 0.183887116747968],
                         [0.652671638030746, 0.903411512852094,
                          0.657084400164519, 0.554046057830118]])
    self.wxx = np.array([[[0.188389148850583, 0.0806836453984190,
                           0.0698113025807722, 0.0621499191745602],
                          [0.240993281622128, 0.0831019646519721,
                           0.0415130545715575, 0.155284541403192]],
                         [[0.190128959522795, 0.129220679033862,
                           0.0932213021787505, 0.193080698516532],
                          [0.196379692358065, 0.108549414860949,
                           0.0592714297292217, 0.0421945385836429]],
                         [[0.163043672107111, 0.152063537378127,
                           0.102823783410167, 0.0906028835221283],
                          [0.186579466868095, 0.189705690316132,
                           0.0990207345993082, 0.107831389238912]],
                         [[0.197765724699431, 0.220046257566978,
                           0.177876233348082, 0.261288786454262],
                          [0.123823472714948, 0.220514673922285,
                           0.102486496386323, 0.101975538893918]],
                         [[0.114435243444815, 0.170857634762767,
                           0.177327072603662, 0.135362730582518],
                          [0.0939211776723413, 0.174291820501902,
                           0.125275822078525, 0.150842841725936]],
                         [[0.0988683809545079, 0.152323481100248,
                           0.185606883566286, 0.167242856061538],
                          [0.0760275616817939, 0.127275603247149,
                           0.202466168603738, 0.186580243138018]],
                         [[0.0473688704207573, 0.0948047647595988,
                           0.193333422312280, 0.0902721256884624],
                          [0.0822753470826286, 0.0965608324996108,
                           0.369966294031327, 0.255290907016382]]])
python
def fill_with_sample_data(self):
    """This function fills the corresponding object with sample data."""
    # replace these sample data with another dataset later
    # this function is deprecated as soon as a common file format for this
    # type of data will be available
    self.p01 = np.array([[0.576724636119866, 0.238722774405744,
                          0.166532122130638, 0.393474644666218],
                         [0.303345245644811, 0.0490956843857575,
                          0.0392403031072856, 0.228441890034704]])
    self.p10 = np.array([[0.158217002255554, 0.256581140990052,
                          0.557852226779526, 0.422638238585814],
                         [0.0439831163244427, 0.0474928027621488,
                          0.303675296728195, 0.217512052135178]])
    self.pxx = np.array([[0.265058361624580, 0.504696084604205,
                          0.275615651089836, 0.183887116747968],
                         [0.652671638030746, 0.903411512852094,
                          0.657084400164519, 0.554046057830118]])
    self.wxx = np.array([[[0.188389148850583, 0.0806836453984190,
                           0.0698113025807722, 0.0621499191745602],
                          [0.240993281622128, 0.0831019646519721,
                           0.0415130545715575, 0.155284541403192]],
                         [[0.190128959522795, 0.129220679033862,
                           0.0932213021787505, 0.193080698516532],
                          [0.196379692358065, 0.108549414860949,
                           0.0592714297292217, 0.0421945385836429]],
                         [[0.163043672107111, 0.152063537378127,
                           0.102823783410167, 0.0906028835221283],
                          [0.186579466868095, 0.189705690316132,
                           0.0990207345993082, 0.107831389238912]],
                         [[0.197765724699431, 0.220046257566978,
                           0.177876233348082, 0.261288786454262],
                          [0.123823472714948, 0.220514673922285,
                           0.102486496386323, 0.101975538893918]],
                         [[0.114435243444815, 0.170857634762767,
                           0.177327072603662, 0.135362730582518],
                          [0.0939211776723413, 0.174291820501902,
                           0.125275822078525, 0.150842841725936]],
                         [[0.0988683809545079, 0.152323481100248,
                           0.185606883566286, 0.167242856061538],
                          [0.0760275616817939, 0.127275603247149,
                           0.202466168603738, 0.186580243138018]],
                         [[0.0473688704207573, 0.0948047647595988,
                           0.193333422312280, 0.0902721256884624],
                          [0.0822753470826286, 0.0965608324996108,
                           0.369966294031327, 0.255290907016382]]])
[ "def", "fill_with_sample_data", "(", "self", ")", ":", "# replace these sample data with another dataset later", "# this function is deprecated as soon as a common file format for this", "# type of data will be available", "self", ".", "p01", "=", "np", ".", "array", "(", "[", "[", "0.576724636119866", ",", "0.238722774405744", ",", "0.166532122130638", ",", "0.393474644666218", "]", ",", "[", "0.303345245644811", ",", "0.0490956843857575", ",", "0.0392403031072856", ",", "0.228441890034704", "]", "]", ")", "self", ".", "p10", "=", "np", ".", "array", "(", "[", "[", "0.158217002255554", ",", "0.256581140990052", ",", "0.557852226779526", ",", "0.422638238585814", "]", ",", "[", "0.0439831163244427", ",", "0.0474928027621488", ",", "0.303675296728195", ",", "0.217512052135178", "]", "]", ")", "self", ".", "pxx", "=", "np", ".", "array", "(", "[", "[", "0.265058361624580", ",", "0.504696084604205", ",", "0.275615651089836", ",", "0.183887116747968", "]", ",", "[", "0.652671638030746", ",", "0.903411512852094", ",", "0.657084400164519", ",", "0.554046057830118", "]", "]", ")", "self", ".", "wxx", "=", "np", ".", "array", "(", "[", "[", "[", "0.188389148850583", ",", "0.0806836453984190", ",", "0.0698113025807722", ",", "0.0621499191745602", "]", ",", "[", "0.240993281622128", ",", "0.0831019646519721", ",", "0.0415130545715575", ",", "0.155284541403192", "]", "]", ",", "[", "[", "0.190128959522795", ",", "0.129220679033862", ",", "0.0932213021787505", ",", "0.193080698516532", "]", ",", "[", "0.196379692358065", ",", "0.108549414860949", ",", "0.0592714297292217", ",", "0.0421945385836429", "]", "]", ",", "[", "[", "0.163043672107111", ",", "0.152063537378127", ",", "0.102823783410167", ",", "0.0906028835221283", "]", ",", "[", "0.186579466868095", ",", "0.189705690316132", ",", "0.0990207345993082", ",", "0.107831389238912", "]", "]", ",", "[", "[", "0.197765724699431", ",", "0.220046257566978", ",", "0.177876233348082", ",", "0.261288786454262", "]", ",", "[", "0.123823472714948", ",", "0.220514673922285", ",", "0.102486496386323", ",", "0.101975538893918", "]", "]", ",", "[", "[", "0.114435243444815", ",", "0.170857634762767", ",", "0.177327072603662", ",", "0.135362730582518", "]", ",", "[", "0.0939211776723413", ",", "0.174291820501902", ",", "0.125275822078525", ",", "0.150842841725936", "]", "]", ",", "[", "[", "0.0988683809545079", ",", "0.152323481100248", ",", "0.185606883566286", ",", "0.167242856061538", "]", ",", "[", "0.0760275616817939", ",", "0.127275603247149", ",", "0.202466168603738", ",", "0.186580243138018", "]", "]", ",", "[", "[", "0.0473688704207573", ",", "0.0948047647595988", ",", "0.193333422312280", ",", "0.0902721256884624", "]", ",", "[", "0.0822753470826286", ",", "0.0965608324996108", ",", "0.369966294031327", ",", "0.255290907016382", "]", "]", "]", ")" ]
This function fills the corresponding object with sample data.
[ "This", "function", "fills", "the", "corresponding", "object", "with", "sample", "data", "." ]
train
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/cascade.py#L54-L81
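fill_with_sample_data overwrites the statistics arrays in place, which makes it a handy fallback when no local statistics can be derived; a short sketch:

    from melodist import cascade

    casc = cascade.CascadeStatistics()
    casc.fill_with_sample_data()
    print(casc.p01.shape, casc.wxx.shape)  # (2, 4) and (7, 2, 4)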
gaqzi/django-emoji
emoji/models.py
Emoji.names
def names(cls):
    """A list of all emoji names without file extension."""
    if not cls._files:
        for f in os.listdir(cls._image_path):
            if(not f.startswith('.') and
               os.path.isfile(os.path.join(cls._image_path, f))):
                cls._files.append(os.path.splitext(f)[0])
    return cls._files
python
def names(cls):
    """A list of all emoji names without file extension."""
    if not cls._files:
        for f in os.listdir(cls._image_path):
            if(not f.startswith('.') and
               os.path.isfile(os.path.join(cls._image_path, f))):
                cls._files.append(os.path.splitext(f)[0])
    return cls._files
[ "def", "names", "(", "cls", ")", ":", "if", "not", "cls", ".", "_files", ":", "for", "f", "in", "os", ".", "listdir", "(", "cls", ".", "_image_path", ")", ":", "if", "(", "not", "f", ".", "startswith", "(", "'.'", ")", "and", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "cls", ".", "_image_path", ",", "f", ")", ")", ")", ":", "cls", ".", "_files", ".", "append", "(", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "0", "]", ")", "return", "cls", ".", "_files" ]
A list of all emoji names without file extension.
[ "A", "list", "of", "all", "emoji", "names", "without", "file", "extension", "." ]
train
https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L114-L122
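Emoji.names is a classmethod that caches the image directory listing on first use; a sketch (the specific emoji name checked is an assumption about the installed image set):

    from emoji.models import Emoji

    available = Emoji.names()
    print('smile' in available)  # True only if a 'smile' image is installed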
gaqzi/django-emoji
emoji/models.py
Emoji.replace
def replace(cls, replacement_string):
    """Add in valid emojis in a string where a valid emoji is between ::"""
    e = cls()

    def _replace_emoji(match):
        val = match.group(1)
        if val in e:
            return e._image_string(match.group(1))
        else:
            return match.group(0)

    return e._pattern.sub(_replace_emoji, replacement_string)
python
def replace(cls, replacement_string):
    """Add in valid emojis in a string where a valid emoji is between ::"""
    e = cls()

    def _replace_emoji(match):
        val = match.group(1)
        if val in e:
            return e._image_string(match.group(1))
        else:
            return match.group(0)

    return e._pattern.sub(_replace_emoji, replacement_string)
[ "def", "replace", "(", "cls", ",", "replacement_string", ")", ":", "e", "=", "cls", "(", ")", "def", "_replace_emoji", "(", "match", ")", ":", "val", "=", "match", ".", "group", "(", "1", ")", "if", "val", "in", "e", ":", "return", "e", ".", "_image_string", "(", "match", ".", "group", "(", "1", ")", ")", "else", ":", "return", "match", ".", "group", "(", "0", ")", "return", "e", ".", "_pattern", ".", "sub", "(", "_replace_emoji", ",", "replacement_string", ")" ]
Add in valid emojis in a string where a valid emoji is between ::
[ "Add", "in", "valid", "emojis", "in", "a", "string", "where", "a", "valid", "emoji", "is", "between", "::" ]
train
https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L125-L136
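Emoji.replace swaps ::-delimited names for image tags while leaving unrecognized names untouched; a sketch, assuming a 'smile' image exists:

    from emoji.models import Emoji

    html = Emoji.replace('Deployed successfully :smile:')
    # ':smile:' becomes an <img> tag if installed; ':not_an_emoji:' would stay as-is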
gaqzi/django-emoji
emoji/models.py
Emoji.replace_unicode
def replace_unicode(cls, replacement_string):
    """This method will iterate over every character in
    ``replacement_string`` and see if it matches any of the unicode
    codepoints that we recognize. If it does then it will replace that
    codepoint with an image just like ``replace``.

    NOTE: This will only work with Python versions built with wide unicode
    character support. Python 3 should always work but Python 2 will have
    to be tested before deploy.
    """
    e = cls()
    output = []
    surrogate_character = None

    if settings.EMOJI_REPLACE_HTML_ENTITIES:
        replacement_string = cls.replace_html_entities(replacement_string)

    for i, character in enumerate(replacement_string):
        if character in cls._unicode_modifiers:
            continue

        # Check whether this is the first character in a Unicode
        # surrogate pair when Python doesn't have wide Unicode
        # support.
        #
        # Is there any reason to do this even if Python got wide
        # support enabled?
        if(not UNICODE_WIDE and not surrogate_character and
           ord(character) >= UNICODE_SURROGATE_MIN and
           ord(character) <= UNICODE_SURROGATE_MAX):
            surrogate_character = character
            continue

        if surrogate_character:
            character = convert_unicode_surrogates(
                surrogate_character + character
            )
            surrogate_character = None

        name = e.name_for(character)
        if name:
            if settings.EMOJI_ALT_AS_UNICODE:
                character = e._image_string(name, alt=character)
            else:
                character = e._image_string(name)

        output.append(character)

    return ''.join(output)
python
def replace_unicode(cls, replacement_string):
    """This method will iterate over every character in
    ``replacement_string`` and see if it matches any of the unicode
    codepoints that we recognize. If it does then it will replace that
    codepoint with an image just like ``replace``.

    NOTE: This will only work with Python versions built with wide unicode
    character support. Python 3 should always work but Python 2 will have
    to be tested before deploy.
    """
    e = cls()
    output = []
    surrogate_character = None

    if settings.EMOJI_REPLACE_HTML_ENTITIES:
        replacement_string = cls.replace_html_entities(replacement_string)

    for i, character in enumerate(replacement_string):
        if character in cls._unicode_modifiers:
            continue

        # Check whether this is the first character in a Unicode
        # surrogate pair when Python doesn't have wide Unicode
        # support.
        #
        # Is there any reason to do this even if Python got wide
        # support enabled?
        if(not UNICODE_WIDE and not surrogate_character and
           ord(character) >= UNICODE_SURROGATE_MIN and
           ord(character) <= UNICODE_SURROGATE_MAX):
            surrogate_character = character
            continue

        if surrogate_character:
            character = convert_unicode_surrogates(
                surrogate_character + character
            )
            surrogate_character = None

        name = e.name_for(character)
        if name:
            if settings.EMOJI_ALT_AS_UNICODE:
                character = e._image_string(name, alt=character)
            else:
                character = e._image_string(name)

        output.append(character)

    return ''.join(output)
[ "def", "replace_unicode", "(", "cls", ",", "replacement_string", ")", ":", "e", "=", "cls", "(", ")", "output", "=", "[", "]", "surrogate_character", "=", "None", "if", "settings", ".", "EMOJI_REPLACE_HTML_ENTITIES", ":", "replacement_string", "=", "cls", ".", "replace_html_entities", "(", "replacement_string", ")", "for", "i", ",", "character", "in", "enumerate", "(", "replacement_string", ")", ":", "if", "character", "in", "cls", ".", "_unicode_modifiers", ":", "continue", "# Check whether this is the first character in a Unicode", "# surrogate pair when Python doesn't have wide Unicode", "# support.", "#", "# Is there any reason to do this even if Python got wide", "# support enabled?", "if", "(", "not", "UNICODE_WIDE", "and", "not", "surrogate_character", "and", "ord", "(", "character", ")", ">=", "UNICODE_SURROGATE_MIN", "and", "ord", "(", "character", ")", "<=", "UNICODE_SURROGATE_MAX", ")", ":", "surrogate_character", "=", "character", "continue", "if", "surrogate_character", ":", "character", "=", "convert_unicode_surrogates", "(", "surrogate_character", "+", "character", ")", "surrogate_character", "=", "None", "name", "=", "e", ".", "name_for", "(", "character", ")", "if", "name", ":", "if", "settings", ".", "EMOJI_ALT_AS_UNICODE", ":", "character", "=", "e", ".", "_image_string", "(", "name", ",", "alt", "=", "character", ")", "else", ":", "character", "=", "e", ".", "_image_string", "(", "name", ")", "output", ".", "append", "(", "character", ")", "return", "''", ".", "join", "(", "output", ")" ]
This method will iterate over every character in ``replacement_string``
and see if it matches any of the unicode codepoints that we recognize.
If it does then it will replace that codepoint with an image just like
``replace``.

NOTE: This will only work with Python versions built with wide unicode
character support. Python 3 should always work but Python 2 will have to
be tested before deploy.
[ "This", "method", "will", "iterate", "over", "every", "character", "in", "replacement_string", "and", "see", "if", "it", "matches", "any", "of", "the", "unicode", "codepoints", "that", "we", "recognize", ".", "If", "it", "does", "then", "it", "will", "replace", "that", "codepoint", "with", "an", "image", "just", "like", "replace", "." ]
train
https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L139-L188
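Emoji.replace_unicode works on raw codepoints instead of ::name:: markers, so plain text containing emoji characters can be converted directly; a sketch:

    from emoji.models import Emoji

    html = Emoji.replace_unicode('I am \U0001F604')  # smiling-face codepoint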
gaqzi/django-emoji
emoji/models.py
Emoji.replace_html_entities
def replace_html_entities(cls, replacement_string):
    """Replaces HTML escaped unicode entities with their unicode
    equivalent. If the setting `EMOJI_REPLACE_HTML_ENTITIES` is `True`
    then this conversion will always be done in `replace_unicode`
    (default: True).
    """
    def _hex_to_unicode(hex_code):
        if PYTHON3:
            hex_code = '{0:0>8}'.format(hex_code)
            as_int = struct.unpack('>i', bytes.fromhex(hex_code))[0]
            return '{0:c}'.format(as_int)
        else:
            return hex_to_unicode(hex_code)

    def _replace_integer_entity(match):
        hex_val = hex(int(match.group(1)))
        return _hex_to_unicode(hex_val.replace('0x', ''))

    def _replace_hex_entity(match):
        return _hex_to_unicode(match.group(1))

    # replace integer code points, &#65;
    replacement_string = re.sub(
        cls._html_entities_integer_unicode_regex,
        _replace_integer_entity,
        replacement_string
    )

    # replace hex code points, &#x41;
    replacement_string = re.sub(
        cls._html_entities_hex_unicode_regex,
        _replace_hex_entity,
        replacement_string
    )

    return replacement_string
python
def replace_html_entities(cls, replacement_string):
    """Replaces HTML escaped unicode entities with their unicode
    equivalent. If the setting `EMOJI_REPLACE_HTML_ENTITIES` is `True`
    then this conversion will always be done in `replace_unicode`
    (default: True).
    """
    def _hex_to_unicode(hex_code):
        if PYTHON3:
            hex_code = '{0:0>8}'.format(hex_code)
            as_int = struct.unpack('>i', bytes.fromhex(hex_code))[0]
            return '{0:c}'.format(as_int)
        else:
            return hex_to_unicode(hex_code)

    def _replace_integer_entity(match):
        hex_val = hex(int(match.group(1)))
        return _hex_to_unicode(hex_val.replace('0x', ''))

    def _replace_hex_entity(match):
        return _hex_to_unicode(match.group(1))

    # replace integer code points, &#65;
    replacement_string = re.sub(
        cls._html_entities_integer_unicode_regex,
        _replace_integer_entity,
        replacement_string
    )

    # replace hex code points, &#x41;
    replacement_string = re.sub(
        cls._html_entities_hex_unicode_regex,
        _replace_hex_entity,
        replacement_string
    )

    return replacement_string
[ "def", "replace_html_entities", "(", "cls", ",", "replacement_string", ")", ":", "def", "_hex_to_unicode", "(", "hex_code", ")", ":", "if", "PYTHON3", ":", "hex_code", "=", "'{0:0>8}'", ".", "format", "(", "hex_code", ")", "as_int", "=", "struct", ".", "unpack", "(", "'>i'", ",", "bytes", ".", "fromhex", "(", "hex_code", ")", ")", "[", "0", "]", "return", "'{0:c}'", ".", "format", "(", "as_int", ")", "else", ":", "return", "hex_to_unicode", "(", "hex_code", ")", "def", "_replace_integer_entity", "(", "match", ")", ":", "hex_val", "=", "hex", "(", "int", "(", "match", ".", "group", "(", "1", ")", ")", ")", "return", "_hex_to_unicode", "(", "hex_val", ".", "replace", "(", "'0x'", ",", "''", ")", ")", "def", "_replace_hex_entity", "(", "match", ")", ":", "return", "_hex_to_unicode", "(", "match", ".", "group", "(", "1", ")", ")", "# replace integer code points, &#65;", "replacement_string", "=", "re", ".", "sub", "(", "cls", ".", "_html_entities_integer_unicode_regex", ",", "_replace_integer_entity", ",", "replacement_string", ")", "# replace hex code points, &#x41;", "replacement_string", "=", "re", ".", "sub", "(", "cls", ".", "_html_entities_hex_unicode_regex", ",", "_replace_hex_entity", ",", "replacement_string", ")", "return", "replacement_string" ]
Replaces HTML escaped unicode entities with their unicode equivalent. If the setting `EMOJI_REPLACE_HTML_ENTITIES` is `True` then this conversion will always be done in `replace_unicode` (default: True).
[ "Replaces", "HTML", "escaped", "unicode", "entities", "with", "their", "unicode", "equivalent", ".", "If", "the", "setting", "EMOJI_REPLACE_HTML_ENTITIES", "is", "True", "then", "this", "conversion", "will", "always", "be", "done", "in", "replace_unicode", "(", "default", ":", "True", ")", "." ]
train
https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L198-L234
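The record above does two `re.sub` passes, one for decimal entities and one for hex ones. A self-contained sketch of the same two-pass conversion, with the regexes written inline instead of read from the class attributes (`_html_entities_integer_unicode_regex`, `_html_entities_hex_unicode_regex`) referenced in the source:

```python
import re

_INT_ENTITY = re.compile(r'&#(\d+);')            # decimal, e.g. &#65;
_HEX_ENTITY = re.compile(r'&#x([0-9a-fA-F]+);')  # hex, e.g. &#x41;

def replace_html_entities_sketch(s):
    s = _INT_ENTITY.sub(lambda m: chr(int(m.group(1))), s)
    s = _HEX_ENTITY.sub(lambda m: chr(int(m.group(1), 16)), s)
    return s

print(replace_html_entities_sketch('&#65;&#x42;C'))  # -> ABC
```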
gaqzi/django-emoji
bin/generate-unicode-aliases.py
_convert_to_unicode
def _convert_to_unicode(string): """This method should work with both Python 2 and 3 with the caveat that they need to be compiled with wide unicode character support. If there isn't wide unicode character support it'll blow up with a warning. """ codepoints = [] for character in string.split('-'): if character in BLACKLIST_UNICODE: continue codepoints.append( '\U{0:0>8}'.format(character).decode('unicode-escape') ) return codepoints
python
def _convert_to_unicode(string): """This method should work with both Python 2 and 3 with the caveat that they need to be compiled with wide unicode character support. If there isn't wide unicode character support it'll blow up with a warning. """ codepoints = [] for character in string.split('-'): if character in BLACKLIST_UNICODE: continue codepoints.append( '\U{0:0>8}'.format(character).decode('unicode-escape') ) return codepoints
[ "def", "_convert_to_unicode", "(", "string", ")", ":", "codepoints", "=", "[", "]", "for", "character", "in", "string", ".", "split", "(", "'-'", ")", ":", "if", "character", "in", "BLACKLIST_UNICODE", ":", "continue", "codepoints", ".", "append", "(", "'\\U{0:0>8}'", ".", "format", "(", "character", ")", ".", "decode", "(", "'unicode-escape'", ")", ")", "return", "codepoints" ]
This method should work with both Python 2 and 3 with the caveat that they need to be compiled with wide unicode character support. If there isn't wide unicode character support it'll blow up with a warning.
[ "This", "method", "should", "work", "with", "both", "Python", "2", "and", "3", "with", "the", "caveat", "that", "they", "need", "to", "be", "compiled", "with", "wide", "unicode", "character", "support", "." ]
train
https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/bin/generate-unicode-aliases.py#L32-L49
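On Python 3 alone, the hex-to-character step above collapses to `chr(int(cp, 16))`, and skipping blacklisted codepoints needs `continue` (a bare `next` is a no-op expression, which was the bug fixed above). A sketch under those assumptions, with a hypothetical blacklist entry:

```python
BLACKLIST_UNICODE = {'fe0f'}  # hypothetical sample of skipped codepoints

def convert_to_unicode_py3(string):
    codepoints = []
    for character in string.split('-'):
        if character in BLACKLIST_UNICODE:
            continue  # actually skips; a bare 'next' evaluates and does nothing
        codepoints.append(chr(int(character, 16)))
    return codepoints

print(convert_to_unicode_py3('1f600-fe0f'))  # ['😀']
```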
acsone/bobtemplates.odoo
bobtemplates/odoo/hooks.py
_delete_file
def _delete_file(configurator, path): """ remove file and remove its directories if empty """ path = os.path.join(configurator.target_directory, path) os.remove(path) try: os.removedirs(os.path.dirname(path)) except OSError: pass
python
def _delete_file(configurator, path): """ remove file and remove its directories if empty """ path = os.path.join(configurator.target_directory, path) os.remove(path) try: os.removedirs(os.path.dirname(path)) except OSError: pass
[ "def", "_delete_file", "(", "configurator", ",", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "configurator", ".", "target_directory", ",", "path", ")", "os", ".", "remove", "(", "path", ")", "try", ":", "os", ".", "removedirs", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", "except", "OSError", ":", "pass" ]
remove file and remove its directories if empty
[ "remove", "file", "and", "remove", "its", "directories", "if", "empty" ]
train
https://github.com/acsone/bobtemplates.odoo/blob/6e8c3cb12747d8b5af5a9821f995f285251e4d4d/bobtemplates/odoo/hooks.py#L34-L41
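The pruning in `_delete_file` relies on `os.removedirs`, which deletes the leaf directory and then each now-empty parent until it hits a non-empty one, at which point it raises `OSError`. A runnable sketch of the same pattern:

```python
import os

def delete_file_and_prune(path):
    os.remove(path)
    try:
        # removes the parent directory, then its parents, while each is empty
        os.removedirs(os.path.dirname(path))
    except OSError:
        pass  # a non-empty ancestor stops the pruning

os.makedirs('tmp_pkg/sub', exist_ok=True)
open('tmp_pkg/sub/mod.py', 'w').close()
delete_file_and_prune('tmp_pkg/sub/mod.py')
print(os.path.exists('tmp_pkg'))  # False: both directories were empty
```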
acsone/bobtemplates.odoo
bobtemplates/odoo/hooks.py
_insert_manifest_item
def _insert_manifest_item(configurator, key, item): """ Insert an item in the list of an existing manifest key """ with _open_manifest(configurator) as f: manifest = f.read() if item in ast.literal_eval(manifest).get(key, []): return pattern = """(["']{}["']:\\s*\\[)""".format(key) repl = """\\1\n '{}',""".format(item) manifest = re.sub(pattern, repl, manifest, flags=re.MULTILINE) with _open_manifest(configurator, "w") as f: f.write(manifest)
python
def _insert_manifest_item(configurator, key, item): """ Insert an item in the list of an existing manifest key """ with _open_manifest(configurator) as f: manifest = f.read() if item in ast.literal_eval(manifest).get(key, []): return pattern = """(["']{}["']:\\s*\\[)""".format(key) repl = """\\1\n '{}',""".format(item) manifest = re.sub(pattern, repl, manifest, flags=re.MULTILINE) with _open_manifest(configurator, "w") as f: f.write(manifest)
[ "def", "_insert_manifest_item", "(", "configurator", ",", "key", ",", "item", ")", ":", "with", "_open_manifest", "(", "configurator", ")", "as", "f", ":", "manifest", "=", "f", ".", "read", "(", ")", "if", "item", "in", "ast", ".", "literal_eval", "(", "manifest", ")", ".", "get", "(", "key", ",", "[", "]", ")", ":", "return", "pattern", "=", "\"\"\"([\"']{}[\"']:\\\\s*\\\\[)\"\"\"", ".", "format", "(", "key", ")", "repl", "=", "\"\"\"\\\\1\\n '{}',\"\"\"", ".", "format", "(", "item", ")", "manifest", "=", "re", ".", "sub", "(", "pattern", ",", "repl", ",", "manifest", ",", "flags", "=", "re", ".", "MULTILINE", ")", "with", "_open_manifest", "(", "configurator", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "manifest", ")" ]
Insert an item in the list of an existing manifest key
[ "Insert", "an", "item", "in", "the", "list", "of", "an", "existing", "manifest", "key" ]
train
https://github.com/acsone/bobtemplates.odoo/blob/6e8c3cb12747d8b5af5a9821f995f285251e4d4d/bobtemplates/odoo/hooks.py#L58-L68
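To see the regex insertion in isolation, here is a sketch that edits a manifest-style dict literal directly, using `ast.literal_eval` as the idempotence guard and passing `re.MULTILINE` via the `flags` keyword (the original passed it as the positional `count` argument, which was the bug fixed above); the sample manifest string is made up for the example:

```python
import ast
import re

manifest = """{
    'depends': [
        'base',
    ],
}"""

def insert_manifest_item(manifest, key, item):
    # skip the edit if the item is already present (idempotence guard)
    if item in ast.literal_eval(manifest).get(key, []):
        return manifest
    pattern = r"""(["']%s["']:\s*\[)""" % key
    repl = "\\1\n        '%s'," % item
    return re.sub(pattern, repl, manifest, flags=re.MULTILINE)

print(insert_manifest_item(manifest, 'depends', 'web'))
```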
nyaruka/smartmin
setup.py
_read_requirements
def _read_requirements(filename): """Parses a file for pip installation requirements.""" with open(filename) as requirements_file: contents = requirements_file.read() return [line.strip() for line in contents.splitlines() if _is_requirement(line)]
python
def _read_requirements(filename): """Parses a file for pip installation requirements.""" with open(filename) as requirements_file: contents = requirements_file.read() return [line.strip() for line in contents.splitlines() if _is_requirement(line)]
[ "def", "_read_requirements", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "requirements_file", ":", "contents", "=", "requirements_file", ".", "read", "(", ")", "return", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "contents", ".", "splitlines", "(", ")", "if", "_is_requirement", "(", "line", ")", "]" ]
Parses a file for pip installation requirements.
[ "Parses", "a", "file", "for", "pip", "installation", "requirements", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/setup.py#L17-L21
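`_read_requirements` depends on an `_is_requirement` helper that is not part of this record. A plausible version (an assumption, not necessarily the project's actual filter) would drop blank lines, comments, and pip options:

```python
def _is_requirement(line):
    # hypothetical filter: keep non-empty lines that are not comments and
    # not pip options such as '-r other.txt' or '--hash=...'
    line = line.strip()
    return bool(line) and not line.startswith(('#', '-'))

def read_requirements_sketch(contents):
    return [line.strip() for line in contents.splitlines()
            if _is_requirement(line)]

print(read_requirements_sketch("# deps\nDjango>=2.2\n\n-r extra.txt\n"))
# ['Django>=2.2']
```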
nyaruka/smartmin
smartmin/perms.py
assign_perm
def assign_perm(perm, group): """ Assigns a permission to a group """ if not isinstance(perm, Permission): try: app_label, codename = perm.split('.', 1) except ValueError: raise ValueError("For global permissions, first argument must be in" " format: 'app_label.codename' (is %r)" % perm) perm = Permission.objects.get(content_type__app_label=app_label, codename=codename) group.permissions.add(perm) return perm
python
def assign_perm(perm, group): """ Assigns a permission to a group """ if not isinstance(perm, Permission): try: app_label, codename = perm.split('.', 1) except ValueError: raise ValueError("For global permissions, first argument must be in" " format: 'app_label.codename' (is %r)" % perm) perm = Permission.objects.get(content_type__app_label=app_label, codename=codename) group.permissions.add(perm) return perm
[ "def", "assign_perm", "(", "perm", ",", "group", ")", ":", "if", "not", "isinstance", "(", "perm", ",", "Permission", ")", ":", "try", ":", "app_label", ",", "codename", "=", "perm", ".", "split", "(", "'.'", ",", "1", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"For global permissions, first argument must be in\"", "\" format: 'app_label.codename' (is %r)\"", "%", "perm", ")", "perm", "=", "Permission", ".", "objects", ".", "get", "(", "content_type__app_label", "=", "app_label", ",", "codename", "=", "codename", ")", "group", ".", "permissions", ".", "add", "(", "perm", ")", "return", "perm" ]
Assigns a permission to a group
[ "Assigns", "a", "permission", "to", "a", "group" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/perms.py#L4-L17
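The `'app_label.codename'` parsing at the heart of `assign_perm` can be exercised on its own; `maxsplit=1` keeps any later dots inside the codename:

```python
def split_perm(perm):
    # 'app_label.codename' -> ('app_label', 'codename'); maxsplit=1 keeps
    # any further dots inside the codename
    try:
        app_label, codename = perm.split('.', 1)
    except ValueError:
        raise ValueError("For global permissions, first argument must be in"
                         " format: 'app_label.codename' (is %r)" % perm)
    return app_label, codename

print(split_perm('auth.add_user'))  # ('auth', 'add_user')
```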
nyaruka/smartmin
smartmin/perms.py
remove_perm
def remove_perm(perm, group): """ Removes a permission from a group """ if not isinstance(perm, Permission): try: app_label, codename = perm.split('.', 1) except ValueError: raise ValueError("For global permissions, first argument must be in" " format: 'app_label.codename' (is %r)" % perm) perm = Permission.objects.get(content_type__app_label=app_label, codename=codename) group.permissions.remove(perm) return
python
def remove_perm(perm, group): """ Removes a permission from a group """ if not isinstance(perm, Permission): try: app_label, codename = perm.split('.', 1) except ValueError: raise ValueError("For global permissions, first argument must be in" " format: 'app_label.codename' (is %r)" % perm) perm = Permission.objects.get(content_type__app_label=app_label, codename=codename) group.permissions.remove(perm) return
[ "def", "remove_perm", "(", "perm", ",", "group", ")", ":", "if", "not", "isinstance", "(", "perm", ",", "Permission", ")", ":", "try", ":", "app_label", ",", "codename", "=", "perm", ".", "split", "(", "'.'", ",", "1", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"For global permissions, first argument must be in\"", "\" format: 'app_label.codename' (is %r)\"", "%", "perm", ")", "perm", "=", "Permission", ".", "objects", ".", "get", "(", "content_type__app_label", "=", "app_label", ",", "codename", "=", "codename", ")", "group", ".", "permissions", ".", "remove", "(", "perm", ")", "return" ]
Removes a permission from a group
[ "Removes", "a", "permission", "from", "a", "group" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/perms.py#L20-L33
nyaruka/smartmin
smartmin/templatetags/smartmin.py
get_list_class
def get_list_class(context, list): """ Returns the class to use for the passed in list. We just build something up from the object type for the list. """ return "list_%s_%s" % (list.model._meta.app_label, list.model._meta.model_name)
python
def get_list_class(context, list): """ Returns the class to use for the passed in list. We just build something up from the object type for the list. """ return "list_%s_%s" % (list.model._meta.app_label, list.model._meta.model_name)
[ "def", "get_list_class", "(", "context", ",", "list", ")", ":", "return", "\"list_%s_%s\"", "%", "(", "list", ".", "model", ".", "_meta", ".", "app_label", ",", "list", ".", "model", ".", "_meta", ".", "model_name", ")" ]
Returns the class to use for the passed in list. We just build something up from the object type for the list.
[ "Returns", "the", "class", "to", "use", "for", "the", "passed", "in", "list", ".", "We", "just", "build", "something", "up", "from", "the", "object", "type", "for", "the", "list", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L24-L29
nyaruka/smartmin
smartmin/templatetags/smartmin.py
format_datetime
def format_datetime(time): """ Formats a date, converting the time to the user timezone if one is specified """ user_time_zone = timezone.get_current_timezone() if time.tzinfo is None: time = time.replace(tzinfo=pytz.utc) user_time_zone = pytz.timezone(getattr(settings, 'USER_TIME_ZONE', 'GMT')) time = time.astimezone(user_time_zone) return time.strftime("%b %d, %Y %H:%M")
python
def format_datetime(time): """ Formats a date, converting the time to the user timezone if one is specified """ user_time_zone = timezone.get_current_timezone() if time.tzinfo is None: time = time.replace(tzinfo=pytz.utc) user_time_zone = pytz.timezone(getattr(settings, 'USER_TIME_ZONE', 'GMT')) time = time.astimezone(user_time_zone) return time.strftime("%b %d, %Y %H:%M")
[ "def", "format_datetime", "(", "time", ")", ":", "user_time_zone", "=", "timezone", ".", "get_current_timezone", "(", ")", "if", "time", ".", "tzinfo", "is", "None", ":", "time", "=", "time", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "user_time_zone", "=", "pytz", ".", "timezone", "(", "getattr", "(", "settings", ",", "'USER_TIME_ZONE'", ",", "'GMT'", ")", ")", "time", "=", "time", ".", "astimezone", "(", "user_time_zone", ")", "return", "time", ".", "strftime", "(", "\"%b %d, %Y %H:%M\"", ")" ]
Formats a date, converting the time to the user timezone if one is specified
[ "Formats", "a", "date", "converting", "the", "time", "to", "the", "user", "timezone", "if", "one", "is", "specified" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L32-L42
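A simplified standalone sketch of the same conversion, which assumes naive datetimes are UTC and takes the target zone as a plain argument rather than reading `USER_TIME_ZONE` from Django settings:

```python
from datetime import datetime

import pytz

def format_datetime_sketch(time, user_tz_name='GMT'):
    if time.tzinfo is None:
        # naive datetimes are assumed to be UTC before conversion
        time = time.replace(tzinfo=pytz.utc)
    time = time.astimezone(pytz.timezone(user_tz_name))
    return time.strftime("%b %d, %Y %H:%M")

print(format_datetime_sketch(datetime(2024, 1, 1, 12, 0), 'Africa/Kigali'))
# Jan 01, 2024 14:00
```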
nyaruka/smartmin
smartmin/templatetags/smartmin.py
get_value_from_view
def get_value_from_view(context, field): """ Responsible for deriving the displayed value for the passed in 'field'. This first checks for a particular method on the ListView, then looks for a method on the object, then finally treats it as an attribute. """ view = context['view'] obj = None if 'object' in context: obj = context['object'] value = view.lookup_field_value(context, obj, field) # it's a date if type(value) == datetime: return format_datetime(value) return value
python
def get_value_from_view(context, field): """ Responsible for deriving the displayed value for the passed in 'field'. This first checks for a particular method on the ListView, then looks for a method on the object, then finally treats it as an attribute. """ view = context['view'] obj = None if 'object' in context: obj = context['object'] value = view.lookup_field_value(context, obj, field) # it's a date if type(value) == datetime: return format_datetime(value) return value
[ "def", "get_value_from_view", "(", "context", ",", "field", ")", ":", "view", "=", "context", "[", "'view'", "]", "obj", "=", "None", "if", "'object'", "in", "context", ":", "obj", "=", "context", "[", "'object'", "]", "value", "=", "view", ".", "lookup_field_value", "(", "context", ",", "obj", ",", "field", ")", "# it's a date", "if", "type", "(", "value", ")", "==", "datetime", ":", "return", "format_datetime", "(", "value", ")", "return", "value" ]
Responsible for deriving the displayed value for the passed in 'field'. This first checks for a particular method on the ListView, then looks for a method on the object, then finally treats it as an attribute.
[ "Responsible", "for", "deriving", "the", "displayed", "value", "for", "the", "passed", "in", "field", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L46-L64
nyaruka/smartmin
smartmin/templatetags/smartmin.py
get_class
def get_class(context, field, obj=None): """ Looks up the class for this field """ view = context['view'] return view.lookup_field_class(field, obj, "field_" + field)
python
def get_class(context, field, obj=None): """ Looks up the class for this field """ view = context['view'] return view.lookup_field_class(field, obj, "field_" + field)
[ "def", "get_class", "(", "context", ",", "field", ",", "obj", "=", "None", ")", ":", "view", "=", "context", "[", "'view'", "]", "return", "view", ".", "lookup_field_class", "(", "field", ",", "obj", ",", "\"field_\"", "+", "field", ")" ]
Looks up the class for this field
[ "Looks", "up", "the", "class", "for", "this", "field" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L84-L89
nyaruka/smartmin
smartmin/templatetags/smartmin.py
get_label
def get_label(context, field, obj=None): """ Responsible for figuring out the right label for the passed in field. The order of precedence is: 1) if the view has a field_config and a label specified there, use that label 2) check for a form in the view, if it contains that field, use it's value """ view = context['view'] return view.lookup_field_label(context, field, obj)
python
def get_label(context, field, obj=None): """ Responsible for figuring out the right label for the passed in field. The order of precedence is: 1) if the view has a field_config and a label specified there, use that label 2) check for a form in the view, if it contains that field, use it's value """ view = context['view'] return view.lookup_field_label(context, field, obj)
[ "def", "get_label", "(", "context", ",", "field", ",", "obj", "=", "None", ")", ":", "view", "=", "context", "[", "'view'", "]", "return", "view", ".", "lookup_field_label", "(", "context", ",", "field", ",", "obj", ")" ]
Responsible for figuring out the right label for the passed in field. The order of precedence is: 1) if the view has a field_config and a label specified there, use that label 2) check for a form in the view, if it contains that field, use it's value
[ "Responsible", "for", "figuring", "out", "the", "right", "label", "for", "the", "passed", "in", "field", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L93-L102
nyaruka/smartmin
smartmin/templatetags/smartmin.py
get_field_link
def get_field_link(context, field, obj=None): """ Determine what the field link should be for the given field, object pair """ view = context['view'] return view.lookup_field_link(context, field, obj)
python
def get_field_link(context, field, obj=None): """ Determine what the field link should be for the given field, object pair """ view = context['view'] return view.lookup_field_link(context, field, obj)
[ "def", "get_field_link", "(", "context", ",", "field", ",", "obj", "=", "None", ")", ":", "view", "=", "context", "[", "'view'", "]", "return", "view", ".", "lookup_field_link", "(", "context", ",", "field", ",", "obj", ")" ]
Determine what the field link should be for the given field, object pair
[ "Determine", "what", "the", "field", "link", "should", "be", "for", "the", "given", "field", "object", "pair" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L106-L111
nyaruka/smartmin
smartmin/management/__init__.py
get_permissions_app_name
def get_permissions_app_name(): """ Gets the app after which smartmin permissions should be installed. This can be specified by PERMISSIONS_APP in the Django settings or defaults to the last app with models """ global permissions_app_name if not permissions_app_name: permissions_app_name = getattr(settings, 'PERMISSIONS_APP', None) if not permissions_app_name: app_names_with_models = [a.name for a in apps.get_app_configs() if a.models_module is not None] if app_names_with_models: permissions_app_name = app_names_with_models[-1] return permissions_app_name
python
def get_permissions_app_name(): """ Gets the app after which smartmin permissions should be installed. This can be specified by PERMISSIONS_APP in the Django settings or defaults to the last app with models """ global permissions_app_name if not permissions_app_name: permissions_app_name = getattr(settings, 'PERMISSIONS_APP', None) if not permissions_app_name: app_names_with_models = [a.name for a in apps.get_app_configs() if a.models_module is not None] if app_names_with_models: permissions_app_name = app_names_with_models[-1] return permissions_app_name
[ "def", "get_permissions_app_name", "(", ")", ":", "global", "permissions_app_name", "if", "not", "permissions_app_name", ":", "permissions_app_name", "=", "getattr", "(", "settings", ",", "'PERMISSIONS_APP'", ",", "None", ")", "if", "not", "permissions_app_name", ":", "app_names_with_models", "=", "[", "a", ".", "name", "for", "a", "in", "apps", ".", "get_app_configs", "(", ")", "if", "a", ".", "models_module", "is", "not", "None", "]", "if", "app_names_with_models", ":", "permissions_app_name", "=", "app_names_with_models", "[", "-", "1", "]", "return", "permissions_app_name" ]
Gets the app after which smartmin permissions should be installed. This can be specified by PERMISSIONS_APP in the Django settings or defaults to the last app with models
[ "Gets", "the", "app", "after", "which", "smartmin", "permissions", "should", "be", "installed", ".", "This", "can", "be", "specified", "by", "PERMISSIONS_APP", "in", "the", "Django", "settings", "or", "defaults", "to", "the", "last", "app", "with", "models" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L15-L30
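The function above is a module-level memoization: compute once, store in a global, return the cached value thereafter. The same pattern in miniature, with a hypothetical `compute` callable standing in for the settings/app lookup:

```python
_cached_value = None

def get_cached_setting(compute):
    # compute once, store in a module-level global, reuse afterwards
    global _cached_value
    if not _cached_value:
        _cached_value = compute()
    return _cached_value

print(get_cached_setting(lambda: 'myapp'))   # myapp
print(get_cached_setting(lambda: 'other'))   # still myapp
```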
nyaruka/smartmin
smartmin/management/__init__.py
check_role_permissions
def check_role_permissions(role, permissions, current_permissions): """ Checks that the passed in role (can be user, group or AnonymousUser) has all the passed in permissions, granting them if necessary. """ role_permissions = [] # get all the current permissions, we'll remove these as we verify they should still be granted for permission in permissions: splits = permission.split(".") if len(splits) != 2 and len(splits) != 3: sys.stderr.write(" invalid permission %s, ignoring\n" % permission) continue app = splits[0] codenames = [] if len(splits) == 2: codenames.append(splits[1]) else: (object, action) = splits[1:] # if this is a wildcard, then query our database for all the permissions that exist on this object if action == '*': for perm in Permission.objects.filter(codename__startswith="%s_" % object, content_type__app_label=app): codenames.append(perm.codename) # otherwise, this is an error, continue else: sys.stderr.write(" invalid permission %s, ignoring\n" % permission) continue if len(codenames) == 0: continue for codename in codenames: # the full codename for this permission full_codename = "%s.%s" % (app, codename) # this marks all the permissions which should remain role_permissions.append(full_codename) try: assign_perm(full_codename, role) except ObjectDoesNotExist: pass # sys.stderr.write(" unknown permission %s, ignoring\n" % permission) # remove any that are extra for permission in current_permissions: if isinstance(permission, str): key = permission else: key = "%s.%s" % (permission.content_type.app_label, permission.codename) if key not in role_permissions: remove_perm(key, role)
python
def check_role_permissions(role, permissions, current_permissions): """ Checks that the passed in role (can be user, group or AnonymousUser) has all the passed in permissions, granting them if necessary. """ role_permissions = [] # get all the current permissions, we'll remove these as we verify they should still be granted for permission in permissions: splits = permission.split(".") if len(splits) != 2 and len(splits) != 3: sys.stderr.write(" invalid permission %s, ignoring\n" % permission) continue app = splits[0] codenames = [] if len(splits) == 2: codenames.append(splits[1]) else: (object, action) = splits[1:] # if this is a wildcard, then query our database for all the permissions that exist on this object if action == '*': for perm in Permission.objects.filter(codename__startswith="%s_" % object, content_type__app_label=app): codenames.append(perm.codename) # otherwise, this is an error, continue else: sys.stderr.write(" invalid permission %s, ignoring\n" % permission) continue if len(codenames) == 0: continue for codename in codenames: # the full codename for this permission full_codename = "%s.%s" % (app, codename) # this marks all the permissions which should remain role_permissions.append(full_codename) try: assign_perm(full_codename, role) except ObjectDoesNotExist: pass # sys.stderr.write(" unknown permission %s, ignoring\n" % permission) # remove any that are extra for permission in current_permissions: if isinstance(permission, str): key = permission else: key = "%s.%s" % (permission.content_type.app_label, permission.codename) if key not in role_permissions: remove_perm(key, role)
[ "def", "check_role_permissions", "(", "role", ",", "permissions", ",", "current_permissions", ")", ":", "role_permissions", "=", "[", "]", "# get all the current permissions, we'll remove these as we verify they should still be granted", "for", "permission", "in", "permissions", ":", "splits", "=", "permission", ".", "split", "(", "\".\"", ")", "if", "len", "(", "splits", ")", "!=", "2", "and", "len", "(", "splits", ")", "!=", "3", ":", "sys", ".", "stderr", ".", "write", "(", "\" invalid permission %s, ignoring\\n\"", "%", "permission", ")", "continue", "app", "=", "splits", "[", "0", "]", "codenames", "=", "[", "]", "if", "len", "(", "splits", ")", "==", "2", ":", "codenames", ".", "append", "(", "splits", "[", "1", "]", ")", "else", ":", "(", "object", ",", "action", ")", "=", "splits", "[", "1", ":", "]", "# if this is a wildcard, then query our database for all the permissions that exist on this object", "if", "action", "==", "'*'", ":", "for", "perm", "in", "Permission", ".", "objects", ".", "filter", "(", "codename__startswith", "=", "\"%s_\"", "%", "object", ",", "content_type__app_label", "=", "app", ")", ":", "codenames", ".", "append", "(", "perm", ".", "codename", ")", "# otherwise, this is an error, continue", "else", ":", "sys", ".", "stderr", ".", "write", "(", "\" invalid permission %s, ignoring\\n\"", "%", "permission", ")", "continue", "if", "len", "(", "codenames", ")", "==", "0", ":", "continue", "for", "codename", "in", "codenames", ":", "# the full codename for this permission", "full_codename", "=", "\"%s.%s\"", "%", "(", "app", ",", "codename", ")", "# this marks all the permissions which should remain", "role_permissions", ".", "append", "(", "full_codename", ")", "try", ":", "assign_perm", "(", "full_codename", ",", "role", ")", "except", "ObjectDoesNotExist", ":", "pass", "# sys.stderr.write(\" unknown permission %s, ignoring\\n\" % permission)", "# remove any that are extra", "for", "permission", "in", "current_permissions", ":", "if", "isinstance", "(", "permission", ",", "str", ")", ":", "key", "=", "permission", "else", ":", "key", "=", "\"%s.%s\"", "%", "(", "permission", ".", "content_type", ".", "app_label", ",", "permission", ".", "codename", ")", "if", "key", "not", "in", "role_permissions", ":", "remove_perm", "(", "key", ",", "role", ")" ]
Checks that the passed in role (can be user, group or AnonymousUser) has all the passed in permissions, granting them if necessary.
[ "Checks", "that", "the", "passed", "in", "role", "(", "can", "be", "user", "group", "or", "AnonymousUser", ")", "has", "all", "the", "passed", "in", "permissions", "granting", "them", "if", "necessary", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L40-L95
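The permission strings accepted above come in two shapes, `'app.codename'` and `'app.object.action'` (where the action may be the wildcard `'*'`). A database-free sketch of just the parsing step:

```python
def parse_permission(permission):
    # returns (app, object_or_codename, action); action is None for plain
    # 'app.codename' strings, and '*' marks a wildcard
    splits = permission.split(".")
    if len(splits) == 2:
        return splits[0], splits[1], None
    if len(splits) == 3:
        return splits[0], splits[1], splits[2]
    raise ValueError("invalid permission %s" % permission)

print(parse_permission('polls.poll_create'))  # ('polls', 'poll_create', None)
print(parse_permission('polls.poll.*'))       # ('polls', 'poll', '*')
```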
nyaruka/smartmin
smartmin/management/__init__.py
check_all_group_permissions
def check_all_group_permissions(sender, **kwargs): """ Checks that all the permissions specified in our settings.py are set for our groups. """ if not is_permissions_app(sender): return config = getattr(settings, 'GROUP_PERMISSIONS', dict()) # for each of our items for name, permissions in config.items(): # get or create the group (group, created) = Group.objects.get_or_create(name=name) if created: pass check_role_permissions(group, permissions, group.permissions.all())
python
def check_all_group_permissions(sender, **kwargs): """ Checks that all the permissions specified in our settings.py are set for our groups. """ if not is_permissions_app(sender): return config = getattr(settings, 'GROUP_PERMISSIONS', dict()) # for each of our items for name, permissions in config.items(): # get or create the group (group, created) = Group.objects.get_or_create(name=name) if created: pass check_role_permissions(group, permissions, group.permissions.all())
[ "def", "check_all_group_permissions", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "if", "not", "is_permissions_app", "(", "sender", ")", ":", "return", "config", "=", "getattr", "(", "settings", ",", "'GROUP_PERMISSIONS'", ",", "dict", "(", ")", ")", "# for each of our items", "for", "name", ",", "permissions", "in", "config", ".", "items", "(", ")", ":", "# get or create the group", "(", "group", ",", "created", ")", "=", "Group", ".", "objects", ".", "get_or_create", "(", "name", "=", "name", ")", "if", "created", ":", "pass", "check_role_permissions", "(", "group", ",", "permissions", ",", "group", ".", "permissions", ".", "all", "(", ")", ")" ]
Checks that all the permissions specified in our settings.py are set for our groups.
[ "Checks", "that", "all", "the", "permissions", "specified", "in", "our", "settings", ".", "py", "are", "set", "for", "our", "groups", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L98-L114
nyaruka/smartmin
smartmin/management/__init__.py
add_permission
def add_permission(content_type, permission): """ Adds the passed in permission to that content type. Note that the permission passed in should be a single word, or verb. The proper 'codename' will be generated from that. """ # build our permission slug codename = "%s_%s" % (content_type.model, permission) # sys.stderr.write("Checking %s permission for %s\n" % (permission, content_type.name)) # does it already exist if not Permission.objects.filter(content_type=content_type, codename=codename): Permission.objects.create(content_type=content_type, codename=codename, name="Can %s %s" % (permission, content_type.name))
python
def add_permission(content_type, permission): """ Adds the passed in permission to that content type. Note that the permission passed in should be a single word, or verb. The proper 'codename' will be generated from that. """ # build our permission slug codename = "%s_%s" % (content_type.model, permission) # sys.stderr.write("Checking %s permission for %s\n" % (permission, content_type.name)) # does it already exist if not Permission.objects.filter(content_type=content_type, codename=codename): Permission.objects.create(content_type=content_type, codename=codename, name="Can %s %s" % (permission, content_type.name))
[ "def", "add_permission", "(", "content_type", ",", "permission", ")", ":", "# build our permission slug", "codename", "=", "\"%s_%s\"", "%", "(", "content_type", ".", "model", ",", "permission", ")", "# sys.stderr.write(\"Checking %s permission for %s\\n\" % (permission, content_type.name))", "# does it already exist", "if", "not", "Permission", ".", "objects", ".", "filter", "(", "content_type", "=", "content_type", ",", "codename", "=", "codename", ")", ":", "Permission", ".", "objects", ".", "create", "(", "content_type", "=", "content_type", ",", "codename", "=", "codename", ",", "name", "=", "\"Can %s %s\"", "%", "(", "permission", ",", "content_type", ".", "name", ")", ")" ]
Adds the passed in permission to that content type. Note that the permission passed in should be a single word, or verb. The proper 'codename' will be generated from that.
[ "Adds", "the", "passed", "in", "permission", "to", "that", "content", "type", ".", "Note", "that", "the", "permission", "passed", "in", "should", "be", "a", "single", "word", "or", "verb", ".", "The", "proper", "codename", "will", "be", "generated", "from", "that", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L117-L131
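The codename scheme is simply `'<model>_<verb>'` with a human-readable `'Can <verb> <model>'` name; a tiny illustration:

```python
def build_codename(model_name, permission):
    # 'poll' + 'create' -> codename 'poll_create', name 'Can create poll'
    codename = "%s_%s" % (model_name, permission)
    name = "Can %s %s" % (permission, model_name)
    return codename, name

print(build_codename('poll', 'create'))
```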
nyaruka/smartmin
smartmin/management/__init__.py
check_all_permissions
def check_all_permissions(sender, **kwargs): """ This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions actually exist. """ if not is_permissions_app(sender): return config = getattr(settings, 'PERMISSIONS', dict()) # for each of our items for natural_key, permissions in config.items(): # if the natural key '*' then that means add to all objects if natural_key == '*': # for each of our content types for content_type in ContentType.objects.all(): for permission in permissions: add_permission(content_type, permission) # otherwise, this is on a specific content type, add for each of those else: app, model = natural_key.split('.') try: content_type = ContentType.objects.get_by_natural_key(app, model) except ContentType.DoesNotExist: continue # add each permission for permission in permissions: add_permission(content_type, permission)
python
def check_all_permissions(sender, **kwargs): """ This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions actually exist. """ if not is_permissions_app(sender): return config = getattr(settings, 'PERMISSIONS', dict()) # for each of our items for natural_key, permissions in config.items(): # if the natural key '*' then that means add to all objects if natural_key == '*': # for each of our content types for content_type in ContentType.objects.all(): for permission in permissions: add_permission(content_type, permission) # otherwise, this is on a specific content type, add for each of those else: app, model = natural_key.split('.') try: content_type = ContentType.objects.get_by_natural_key(app, model) except ContentType.DoesNotExist: continue # add each permission for permission in permissions: add_permission(content_type, permission)
[ "def", "check_all_permissions", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "if", "not", "is_permissions_app", "(", "sender", ")", ":", "return", "config", "=", "getattr", "(", "settings", ",", "'PERMISSIONS'", ",", "dict", "(", ")", ")", "# for each of our items", "for", "natural_key", ",", "permissions", "in", "config", ".", "items", "(", ")", ":", "# if the natural key '*' then that means add to all objects", "if", "natural_key", "==", "'*'", ":", "# for each of our content types", "for", "content_type", "in", "ContentType", ".", "objects", ".", "all", "(", ")", ":", "for", "permission", "in", "permissions", ":", "add_permission", "(", "content_type", ",", "permission", ")", "# otherwise, this is on a specific content type, add for each of those", "else", ":", "app", ",", "model", "=", "natural_key", ".", "split", "(", "'.'", ")", "try", ":", "content_type", "=", "ContentType", ".", "objects", ".", "get_by_natural_key", "(", "app", ",", "model", ")", "except", "ContentType", ".", "DoesNotExist", ":", "continue", "# add each permission", "for", "permission", "in", "permissions", ":", "add_permission", "(", "content_type", ",", "permission", ")" ]
This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions actually exist.
[ "This", "syncdb", "checks", "our", "PERMISSIONS", "setting", "in", "settings", ".", "py", "and", "makes", "sure", "all", "those", "permissions", "actually", "exist", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L135-L164
nyaruka/smartmin
smartmin/users/views.py
UserForm.save
def save(self, commit=True): """ Overloaded so we can save any new password that is included. """ is_new_user = self.instance.pk is None user = super(UserForm, self).save(commit) # new users should be made active by default if is_new_user: user.is_active = True # if we had a new password set, use it new_pass = self.cleaned_data['new_password'] if new_pass: user.set_password(new_pass) if commit: user.save() return user
python
def save(self, commit=True): """ Overloaded so we can save any new password that is included. """ is_new_user = self.instance.pk is None user = super(UserForm, self).save(commit) # new users should be made active by default if is_new_user: user.is_active = True # if we had a new password set, use it new_pass = self.cleaned_data['new_password'] if new_pass: user.set_password(new_pass) if commit: user.save() return user
[ "def", "save", "(", "self", ",", "commit", "=", "True", ")", ":", "is_new_user", "=", "self", ".", "instance", ".", "pk", "is", "None", "user", "=", "super", "(", "UserForm", ",", "self", ")", ".", "save", "(", "commit", ")", "# new users should be made active by default", "if", "is_new_user", ":", "user", ".", "is_active", "=", "True", "# if we had a new password set, use it", "new_pass", "=", "self", ".", "cleaned_data", "[", "'new_password'", "]", "if", "new_pass", ":", "user", ".", "set_password", "(", "new_pass", ")", "if", "commit", ":", "user", ".", "save", "(", ")", "return", "user" ]
Overloaded so we can save any new password that is included.
[ "Overloaded", "so", "we", "can", "save", "any", "new", "password", "that", "is", "included", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/users/views.py#L39-L58
nyaruka/smartmin
smartmin/views.py
smart_url
def smart_url(url, obj=None): """ URLs that start with @ are reversed, using the passed in arguments. Otherwise a straight % substitution is applied. """ if url.find("@") >= 0: (args, value) = url.split('@') if args: val = getattr(obj, args, None) return reverse(value, args=[val]) else: return reverse(value) else: if obj is None: return url else: return url % obj.id
python
def smart_url(url, obj=None): """ URLs that start with @ are reversed, using the passed in arguments. Otherwise a straight % substitution is applied. """ if url.find("@") >= 0: (args, value) = url.split('@') if args: val = getattr(obj, args, None) return reverse(value, args=[val]) else: return reverse(value) else: if obj is None: return url else: return url % obj.id
[ "def", "smart_url", "(", "url", ",", "obj", "=", "None", ")", ":", "if", "url", ".", "find", "(", "\"@\"", ")", ">=", "0", ":", "(", "args", ",", "value", ")", "=", "url", ".", "split", "(", "'@'", ")", "if", "args", ":", "val", "=", "getattr", "(", "obj", ",", "args", ",", "None", ")", "return", "reverse", "(", "value", ",", "args", "=", "[", "val", "]", ")", "else", ":", "return", "reverse", "(", "value", ")", "else", ":", "if", "obj", "is", "None", ":", "return", "url", "else", ":", "return", "url", "%", "obj", ".", "id" ]
URLs that start with @ are reversed, using the passed in arguments. Otherwise a straight % substitution is applied.
[ "URLs", "that", "start", "with", "@", "are", "reversed", "using", "the", "passed", "in", "arguments", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L31-L49
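The `@` convention in `smart_url` is easiest to see with concrete inputs; this sketch only describes what each form would resolve to, using strings in place of Django's `reverse`:

```python
def describe_smart_url(url):
    # '@' URLs name a view, optionally prefixed by the object attribute to
    # pass as an argument; anything else is a '%'-format pattern
    if url.find("@") >= 0:
        attr, view_name = url.split('@')
        if attr:
            return "reverse(%r, args=[obj.%s])" % (view_name, attr)
        return "reverse(%r)" % view_name
    return url % 42  # pretend obj.id == 42

print(describe_smart_url('id@users.user_read'))
print(describe_smart_url('@users.user_list'))
print(describe_smart_url('/users/%d/'))  # /users/42/
```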
nyaruka/smartmin
smartmin/views.py
derive_single_object_url_pattern
def derive_single_object_url_pattern(slug_url_kwarg, path, action): """ Utility function called by class methods for single object views """ if slug_url_kwarg: return r'^%s/%s/(?P<%s>[^/]+)/$' % (path, action, slug_url_kwarg) else: return r'^%s/%s/(?P<pk>\d+)/$' % (path, action)
python
def derive_single_object_url_pattern(slug_url_kwarg, path, action): """ Utility function called by class methods for single object views """ if slug_url_kwarg: return r'^%s/%s/(?P<%s>[^/]+)/$' % (path, action, slug_url_kwarg) else: return r'^%s/%s/(?P<pk>\d+)/$' % (path, action)
[ "def", "derive_single_object_url_pattern", "(", "slug_url_kwarg", ",", "path", ",", "action", ")", ":", "if", "slug_url_kwarg", ":", "return", "r'^%s/%s/(?P<%s>[^/]+)/$'", "%", "(", "path", ",", "action", ",", "slug_url_kwarg", ")", "else", ":", "return", "r'^%s/%s/(?P<pk>\\d+)/$'", "%", "(", "path", ",", "action", ")" ]
Utility function called by class methods for single object views
[ "Utility", "function", "called", "by", "class", "methods", "for", "single", "object", "views" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L390-L397
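The two pattern shapes the helper emits can be checked directly with `re.match`:

```python
import re

# the helper yields either a numeric-pk pattern or a slug pattern
pk_pattern = r'^%s/%s/(?P<pk>\d+)/$' % ('users', 'update')
slug_pattern = r'^%s/%s/(?P<%s>[^/]+)/$' % ('users', 'read', 'username')

print(re.match(pk_pattern, 'users/update/12/').groupdict())       # {'pk': '12'}
print(re.match(slug_pattern, 'users/read/nyaruka/').groupdict())  # {'username': 'nyaruka'}
```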
nyaruka/smartmin
smartmin/views.py
SmartView.has_permission
def has_permission(self, request, *args, **kwargs): """ Figures out if the current user has permissions for this view. """ self.kwargs = kwargs self.args = args self.request = request if not getattr(self, 'permission', None): return True else: return request.user.has_perm(self.permission)
python
def has_permission(self, request, *args, **kwargs): """ Figures out if the current user has permissions for this view. """ self.kwargs = kwargs self.args = args self.request = request if not getattr(self, 'permission', None): return True else: return request.user.has_perm(self.permission)
[ "def", "has_permission", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "kwargs", "=", "kwargs", "self", ".", "args", "=", "args", "self", ".", "request", "=", "request", "if", "not", "getattr", "(", "self", ",", "'permission'", ",", "None", ")", ":", "return", "True", "else", ":", "return", "request", ".", "user", ".", "has_perm", "(", "self", ".", "permission", ")" ]
Figures out if the current user has permissions for this view.
[ "Figures", "out", "if", "the", "current", "user", "has", "permissions", "for", "this", "view", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L88-L99
nyaruka/smartmin
smartmin/views.py
SmartView.dispatch
def dispatch(self, request, *args, **kwargs): """ Overloaded to check permissions if appropriate """ def wrapper(request, *args, **kwargs): if not self.has_permission(request, *args, **kwargs): path = urlquote(request.get_full_path()) login_url = kwargs.pop('login_url', settings.LOGIN_URL) redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME) return HttpResponseRedirect("%s?%s=%s" % (login_url, redirect_field_name, path)) else: response = self.pre_process(request, *args, **kwargs) if not response: return super(SmartView, self).dispatch(request, *args, **kwargs) else: return response return wrapper(request, *args, **kwargs)
python
def dispatch(self, request, *args, **kwargs): """ Overloaded to check permissions if appropriate """ def wrapper(request, *args, **kwargs): if not self.has_permission(request, *args, **kwargs): path = urlquote(request.get_full_path()) login_url = kwargs.pop('login_url', settings.LOGIN_URL) redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME) return HttpResponseRedirect("%s?%s=%s" % (login_url, redirect_field_name, path)) else: response = self.pre_process(request, *args, **kwargs) if not response: return super(SmartView, self).dispatch(request, *args, **kwargs) else: return response return wrapper(request, *args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "has_permission", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "path", "=", "urlquote", "(", "request", ".", "get_full_path", "(", ")", ")", "login_url", "=", "kwargs", ".", "pop", "(", "'login_url'", ",", "settings", ".", "LOGIN_URL", ")", "redirect_field_name", "=", "kwargs", ".", "pop", "(", "'redirect_field_name'", ",", "REDIRECT_FIELD_NAME", ")", "return", "HttpResponseRedirect", "(", "\"%s?%s=%s\"", "%", "(", "login_url", ",", "redirect_field_name", ",", "path", ")", ")", "else", ":", "response", "=", "self", ".", "pre_process", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "response", ":", "return", "super", "(", "SmartView", ",", "self", ")", ".", "dispatch", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "response", "return", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Overloaded to check permissions if appropriate
[ "Overloaded", "to", "check", "permissions", "if", "appropriate" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L101-L118
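The redirect branch above builds the classic `LOGIN_URL?next=<current path>` URL. A standalone sketch using the standard library's `quote` in place of Django's `urlquote`:

```python
from urllib.parse import quote

def login_redirect_url(full_path, login_url='/accounts/login/',
                       redirect_field_name='next'):
    # the classic 'send to login, then bounce back' URL built above
    return "%s?%s=%s" % (login_url, redirect_field_name, quote(full_path))

print(login_redirect_url('/orders/7/?tab=items'))
# /accounts/login/?next=/orders/7/%3Ftab%3Ditems
```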
nyaruka/smartmin
smartmin/views.py
SmartView.lookup_obj_attribute
def lookup_obj_attribute(self, obj, field): """ Looks for a field's value from the passed in obj. Note that this will strip leading attributes to deal with subelements if possible """ curr_field = field.encode('ascii', 'ignore').decode("utf-8") rest = None if field.find('.') >= 0: curr_field = field.split('.')[0] rest = '.'.join(field.split('.')[1:]) # next up is the object itself obj_field = getattr(obj, curr_field, None) # if it is callable, do so if obj_field and getattr(obj_field, '__call__', None): obj_field = obj_field() if obj_field and rest: return self.lookup_obj_attribute(obj_field, rest) else: return obj_field
python
def lookup_obj_attribute(self, obj, field): """ Looks for a field's value from the passed in obj. Note that this will strip leading attributes to deal with subelements if possible """ curr_field = field.encode('ascii', 'ignore').decode("utf-8") rest = None if field.find('.') >= 0: curr_field = field.split('.')[0] rest = '.'.join(field.split('.')[1:]) # next up is the object itself obj_field = getattr(obj, curr_field, None) # if it is callable, do so if obj_field and getattr(obj_field, '__call__', None): obj_field = obj_field() if obj_field and rest: return self.lookup_obj_attribute(obj_field, rest) else: return obj_field
[ "def", "lookup_obj_attribute", "(", "self", ",", "obj", ",", "field", ")", ":", "curr_field", "=", "field", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", "\"utf-8\"", ")", "rest", "=", "None", "if", "field", ".", "find", "(", "'.'", ")", ">=", "0", ":", "curr_field", "=", "field", ".", "split", "(", "'.'", ")", "[", "0", "]", "rest", "=", "'.'", ".", "join", "(", "field", ".", "split", "(", "'.'", ")", "[", "1", ":", "]", ")", "# next up is the object itself", "obj_field", "=", "getattr", "(", "obj", ",", "curr_field", ",", "None", ")", "# if it is callable, do so", "if", "obj_field", "and", "getattr", "(", "obj_field", ",", "'__call__'", ",", "None", ")", ":", "obj_field", "=", "obj_field", "(", ")", "if", "obj_field", "and", "rest", ":", "return", "self", ".", "lookup_obj_attribute", "(", "obj_field", ",", "rest", ")", "else", ":", "return", "obj_field" ]
Looks for a field's value from the passed in obj. Note that this will strip leading attributes to deal with subelements if possible
[ "Looks", "for", "a", "field", "s", "value", "from", "the", "passed", "in", "obj", ".", "Note", "that", "this", "will", "strip", "leading", "attributes", "to", "deal", "with", "subelements", "if", "possible" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L130-L152
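The same dotted-path walk can be written without the view machinery; this sketch mirrors the recursion, calling any callable it encounters along the way:

```python
def lookup_attr_path(obj, path):
    # walk 'a.b.c' one attribute at a time, calling any callable found
    # along the way, stopping early when an attribute is missing
    head, _, rest = path.partition('.')
    value = getattr(obj, head, None)
    if callable(value):
        value = value()
    if value is not None and rest:
        return lookup_attr_path(value, rest)
    return value

class User:
    username = 'nyaruka'

    def upper_name(self):
        return self.username.upper()

print(lookup_attr_path(User(), 'upper_name'))  # NYARUKA
print(lookup_attr_path(User(), 'username'))    # nyaruka
```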
nyaruka/smartmin
smartmin/views.py
SmartView.lookup_field_value
def lookup_field_value(self, context, obj, field): """ Looks up the field value for the passed in object and field name. Note that this method is actually called from a template, but this provides a hook for subclasses to modify behavior if they wish to do so. This may be used for example to change the display value of a variable depending on other variables within our context. """ curr_field = field.encode('ascii', 'ignore').decode("utf-8") # if this isn't a subfield, check the view to see if it has a get_ method if field.find('.') == -1: # view supersedes all, does it have a 'get_' method for this obj view_method = getattr(self, 'get_%s' % curr_field, None) if view_method: return view_method(obj) return self.lookup_obj_attribute(obj, field)
python
def lookup_field_value(self, context, obj, field): """ Looks up the field value for the passed in object and field name. Note that this method is actually called from a template, but this provides a hook for subclasses to modify behavior if they wish to do so. This may be used for example to change the display value of a variable depending on other variables within our context. """ curr_field = field.encode('ascii', 'ignore').decode("utf-8") # if this isn't a subfield, check the view to see if it has a get_ method if field.find('.') == -1: # view supersedes all, does it have a 'get_' method for this obj view_method = getattr(self, 'get_%s' % curr_field, None) if view_method: return view_method(obj) return self.lookup_obj_attribute(obj, field)
[ "def", "lookup_field_value", "(", "self", ",", "context", ",", "obj", ",", "field", ")", ":", "curr_field", "=", "field", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", "\"utf-8\"", ")", "# if this isn't a subfield, check the view to see if it has a get_ method", "if", "field", ".", "find", "(", "'.'", ")", "==", "-", "1", ":", "# view supersedes all, does it have a 'get_' method for this obj", "view_method", "=", "getattr", "(", "self", ",", "'get_%s'", "%", "curr_field", ",", "None", ")", "if", "view_method", ":", "return", "view_method", "(", "obj", ")", "return", "self", ".", "lookup_obj_attribute", "(", "obj", ",", "field", ")" ]
Looks up the field value for the passed in object and field name. Note that this method is actually called from a template, but this provides a hook for subclasses to modify behavior if they wish to do so. This may be used for example to change the display value of a variable depending on other variables within our context.
[ "Looks", "up", "the", "field", "value", "for", "the", "passed", "in", "object", "and", "field", "name", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L154-L173
nyaruka/smartmin
smartmin/views.py
SmartView.lookup_field_label
def lookup_field_label(self, context, field, default=None): """ Figures out what the field label should be for the passed in field name. Our heuristic is as follows: 1) we check to see if our field_config has a label specified 2) if not, then we derive a field label from the field name """ # if this is a subfield, strip off everything but the last field name if field.find('.') >= 0: return self.lookup_field_label(context, field.split('.')[-1], default) label = None # is there a label specified for this field if field in self.field_config and 'label' in self.field_config[field]: label = self.field_config[field]['label'] # if we were given a default, use that elif default: label = default # check our model else: for model_field in self.model._meta.fields: if model_field.name == field: return model_field.verbose_name.title() # otherwise, derive it from our field name if label is None: label = self.derive_field_label(field) return label
python
def lookup_field_label(self, context, field, default=None): """ Figures out what the field label should be for the passed in field name. Our heuristic is as follows: 1) we check to see if our field_config has a label specified 2) if not, then we derive a field label from the field name """ # if this is a subfield, strip off everything but the last field name if field.find('.') >= 0: return self.lookup_field_label(context, field.split('.')[-1], default) label = None # is there a label specified for this field if field in self.field_config and 'label' in self.field_config[field]: label = self.field_config[field]['label'] # if we were given a default, use that elif default: label = default # check our model else: for model_field in self.model._meta.fields: if model_field.name == field: return model_field.verbose_name.title() # otherwise, derive it from our field name if label is None: label = self.derive_field_label(field) return label
[ "def", "lookup_field_label", "(", "self", ",", "context", ",", "field", ",", "default", "=", "None", ")", ":", "# if this is a subfield, strip off everything but the last field name", "if", "field", ".", "find", "(", "'.'", ")", ">=", "0", ":", "return", "self", ".", "lookup_field_label", "(", "context", ",", "field", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ",", "default", ")", "label", "=", "None", "# is there a label specified for this field", "if", "field", "in", "self", ".", "field_config", "and", "'label'", "in", "self", ".", "field_config", "[", "field", "]", ":", "label", "=", "self", ".", "field_config", "[", "field", "]", "[", "'label'", "]", "# if we were given a default, use that", "elif", "default", ":", "label", "=", "default", "# check our model", "else", ":", "for", "model_field", "in", "self", ".", "model", ".", "_meta", ".", "fields", ":", "if", "model_field", ".", "name", "==", "field", ":", "return", "model_field", ".", "verbose_name", ".", "title", "(", ")", "# otherwise, derive it from our field name", "if", "label", "is", "None", ":", "label", "=", "self", ".", "derive_field_label", "(", "field", ")", "return", "label" ]
Figures out what the field label should be for the passed in field name. Our heuristic is as follows: 1) we check to see if our field_config has a label specified 2) if not, then we derive a field label from the field name
[ "Figures", "out", "what", "the", "field", "label", "should", "be", "for", "the", "passed", "in", "field", "name", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L175-L207
nyaruka/smartmin
smartmin/views.py
SmartView.lookup_field_help
def lookup_field_help(self, field, default=None): """ Looks up the help text for the passed in field. """ help = None # is there a label specified for this field if field in self.field_config and 'help' in self.field_config[field]: help = self.field_config[field]['help'] # if we were given a default, use that elif default: help = default # try to see if there is a description on our model elif hasattr(self, 'model'): for model_field in self.model._meta.fields: if model_field.name == field: help = model_field.help_text break return help
python
def lookup_field_help(self, field, default=None): """ Looks up the help text for the passed in field. """ help = None # is there a label specified for this field if field in self.field_config and 'help' in self.field_config[field]: help = self.field_config[field]['help'] # if we were given a default, use that elif default: help = default # try to see if there is a description on our model elif hasattr(self, 'model'): for model_field in self.model._meta.fields: if model_field.name == field: help = model_field.help_text break return help
[ "def", "lookup_field_help", "(", "self", ",", "field", ",", "default", "=", "None", ")", ":", "help", "=", "None", "# is there a label specified for this field", "if", "field", "in", "self", ".", "field_config", "and", "'help'", "in", "self", ".", "field_config", "[", "field", "]", ":", "help", "=", "self", ".", "field_config", "[", "field", "]", "[", "'help'", "]", "# if we were given a default, use that", "elif", "default", ":", "help", "=", "default", "# try to see if there is a description on our model", "elif", "hasattr", "(", "self", ",", "'model'", ")", ":", "for", "model_field", "in", "self", ".", "model", ".", "_meta", ".", "fields", ":", "if", "model_field", ".", "name", "==", "field", ":", "help", "=", "model_field", ".", "help_text", "break", "return", "help" ]
Looks up the help text for the passed in field.
[ "Looks", "up", "the", "help", "text", "for", "the", "passed", "in", "field", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L209-L230
nyaruka/smartmin
smartmin/views.py
SmartView.lookup_field_class
def lookup_field_class(self, field, obj=None, default=None): """ Looks up any additional class we should include when rendering this field """ css = "" # is there a class specified for this field if field in self.field_config and 'class' in self.field_config[field]: css = self.field_config[field]['class'] # if we were given a default, use that elif default: css = default return css
python
def lookup_field_class(self, field, obj=None, default=None): """ Looks up any additional class we should include when rendering this field """ css = "" # is there a class specified for this field if field in self.field_config and 'class' in self.field_config[field]: css = self.field_config[field]['class'] # if we were given a default, use that elif default: css = default return css
[ "def", "lookup_field_class", "(", "self", ",", "field", ",", "obj", "=", "None", ",", "default", "=", "None", ")", ":", "css", "=", "\"\"", "# is there a class specified for this field", "if", "field", "in", "self", ".", "field_config", "and", "'class'", "in", "self", ".", "field_config", "[", "field", "]", ":", "css", "=", "self", ".", "field_config", "[", "field", "]", "[", "'class'", "]", "# if we were given a default, use that", "elif", "default", ":", "css", "=", "default", "return", "css" ]
Looks up any additional class we should include when rendering this field
[ "Looks", "up", "any", "additional", "class", "we", "should", "include", "when", "rendering", "this", "field" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L232-L246
nyaruka/smartmin
smartmin/views.py
SmartView.get_template_names
def get_template_names(self): """ Returns the name of the template to use to render this request. Smartmin provides default templates as fallbacks, so appends its own template names to the end of whatever list is built by the generic views. Subclasses can override this by setting a 'template_name' variable on the class. """ templates = [] if getattr(self, 'template_name', None): templates.append(self.template_name) if getattr(self, 'default_template', None): templates.append(self.default_template) else: templates = super(SmartView, self).get_template_names() return templates
python
def get_template_names(self): """ Returns the name of the template to use to render this request. Smartmin provides default templates as fallbacks, so appends its own template names to the end of whatever list is built by the generic views. Subclasses can override this by setting a 'template_name' variable on the class. """ templates = [] if getattr(self, 'template_name', None): templates.append(self.template_name) if getattr(self, 'default_template', None): templates.append(self.default_template) else: templates = super(SmartView, self).get_template_names() return templates
[ "def", "get_template_names", "(", "self", ")", ":", "templates", "=", "[", "]", "if", "getattr", "(", "self", ",", "'template_name'", ",", "None", ")", ":", "templates", ".", "append", "(", "self", ".", "template_name", ")", "if", "getattr", "(", "self", ",", "'default_template'", ",", "None", ")", ":", "templates", ".", "append", "(", "self", ".", "default_template", ")", "else", ":", "templates", "=", "super", "(", "SmartView", ",", "self", ")", ".", "get_template_names", "(", ")", "return", "templates" ]
Returns the name of the template to use to render this request. Smartmin provides default templates as fallbacks, so appends its own template names to the end of whatever list is built by the generic views. Subclasses can override this by setting a 'template_name' variable on the class.
[ "Returns", "the", "name", "of", "the", "template", "to", "use", "to", "render", "this", "request", "." ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L263-L281
nyaruka/smartmin
smartmin/views.py
SmartView.derive_fields
def derive_fields(self): """ Default implementation """ fields = [] if self.fields: fields.append(self.fields) return fields
python
def derive_fields(self): """ Default implementation """ fields = [] if self.fields: fields.append(self.fields) return fields
[ "def", "derive_fields", "(", "self", ")", ":", "fields", "=", "[", "]", "if", "self", ".", "fields", ":", "fields", ".", "append", "(", "self", ".", "fields", ")", "return", "fields" ]
Default implementation
[ "Default", "implementation" ]
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L283-L291