repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
listlengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
listlengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
dwkim78/upsilon
upsilon/extract_features/extract_features.py
ExtractFeatures.half_mag_amplitude_ratio
def half_mag_amplitude_ratio(self, mag, avg, weight):
    """Return the ratio of amplitudes of fainter vs. brighter magnitudes.

    The weighted spread of magnitudes fainter than the average is compared
    with the weighted spread of magnitudes brighter than (or equal to) the
    average. By definition this ratio should be higher for eclipsing
    binaries (EB) than for other variability classes.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        An average value of magnitudes.
    weight : array_like
        An array of weights, aligned with ``mag``.

    Returns
    -------
    hl_ratio : float
        Ratio of amplitude of higher and lower magnitudes than average.
    """
    # Fainter side: larger magnitude means fainter, so mag > avg.
    faint_idx = np.where(mag > avg)
    faint_weight = weight[faint_idx]
    faint_var = np.sum((mag[faint_idx] - avg) ** 2 * faint_weight) / \
        np.sum(faint_weight)

    # Brighter (or equal) side: mag <= avg.
    bright_idx = np.where(mag <= avg)
    bright_weight = weight[bright_idx]
    bright_var = np.sum((mag[bright_idx] - avg) ** 2 * bright_weight) / \
        np.sum(bright_weight)

    # Ratio of the two weighted standard deviations.
    return np.sqrt(faint_var / bright_var)
python
def half_mag_amplitude_ratio(self, mag, avg, weight): """ Return ratio of amplitude of higher and lower magnitudes. A ratio of amplitude of higher and lower magnitudes than average, considering weights. This ratio, by definition, should be higher for EB than for others. Parameters ---------- mag : array_like An array of magnitudes. avg : float An average value of magnitudes. weight : array_like An array of weight. Returns ------- hl_ratio : float Ratio of amplitude of higher and lower magnitudes than average. """ # For lower (fainter) magnitude than average. index = np.where(mag > avg) lower_weight = weight[index] lower_weight_sum = np.sum(lower_weight) lower_mag = mag[index] lower_weighted_std = np.sum((lower_mag - avg) ** 2 * lower_weight) / \ lower_weight_sum # For higher (brighter) magnitude than average. index = np.where(mag <= avg) higher_weight = weight[index] higher_weight_sum = np.sum(higher_weight) higher_mag = mag[index] higher_weighted_std = np.sum((higher_mag - avg) ** 2 * higher_weight) / \ higher_weight_sum # Return ratio. return np.sqrt(lower_weighted_std / higher_weighted_std)
[ "def", "half_mag_amplitude_ratio", "(", "self", ",", "mag", ",", "avg", ",", "weight", ")", ":", "# For lower (fainter) magnitude than average.", "index", "=", "np", ".", "where", "(", "mag", ">", "avg", ")", "lower_weight", "=", "weight", "[", "index", "]", "lower_weight_sum", "=", "np", ".", "sum", "(", "lower_weight", ")", "lower_mag", "=", "mag", "[", "index", "]", "lower_weighted_std", "=", "np", ".", "sum", "(", "(", "lower_mag", "-", "avg", ")", "**", "2", "*", "lower_weight", ")", "/", "lower_weight_sum", "# For higher (brighter) magnitude than average.", "index", "=", "np", ".", "where", "(", "mag", "<=", "avg", ")", "higher_weight", "=", "weight", "[", "index", "]", "higher_weight_sum", "=", "np", ".", "sum", "(", "higher_weight", ")", "higher_mag", "=", "mag", "[", "index", "]", "higher_weighted_std", "=", "np", ".", "sum", "(", "(", "higher_mag", "-", "avg", ")", "**", "2", "*", "higher_weight", ")", "/", "higher_weight_sum", "# Return ratio.", "return", "np", ".", "sqrt", "(", "lower_weighted_std", "/", "higher_weighted_std", ")" ]
Return ratio of amplitude of higher and lower magnitudes. A ratio of amplitude of higher and lower magnitudes than average, considering weights. This ratio, by definition, should be higher for EB than for others. Parameters ---------- mag : array_like An array of magnitudes. avg : float An average value of magnitudes. weight : array_like An array of weight. Returns ------- hl_ratio : float Ratio of amplitude of higher and lower magnitudes than average.
[ "Return", "ratio", "of", "amplitude", "of", "higher", "and", "lower", "magnitudes", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L406-L449
dwkim78/upsilon
upsilon/extract_features/extract_features.py
ExtractFeatures.half_mag_amplitude_ratio2
def half_mag_amplitude_ratio2(self, mag, avg):
    """Return the ratio of amplitudes of fainter vs. brighter magnitudes.

    Unweighted variant: the mean squared deviation of magnitudes fainter
    than the average is compared with that of magnitudes brighter than (or
    equal to) the average. By definition this ratio should be higher for
    eclipsing binaries (EB) than for other variability classes.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        An average value of magnitudes.

    Returns
    -------
    hl_ratio : float
        Ratio of amplitude of higher and lower magnitudes than average.
    """
    # Fainter side (larger magnitude = fainter).
    fainter = mag[mag > avg]
    lower_mean_sq = np.sum((fainter - avg) ** 2) / len(fainter)

    # Brighter (or equal) side.
    brighter = mag[mag <= avg]
    higher_mean_sq = np.sum((avg - brighter) ** 2) / len(brighter)

    return np.sqrt(lower_mean_sq / higher_mean_sq)
python
def half_mag_amplitude_ratio2(self, mag, avg): """ Return ratio of amplitude of higher and lower magnitudes. A ratio of amplitude of higher and lower magnitudes than average, considering weights. This ratio, by definition, should be higher for EB than for others. Parameters ---------- mag : array_like An array of magnitudes. avg : float An average value of magnitudes. Returns ------- hl_ratio : float Ratio of amplitude of higher and lower magnitudes than average. """ # For lower (fainter) magnitude than average. index = np.where(mag > avg) fainter_mag = mag[index] lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag) # For higher (brighter) magnitude than average. index = np.where(mag <= avg) brighter_mag = mag[index] higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag) # Return ratio. return np.sqrt(lower_sum / higher_sum)
[ "def", "half_mag_amplitude_ratio2", "(", "self", ",", "mag", ",", "avg", ")", ":", "# For lower (fainter) magnitude than average.", "index", "=", "np", ".", "where", "(", "mag", ">", "avg", ")", "fainter_mag", "=", "mag", "[", "index", "]", "lower_sum", "=", "np", ".", "sum", "(", "(", "fainter_mag", "-", "avg", ")", "**", "2", ")", "/", "len", "(", "fainter_mag", ")", "# For higher (brighter) magnitude than average.", "index", "=", "np", ".", "where", "(", "mag", "<=", "avg", ")", "brighter_mag", "=", "mag", "[", "index", "]", "higher_sum", "=", "np", ".", "sum", "(", "(", "avg", "-", "brighter_mag", ")", "**", "2", ")", "/", "len", "(", "brighter_mag", ")", "# Return ratio.", "return", "np", ".", "sqrt", "(", "lower_sum", "/", "higher_sum", ")" ]
Return ratio of amplitude of higher and lower magnitudes. A ratio of amplitude of higher and lower magnitudes than average, considering weights. This ratio, by definition, should be higher for EB than for others. Parameters ---------- mag : array_like An array of magnitudes. avg : float An average value of magnitudes. Returns ------- hl_ratio : float Ratio of amplitude of higher and lower magnitudes than average.
[ "Return", "ratio", "of", "amplitude", "of", "higher", "and", "lower", "magnitudes", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L451-L486
dwkim78/upsilon
upsilon/extract_features/extract_features.py
ExtractFeatures.get_eta
def get_eta(self, mag, std):
    """Return the Eta variability index.

    Mean squared successive difference of the magnitudes, normalized by
    the variance (``std ** 2``).

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    std : float
        A standard deviation of magnitudes (scalar).

    Returns
    -------
    eta : float
        The value of Eta index.
    """
    # Successive differences between neighboring magnitudes.
    successive_diff = np.diff(mag)
    eta = np.sum(successive_diff * successive_diff) \
        / (len(mag) - 1.) / std / std
    return eta
python
def get_eta(self, mag, std): """ Return Eta feature. Parameters ---------- mag : array_like An array of magnitudes. std : array_like A standard deviation of magnitudes. Returns ------- eta : float The value of Eta index. """ diff = mag[1:] - mag[:len(mag) - 1] eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std return eta
[ "def", "get_eta", "(", "self", ",", "mag", ",", "std", ")", ":", "diff", "=", "mag", "[", "1", ":", "]", "-", "mag", "[", ":", "len", "(", "mag", ")", "-", "1", "]", "eta", "=", "np", ".", "sum", "(", "diff", "*", "diff", ")", "/", "(", "len", "(", "mag", ")", "-", "1.", ")", "/", "std", "/", "std", "return", "eta" ]
Return Eta feature. Parameters ---------- mag : array_like An array of magnitudes. std : array_like A standard deviation of magnitudes. Returns ------- eta : float The value of Eta index.
[ "Return", "Eta", "feature", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L488-L508
dwkim78/upsilon
upsilon/extract_features/extract_features.py
ExtractFeatures.slope_percentile
def slope_percentile(self, date, mag):
    """Return the 10% and 90% percentiles of slope.

    Slope here is ``d(date) / d(mag)`` between successive phase-folded
    points; pairs with zero magnitude difference are discarded to avoid
    division by zero.

    Parameters
    ----------
    date : array_like
        An array of phase-folded date. Sorted.
    mag : array_like
        An array of phase-folded magnitudes. Sorted by date.

    Returns
    -------
    per_10 : float
        10% percentile values of slope.
    per_90 : float
        90% percentile values of slope.
    """
    dt = np.diff(date)
    dm = np.diff(mag)

    # Drop pairs with identical magnitudes (zero denominator).
    nonzero = dm != 0.
    slope = dt[nonzero] / dm[nonzero]

    return np.percentile(slope, 10.), np.percentile(slope, 90.)
python
def slope_percentile(self, date, mag): """ Return 10% and 90% percentile of slope. Parameters ---------- date : array_like An array of phase-folded date. Sorted. mag : array_like An array of phase-folded magnitudes. Sorted by date. Returns ------- per_10 : float 10% percentile values of slope. per_90 : float 90% percentile values of slope. """ date_diff = date[1:] - date[:len(date) - 1] mag_diff = mag[1:] - mag[:len(mag) - 1] # Remove zero mag_diff. index = np.where(mag_diff != 0.) date_diff = date_diff[index] mag_diff = mag_diff[index] # Derive slope. slope = date_diff / mag_diff percentile_10 = np.percentile(slope, 10.) percentile_90 = np.percentile(slope, 90.) return percentile_10, percentile_90
[ "def", "slope_percentile", "(", "self", ",", "date", ",", "mag", ")", ":", "date_diff", "=", "date", "[", "1", ":", "]", "-", "date", "[", ":", "len", "(", "date", ")", "-", "1", "]", "mag_diff", "=", "mag", "[", "1", ":", "]", "-", "mag", "[", ":", "len", "(", "mag", ")", "-", "1", "]", "# Remove zero mag_diff.", "index", "=", "np", ".", "where", "(", "mag_diff", "!=", "0.", ")", "date_diff", "=", "date_diff", "[", "index", "]", "mag_diff", "=", "mag_diff", "[", "index", "]", "# Derive slope.", "slope", "=", "date_diff", "/", "mag_diff", "percentile_10", "=", "np", ".", "percentile", "(", "slope", ",", "10.", ")", "percentile_90", "=", "np", ".", "percentile", "(", "slope", ",", "90.", ")", "return", "percentile_10", ",", "percentile_90" ]
Return 10% and 90% percentile of slope. Parameters ---------- date : array_like An array of phase-folded date. Sorted. mag : array_like An array of phase-folded magnitudes. Sorted by date. Returns ------- per_10 : float 10% percentile values of slope. per_90 : float 90% percentile values of slope.
[ "Return", "10%", "and", "90%", "percentile", "of", "slope", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L510-L543
dwkim78/upsilon
upsilon/extract_features/extract_features.py
ExtractFeatures.get_cusum
def get_cusum(self, mag):
    """Return max - min of the normalized cumulative sum.

    The cumulative sum of mean-subtracted magnitudes is scaled by the
    number of points and the weighted standard deviation; the range
    (max - min) of that curve is returned.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.

    Returns
    -------
    mm_cusum : float
        Max - min of cumulative sum.
    """
    normalized = np.cumsum(mag - self.weighted_mean) \
        / len(mag) / self.weighted_std
    return normalized.max() - normalized.min()
python
def get_cusum(self, mag): """ Return max - min of cumulative sum. Parameters ---------- mag : array_like An array of magnitudes. Returns ------- mm_cusum : float Max - min of cumulative sum. """ c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std return np.max(c) - np.min(c)
[ "def", "get_cusum", "(", "self", ",", "mag", ")", ":", "c", "=", "np", ".", "cumsum", "(", "mag", "-", "self", ".", "weighted_mean", ")", "/", "len", "(", "mag", ")", "/", "self", ".", "weighted_std", "return", "np", ".", "max", "(", "c", ")", "-", "np", ".", "min", "(", "c", ")" ]
Return max - min of cumulative sum. Parameters ---------- mag : array_like An array of magnitudes. Returns ------- mm_cusum : float Max - min of cumulative sum.
[ "Return", "max", "-", "min", "of", "cumulative", "sum", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L545-L562
dwkim78/upsilon
upsilon/extract_features/extract_features.py
ExtractFeatures.get_features2
def get_features2(self):
    """Return all features with their names, sorted by name.

    Instance attributes that are raw inputs (date, mag, err, ...) or
    intermediate quantities (weight, median, mean, std, ...) are excluded.

    Returns
    -------
    names : list
        Feature names.
    values : list
        Feature values, in the same order as ``names``.
    """
    # Attributes that are inputs or intermediates, not features.
    non_features = {
        'date', 'mag', 'err', 'n_threads', 'min_period',
        'f', 'f_phase', 'period_log10FAP', 'weight', 'weighted_sum',
        'median', 'mean', 'std',
    }

    all_vars = vars(self)
    # Sorting keeps the feature order stable across instances/calls.
    feature_names = sorted(name for name in all_vars
                           if name not in non_features)
    feature_values = [all_vars[name] for name in feature_names]

    return feature_names, feature_values
python
def get_features2(self): """ Return all features with its names. Returns ------- names : list Feature names. values : list Feature values """ feature_names = [] feature_values = [] # Get all the names of features. all_vars = vars(self) for name in all_vars.keys(): # Omit input variables such as date, mag, err, etc. if not (name == 'date' or name == 'mag' or name == 'err' or name == 'n_threads' or name == 'min_period'): # Filter some other unnecessary features. if not (name == 'f' or name == 'f_phase' or name == 'period_log10FAP' or name == 'weight' or name == 'weighted_sum' or name == 'median' or name == 'mean' or name == 'std'): feature_names.append(name) # Sort by the names. # Sorting should be done to keep maintaining the same order of features. feature_names.sort() # Get feature values. for name in feature_names: feature_values.append(all_vars[name]) return feature_names, feature_values
[ "def", "get_features2", "(", "self", ")", ":", "feature_names", "=", "[", "]", "feature_values", "=", "[", "]", "# Get all the names of features.", "all_vars", "=", "vars", "(", "self", ")", "for", "name", "in", "all_vars", ".", "keys", "(", ")", ":", "# Omit input variables such as date, mag, err, etc.", "if", "not", "(", "name", "==", "'date'", "or", "name", "==", "'mag'", "or", "name", "==", "'err'", "or", "name", "==", "'n_threads'", "or", "name", "==", "'min_period'", ")", ":", "# Filter some other unnecessary features.", "if", "not", "(", "name", "==", "'f'", "or", "name", "==", "'f_phase'", "or", "name", "==", "'period_log10FAP'", "or", "name", "==", "'weight'", "or", "name", "==", "'weighted_sum'", "or", "name", "==", "'median'", "or", "name", "==", "'mean'", "or", "name", "==", "'std'", ")", ":", "feature_names", ".", "append", "(", "name", ")", "# Sort by the names.", "# Sorting should be done to keep maintaining the same order of features.", "feature_names", ".", "sort", "(", ")", "# Get feature values.", "for", "name", "in", "feature_names", ":", "feature_values", ".", "append", "(", "all_vars", "[", "name", "]", ")", "return", "feature_names", ",", "feature_values" ]
Return all features with its names. Returns ------- names : list Feature names. values : list Feature values
[ "Return", "all", "features", "with", "its", "names", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L564-L600
dwkim78/upsilon
upsilon/extract_features/extract_features.py
ExtractFeatures.get_features_all
def get_features_all(self):
    """Return all features with their names, sorted by name.

    Collected regardless of whether the feature is used for training or
    prediction.

    Returns
    -------
    all_features : OrderedDict
        Features dictionary, ordered by feature name.
    """
    # Keep only instance attributes registered as features.
    picked = {name: value for name, value in vars(self).items()
              if name in feature_names_list_all}

    # Sort by the keys (i.e. feature names) for a stable ordering.
    return OrderedDict(sorted(picked.items(), key=lambda item: item[0]))
python
def get_features_all(self): """ Return all features with its names. Regardless of being used for train and prediction. Sorted by the names. Returns ------- all_features : OrderedDict Features dictionary. """ features = {} # Get all the names of features. all_vars = vars(self) for name in all_vars.keys(): if name in feature_names_list_all: features[name] = all_vars[name] # Sort by the keys (i.e. feature names). features = OrderedDict(sorted(features.items(), key=lambda t: t[0])) return features
[ "def", "get_features_all", "(", "self", ")", ":", "features", "=", "{", "}", "# Get all the names of features.", "all_vars", "=", "vars", "(", "self", ")", "for", "name", "in", "all_vars", ".", "keys", "(", ")", ":", "if", "name", "in", "feature_names_list_all", ":", "features", "[", "name", "]", "=", "all_vars", "[", "name", "]", "# Sort by the keys (i.e. feature names).", "features", "=", "OrderedDict", "(", "sorted", "(", "features", ".", "items", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", ")", "return", "features" ]
Return all features with its names. Regardless of being used for train and prediction. Sorted by the names. Returns ------- all_features : OrderedDict Features dictionary.
[ "Return", "all", "features", "with", "its", "names", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L631-L654
hannes-brt/hebel
hebel/__init__.py
init
def init(device_id=None, random_seed=None):
    """Initialize Hebel.

    This function creates a CUDA context, CUBLAS context and initializes
    and seeds the pseudo-random number generator.

    **Parameters:**

    device_id : integer, optional
        The ID of the GPU device to use. If this is omitted, PyCUDA's
        default context is used, which by default uses the fastest
        available device on the system. Alternatively, you can put the
        device id in the environment variable ``CUDA_DEVICE`` or into the
        file ``.cuda-device`` in the user's home directory.

    random_seed : integer, optional
        The seed to use for the pseudo-random number generator. If this is
        omitted, the seed is taken from the environment variable
        ``RANDOM_SEED`` and if that is not defined, a random integer is
        used as a seed.
    """
    # Resolve defaults from the environment when not given explicitly.
    # BUG FIX: the CUDA_DEVICE env var was previously assigned to
    # random_seed, leaving device_id unset and clobbering the caller's
    # random_seed.
    if device_id is None:
        device_id = _os.environ.get('CUDA_DEVICE')
    if random_seed is None:
        random_seed = _os.environ.get('RANDOM_SEED')

    # Only perform initialization once per process.
    global is_initialized
    if not is_initialized:
        is_initialized = True

        global context
        context.init_context(device_id)

        from pycuda import gpuarray, driver, curandom

        # Initialize memory pool
        global memory_pool
        memory_pool.init()

        # Initialize PRG
        global sampler
        sampler.set_seed(random_seed)

        # Initialize pycuda_ops
        from hebel import pycuda_ops
        pycuda_ops.init()
python
def init(device_id=None, random_seed=None): """Initialize Hebel. This function creates a CUDA context, CUBLAS context and initializes and seeds the pseudo-random number generator. **Parameters:** device_id : integer, optional The ID of the GPU device to use. If this is omitted, PyCUDA's default context is used, which by default uses the fastest available device on the system. Alternatively, you can put the device id in the environment variable ``CUDA_DEVICE`` or into the file ``.cuda-device`` in the user's home directory. random_seed : integer, optional The seed to use for the pseudo-random number generator. If this is omitted, the seed is taken from the environment variable ``RANDOM_SEED`` and if that is not defined, a random integer is used as a seed. """ if device_id is None: random_seed = _os.environ.get('CUDA_DEVICE') if random_seed is None: random_seed = _os.environ.get('RANDOM_SEED') global is_initialized if not is_initialized: is_initialized = True global context context.init_context(device_id) from pycuda import gpuarray, driver, curandom # Initialize memory pool global memory_pool memory_pool.init() # Initialize PRG global sampler sampler.set_seed(random_seed) # Initialize pycuda_ops from hebel import pycuda_ops pycuda_ops.init()
[ "def", "init", "(", "device_id", "=", "None", ",", "random_seed", "=", "None", ")", ":", "if", "device_id", "is", "None", ":", "random_seed", "=", "_os", ".", "environ", ".", "get", "(", "'CUDA_DEVICE'", ")", "if", "random_seed", "is", "None", ":", "random_seed", "=", "_os", ".", "environ", ".", "get", "(", "'RANDOM_SEED'", ")", "global", "is_initialized", "if", "not", "is_initialized", ":", "is_initialized", "=", "True", "global", "context", "context", ".", "init_context", "(", "device_id", ")", "from", "pycuda", "import", "gpuarray", ",", "driver", ",", "curandom", "# Initialize memory pool", "global", "memory_pool", "memory_pool", ".", "init", "(", ")", "# Initialize PRG", "global", "sampler", "sampler", ".", "set_seed", "(", "random_seed", ")", "# Initialize pycuda_ops", "from", "hebel", "import", "pycuda_ops", "pycuda_ops", ".", "init", "(", ")" ]
Initialize Hebel. This function creates a CUDA context, CUBLAS context and initializes and seeds the pseudo-random number generator. **Parameters:** device_id : integer, optional The ID of the GPU device to use. If this is omitted, PyCUDA's default context is used, which by default uses the fastest available device on the system. Alternatively, you can put the device id in the environment variable ``CUDA_DEVICE`` or into the file ``.cuda-device`` in the user's home directory. random_seed : integer, optional The seed to use for the pseudo-random number generator. If this is omitted, the seed is taken from the environment variable ``RANDOM_SEED`` and if that is not defined, a random integer is used as a seed.
[ "Initialize", "Hebel", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/__init__.py#L96-L143
rix0rrr/gcl
gcl/ast_util.py
inflate_context_tuple
def inflate_context_tuple(ast_rootpath, root_env):
    """Instantiate a Tuple from a TupleNode.

    Walking the AST tree upwards, evaluate from the root down again.
    """
    with util.LogTime('inflate_context_tuple'):
        # We only need to look at tuple members going down.
        # Evaluate the root node first; it becomes the initial tuple.
        inflated = ast_rootpath[0].eval(root_env)
        current = inflated
        env = root_env
        try:
            # Walk root -> leaf, re-evaluating each step in the current env.
            for node in ast_rootpath[1:]:
                if is_tuple_member_node(node):
                    assert framework.is_tuple(current)
                    with util.LogTime('into tuple'):
                        # Descend into the named member of the inflated tuple.
                        thunk, env = inflated.get_thunk_env(node.name)
                        current = framework.eval(thunk, env)
                elif framework.is_list(current):
                    with util.LogTime('eval thing'):
                        # NOTE(review): for list elements the node itself is
                        # re-evaluated in the member's env — confirm intent.
                        current = framework.eval(node, env)
                # Track the innermost tuple seen so far; it is the result.
                if framework.is_tuple(current):
                    inflated = current
        except (gcl.EvaluationError, ast.UnparseableAccess):
            # Eat evaluation error, probably means the rightmost tuplemember
            # wasn't complete. Return what we have so far.
            pass
        return inflated
python
def inflate_context_tuple(ast_rootpath, root_env): """Instantiate a Tuple from a TupleNode. Walking the AST tree upwards, evaluate from the root down again. """ with util.LogTime('inflate_context_tuple'): # We only need to look at tuple members going down. inflated = ast_rootpath[0].eval(root_env) current = inflated env = root_env try: for node in ast_rootpath[1:]: if is_tuple_member_node(node): assert framework.is_tuple(current) with util.LogTime('into tuple'): thunk, env = inflated.get_thunk_env(node.name) current = framework.eval(thunk, env) elif framework.is_list(current): with util.LogTime('eval thing'): current = framework.eval(node, env) if framework.is_tuple(current): inflated = current except (gcl.EvaluationError, ast.UnparseableAccess): # Eat evaluation error, probably means the rightmost tuplemember wasn't complete. # Return what we have so far. pass return inflated
[ "def", "inflate_context_tuple", "(", "ast_rootpath", ",", "root_env", ")", ":", "with", "util", ".", "LogTime", "(", "'inflate_context_tuple'", ")", ":", "# We only need to look at tuple members going down.", "inflated", "=", "ast_rootpath", "[", "0", "]", ".", "eval", "(", "root_env", ")", "current", "=", "inflated", "env", "=", "root_env", "try", ":", "for", "node", "in", "ast_rootpath", "[", "1", ":", "]", ":", "if", "is_tuple_member_node", "(", "node", ")", ":", "assert", "framework", ".", "is_tuple", "(", "current", ")", "with", "util", ".", "LogTime", "(", "'into tuple'", ")", ":", "thunk", ",", "env", "=", "inflated", ".", "get_thunk_env", "(", "node", ".", "name", ")", "current", "=", "framework", ".", "eval", "(", "thunk", ",", "env", ")", "elif", "framework", ".", "is_list", "(", "current", ")", ":", "with", "util", ".", "LogTime", "(", "'eval thing'", ")", ":", "current", "=", "framework", ".", "eval", "(", "node", ",", "env", ")", "if", "framework", ".", "is_tuple", "(", "current", ")", ":", "inflated", "=", "current", "except", "(", "gcl", ".", "EvaluationError", ",", "ast", ".", "UnparseableAccess", ")", ":", "# Eat evaluation error, probably means the rightmost tuplemember wasn't complete.", "# Return what we have so far.", "pass", "return", "inflated" ]
Instantiate a Tuple from a TupleNode. Walking the AST tree upwards, evaluate from the root down again.
[ "Instantiate", "a", "Tuple", "from", "a", "TupleNode", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L24-L52
rix0rrr/gcl
gcl/ast_util.py
enumerate_scope
def enumerate_scope(ast_rootpath, root_env=None, include_default_builtins=False):
    """Return a dict of { name => Completions } for the given tuple node.

    Enumerates all keys that are in scope in a given tuple. The node part
    of the Completion may be None, in case the binding is a built-in.
    """
    with util.LogTime('enumerate_scope'):
        scope = {}

        # Walk innermost tuple outward so inner members shadow outer ones.
        for tuple_node in filter(is_tuple_node, reversed(ast_rootpath)):
            for member in tuple_node.members:
                if member.name not in scope:
                    scope[member.name] = Completion(
                        member.name, False, member.comment.as_string(),
                        member.location)

        if include_default_builtins:  # Backwards compat flag
            root_env = gcl.default_env

        # Built-ins from the environment fill in last (lowest priority).
        if root_env:
            for key in root_env.keys():
                if key not in scope and not hide_from_autocomplete(root_env[key]):
                    value = root_env[key]
                    scope[key] = Completion(key, True, dedent(value.__doc__ or ''), None)

        return scope
python
def enumerate_scope(ast_rootpath, root_env=None, include_default_builtins=False): """Return a dict of { name => Completions } for the given tuple node. Enumerates all keys that are in scope in a given tuple. The node part of the tuple may be None, in case the binding is a built-in. """ with util.LogTime('enumerate_scope'): scope = {} for node in reversed(ast_rootpath): if is_tuple_node(node): for member in node.members: if member.name not in scope: scope[member.name] = Completion(member.name, False, member.comment.as_string(), member.location) if include_default_builtins: # Backwards compat flag root_env = gcl.default_env if root_env: for k in root_env.keys(): if k not in scope and not hide_from_autocomplete(root_env[k]): v = root_env[k] scope[k] = Completion(k, True, dedent(v.__doc__ or ''), None) return scope
[ "def", "enumerate_scope", "(", "ast_rootpath", ",", "root_env", "=", "None", ",", "include_default_builtins", "=", "False", ")", ":", "with", "util", ".", "LogTime", "(", "'enumerate_scope'", ")", ":", "scope", "=", "{", "}", "for", "node", "in", "reversed", "(", "ast_rootpath", ")", ":", "if", "is_tuple_node", "(", "node", ")", ":", "for", "member", "in", "node", ".", "members", ":", "if", "member", ".", "name", "not", "in", "scope", ":", "scope", "[", "member", ".", "name", "]", "=", "Completion", "(", "member", ".", "name", ",", "False", ",", "member", ".", "comment", ".", "as_string", "(", ")", ",", "member", ".", "location", ")", "if", "include_default_builtins", ":", "# Backwards compat flag", "root_env", "=", "gcl", ".", "default_env", "if", "root_env", ":", "for", "k", "in", "root_env", ".", "keys", "(", ")", ":", "if", "k", "not", "in", "scope", "and", "not", "hide_from_autocomplete", "(", "root_env", "[", "k", "]", ")", ":", "v", "=", "root_env", "[", "k", "]", "scope", "[", "k", "]", "=", "Completion", "(", "k", ",", "True", ",", "dedent", "(", "v", ".", "__doc__", "or", "''", ")", ",", "None", ")", "return", "scope" ]
Return a dict of { name => Completions } for the given tuple node. Enumerates all keys that are in scope in a given tuple. The node part of the tuple may be None, in case the binding is a built-in.
[ "Return", "a", "dict", "of", "{", "name", "=", ">", "Completions", "}", "for", "the", "given", "tuple", "node", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L75-L98
rix0rrr/gcl
gcl/ast_util.py
find_deref_completions
def find_deref_completions(ast_rootpath, root_env=gcl.default_env):
    """Returns a dict of { name => Completions }."""
    with util.LogTime('find_deref_completions'):
        # Evaluate the enclosing tuple first (even if there is no deref,
        # to preserve side effects/timing of the original code path).
        context_tuple = inflate_context_tuple(ast_rootpath, root_env)
        deref_path = path_until(ast_rootpath, is_deref_node)
        if not deref_path:
            return {}

        # The last node on the path is the dereference being completed.
        haystack = deref_path[-1].haystack(context_tuple.env(context_tuple))
        if not hasattr(haystack, 'keys'):
            return {}

        completions = {}
        for name in haystack.keys():
            completions[name] = get_completion(haystack, name)
        return completions
python
def find_deref_completions(ast_rootpath, root_env=gcl.default_env): """Returns a dict of { name => Completions }.""" with util.LogTime('find_deref_completions'): tup = inflate_context_tuple(ast_rootpath, root_env) path = path_until(ast_rootpath, is_deref_node) if not path: return {} deref = path[-1] haystack = deref.haystack(tup.env(tup)) if not hasattr(haystack, 'keys'): return {} return {n: get_completion(haystack, n) for n in haystack.keys()}
[ "def", "find_deref_completions", "(", "ast_rootpath", ",", "root_env", "=", "gcl", ".", "default_env", ")", ":", "with", "util", ".", "LogTime", "(", "'find_deref_completions'", ")", ":", "tup", "=", "inflate_context_tuple", "(", "ast_rootpath", ",", "root_env", ")", "path", "=", "path_until", "(", "ast_rootpath", ",", "is_deref_node", ")", "if", "not", "path", ":", "return", "{", "}", "deref", "=", "path", "[", "-", "1", "]", "haystack", "=", "deref", ".", "haystack", "(", "tup", ".", "env", "(", "tup", ")", ")", "if", "not", "hasattr", "(", "haystack", ",", "'keys'", ")", ":", "return", "{", "}", "return", "{", "n", ":", "get_completion", "(", "haystack", ",", "n", ")", "for", "n", "in", "haystack", ".", "keys", "(", ")", "}" ]
Returns a dict of { name => Completions }.
[ "Returns", "a", "dict", "of", "{", "name", "=", ">", "Completions", "}", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L122-L133
rix0rrr/gcl
gcl/ast_util.py
is_identifier_position
def is_identifier_position(rootpath): """Return whether the cursor is in identifier-position in a member declaration.""" if len(rootpath) >= 2 and is_tuple_member_node(rootpath[-2]) and is_identifier(rootpath[-1]): return True if len(rootpath) >= 1 and is_tuple_node(rootpath[-1]): # No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode. return True return False
python
def is_identifier_position(rootpath): """Return whether the cursor is in identifier-position in a member declaration.""" if len(rootpath) >= 2 and is_tuple_member_node(rootpath[-2]) and is_identifier(rootpath[-1]): return True if len(rootpath) >= 1 and is_tuple_node(rootpath[-1]): # No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode. return True return False
[ "def", "is_identifier_position", "(", "rootpath", ")", ":", "if", "len", "(", "rootpath", ")", ">=", "2", "and", "is_tuple_member_node", "(", "rootpath", "[", "-", "2", "]", ")", "and", "is_identifier", "(", "rootpath", "[", "-", "1", "]", ")", ":", "return", "True", "if", "len", "(", "rootpath", ")", ">=", "1", "and", "is_tuple_node", "(", "rootpath", "[", "-", "1", "]", ")", ":", "# No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode.", "return", "True", "return", "False" ]
Return whether the cursor is in identifier-position in a member declaration.
[ "Return", "whether", "the", "cursor", "is", "in", "identifier", "-", "position", "in", "a", "member", "declaration", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L141-L148
rix0rrr/gcl
gcl/ast_util.py
find_completions_at_cursor
def find_completions_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env): """Find completions at the cursor. Return a dict of { name => Completion } objects. """ q = gcl.SourceQuery(filename, line, col - 1) rootpath = ast_tree.find_tokens(q) if is_identifier_position(rootpath): return find_inherited_key_completions(rootpath, root_env) try: ret = find_deref_completions(rootpath, root_env) or enumerate_scope(rootpath, root_env=root_env) assert isinstance(ret, dict) return ret except gcl.EvaluationError: # Probably an unbound value or something--just return an empty list return {}
python
def find_completions_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env): """Find completions at the cursor. Return a dict of { name => Completion } objects. """ q = gcl.SourceQuery(filename, line, col - 1) rootpath = ast_tree.find_tokens(q) if is_identifier_position(rootpath): return find_inherited_key_completions(rootpath, root_env) try: ret = find_deref_completions(rootpath, root_env) or enumerate_scope(rootpath, root_env=root_env) assert isinstance(ret, dict) return ret except gcl.EvaluationError: # Probably an unbound value or something--just return an empty list return {}
[ "def", "find_completions_at_cursor", "(", "ast_tree", ",", "filename", ",", "line", ",", "col", ",", "root_env", "=", "gcl", ".", "default_env", ")", ":", "q", "=", "gcl", ".", "SourceQuery", "(", "filename", ",", "line", ",", "col", "-", "1", ")", "rootpath", "=", "ast_tree", ".", "find_tokens", "(", "q", ")", "if", "is_identifier_position", "(", "rootpath", ")", ":", "return", "find_inherited_key_completions", "(", "rootpath", ",", "root_env", ")", "try", ":", "ret", "=", "find_deref_completions", "(", "rootpath", ",", "root_env", ")", "or", "enumerate_scope", "(", "rootpath", ",", "root_env", "=", "root_env", ")", "assert", "isinstance", "(", "ret", ",", "dict", ")", "return", "ret", "except", "gcl", ".", "EvaluationError", ":", "# Probably an unbound value or something--just return an empty list", "return", "{", "}" ]
Find completions at the cursor. Return a dict of { name => Completion } objects.
[ "Find", "completions", "at", "the", "cursor", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L151-L168
rix0rrr/gcl
gcl/ast_util.py
find_inherited_key_completions
def find_inherited_key_completions(rootpath, root_env): """Return completion keys from INHERITED tuples. Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple, then enumerate the keys that are NOT in the rightmost tuple. """ tup = inflate_context_tuple(rootpath, root_env) if isinstance(tup, runtime.CompositeTuple): keys = set(k for t in tup.tuples[:-1] for k in t.keys()) return {n: get_completion(tup, n) for n in keys} return {}
python
def find_inherited_key_completions(rootpath, root_env): """Return completion keys from INHERITED tuples. Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple, then enumerate the keys that are NOT in the rightmost tuple. """ tup = inflate_context_tuple(rootpath, root_env) if isinstance(tup, runtime.CompositeTuple): keys = set(k for t in tup.tuples[:-1] for k in t.keys()) return {n: get_completion(tup, n) for n in keys} return {}
[ "def", "find_inherited_key_completions", "(", "rootpath", ",", "root_env", ")", ":", "tup", "=", "inflate_context_tuple", "(", "rootpath", ",", "root_env", ")", "if", "isinstance", "(", "tup", ",", "runtime", ".", "CompositeTuple", ")", ":", "keys", "=", "set", "(", "k", "for", "t", "in", "tup", ".", "tuples", "[", ":", "-", "1", "]", "for", "k", "in", "t", ".", "keys", "(", ")", ")", "return", "{", "n", ":", "get_completion", "(", "tup", ",", "n", ")", "for", "n", "in", "keys", "}", "return", "{", "}" ]
Return completion keys from INHERITED tuples. Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple, then enumerate the keys that are NOT in the rightmost tuple.
[ "Return", "completion", "keys", "from", "INHERITED", "tuples", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L171-L181
rix0rrr/gcl
gcl/ast_util.py
find_value_at_cursor
def find_value_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env): """Find the value of the object under the cursor.""" q = gcl.SourceQuery(filename, line, col) rootpath = ast_tree.find_tokens(q) rootpath = path_until(rootpath, is_thunk) if len(rootpath) <= 1: # Just the file tuple itself, or some non-thunk element at the top level return None tup = inflate_context_tuple(rootpath, root_env) try: if isinstance(rootpath[-1], ast.Inherit): # Special case handling of 'Inherit' nodes, show the value that's being # inherited. return tup[rootpath[-1].name] return rootpath[-1].eval(tup.env(tup)) except gcl.EvaluationError as e: return e
python
def find_value_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env): """Find the value of the object under the cursor.""" q = gcl.SourceQuery(filename, line, col) rootpath = ast_tree.find_tokens(q) rootpath = path_until(rootpath, is_thunk) if len(rootpath) <= 1: # Just the file tuple itself, or some non-thunk element at the top level return None tup = inflate_context_tuple(rootpath, root_env) try: if isinstance(rootpath[-1], ast.Inherit): # Special case handling of 'Inherit' nodes, show the value that's being # inherited. return tup[rootpath[-1].name] return rootpath[-1].eval(tup.env(tup)) except gcl.EvaluationError as e: return e
[ "def", "find_value_at_cursor", "(", "ast_tree", ",", "filename", ",", "line", ",", "col", ",", "root_env", "=", "gcl", ".", "default_env", ")", ":", "q", "=", "gcl", ".", "SourceQuery", "(", "filename", ",", "line", ",", "col", ")", "rootpath", "=", "ast_tree", ".", "find_tokens", "(", "q", ")", "rootpath", "=", "path_until", "(", "rootpath", ",", "is_thunk", ")", "if", "len", "(", "rootpath", ")", "<=", "1", ":", "# Just the file tuple itself, or some non-thunk element at the top level", "return", "None", "tup", "=", "inflate_context_tuple", "(", "rootpath", ",", "root_env", ")", "try", ":", "if", "isinstance", "(", "rootpath", "[", "-", "1", "]", ",", "ast", ".", "Inherit", ")", ":", "# Special case handling of 'Inherit' nodes, show the value that's being", "# inherited.", "return", "tup", "[", "rootpath", "[", "-", "1", "]", ".", "name", "]", "return", "rootpath", "[", "-", "1", "]", ".", "eval", "(", "tup", ".", "env", "(", "tup", ")", ")", "except", "gcl", ".", "EvaluationError", "as", "e", ":", "return", "e" ]
Find the value of the object under the cursor.
[ "Find", "the", "value", "of", "the", "object", "under", "the", "cursor", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L184-L202
hannes-brt/hebel
hebel/pycuda_ops/matrix.py
add_vec_to_mat
def add_vec_to_mat(mat, vec, axis=None, inplace=False, target=None, substract=False): """ Add a vector to a matrix """ assert mat.flags.c_contiguous if axis is None: if vec.shape[0] == mat.shape[0]: axis = 0 elif vec.shape[0] == mat.shape[1]: axis = 1 else: raise ValueError('Vector length must be equal ' 'to one side of the matrix') n, m = mat.shape block = (_compilation_constants['add_vec_block_size'], _compilation_constants['add_vec_block_size'], 1) gridx = ceil_div(n, block[0]) gridy = ceil_div(m, block[1]) grid = (gridx, gridy, 1) if inplace: target = mat elif target is None: target = gpuarray.empty_like(mat) if axis == 0: assert vec.shape[0] == mat.shape[0] add_col_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) elif axis == 1: assert vec.shape[0] == mat.shape[1] add_row_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) return target
python
def add_vec_to_mat(mat, vec, axis=None, inplace=False, target=None, substract=False): """ Add a vector to a matrix """ assert mat.flags.c_contiguous if axis is None: if vec.shape[0] == mat.shape[0]: axis = 0 elif vec.shape[0] == mat.shape[1]: axis = 1 else: raise ValueError('Vector length must be equal ' 'to one side of the matrix') n, m = mat.shape block = (_compilation_constants['add_vec_block_size'], _compilation_constants['add_vec_block_size'], 1) gridx = ceil_div(n, block[0]) gridy = ceil_div(m, block[1]) grid = (gridx, gridy, 1) if inplace: target = mat elif target is None: target = gpuarray.empty_like(mat) if axis == 0: assert vec.shape[0] == mat.shape[0] add_col_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) elif axis == 1: assert vec.shape[0] == mat.shape[1] add_row_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) return target
[ "def", "add_vec_to_mat", "(", "mat", ",", "vec", ",", "axis", "=", "None", ",", "inplace", "=", "False", ",", "target", "=", "None", ",", "substract", "=", "False", ")", ":", "assert", "mat", ".", "flags", ".", "c_contiguous", "if", "axis", "is", "None", ":", "if", "vec", ".", "shape", "[", "0", "]", "==", "mat", ".", "shape", "[", "0", "]", ":", "axis", "=", "0", "elif", "vec", ".", "shape", "[", "0", "]", "==", "mat", ".", "shape", "[", "1", "]", ":", "axis", "=", "1", "else", ":", "raise", "ValueError", "(", "'Vector length must be equal '", "'to one side of the matrix'", ")", "n", ",", "m", "=", "mat", ".", "shape", "block", "=", "(", "_compilation_constants", "[", "'add_vec_block_size'", "]", ",", "_compilation_constants", "[", "'add_vec_block_size'", "]", ",", "1", ")", "gridx", "=", "ceil_div", "(", "n", ",", "block", "[", "0", "]", ")", "gridy", "=", "ceil_div", "(", "m", ",", "block", "[", "1", "]", ")", "grid", "=", "(", "gridx", ",", "gridy", ",", "1", ")", "if", "inplace", ":", "target", "=", "mat", "elif", "target", "is", "None", ":", "target", "=", "gpuarray", ".", "empty_like", "(", "mat", ")", "if", "axis", "==", "0", ":", "assert", "vec", ".", "shape", "[", "0", "]", "==", "mat", ".", "shape", "[", "0", "]", "add_col_vec_kernel", ".", "prepared_call", "(", "grid", ",", "block", ",", "mat", ".", "gpudata", ",", "vec", ".", "gpudata", ",", "target", ".", "gpudata", ",", "np", ".", "uint32", "(", "n", ")", ",", "np", ".", "uint32", "(", "m", ")", ",", "np", ".", "int32", "(", "substract", ")", ")", "elif", "axis", "==", "1", ":", "assert", "vec", ".", "shape", "[", "0", "]", "==", "mat", ".", "shape", "[", "1", "]", "add_row_vec_kernel", ".", "prepared_call", "(", "grid", ",", "block", ",", "mat", ".", "gpudata", ",", "vec", ".", "gpudata", ",", "target", ".", "gpudata", ",", "np", ".", "uint32", "(", "n", ")", ",", "np", ".", "uint32", "(", "m", ")", ",", "np", ".", "int32", "(", "substract", ")", ")", "return", "target" ]
Add a vector to a matrix
[ "Add", "a", "vector", "to", "a", "matrix" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/matrix.py#L130-L179
hannes-brt/hebel
hebel/pycuda_ops/matrix.py
vector_normalize
def vector_normalize(mat, max_vec_norm=1.): """ Normalize each column vector in mat to length max_vec_norm if it is longer than max_vec_norm """ assert mat.flags.c_contiguous n, m = mat.shape vector_normalize_kernel.prepared_call( (m, 1, 1), (32, 1, 1), mat.gpudata, np.float32(max_vec_norm), np.int32(m), np.int32(n))
python
def vector_normalize(mat, max_vec_norm=1.): """ Normalize each column vector in mat to length max_vec_norm if it is longer than max_vec_norm """ assert mat.flags.c_contiguous n, m = mat.shape vector_normalize_kernel.prepared_call( (m, 1, 1), (32, 1, 1), mat.gpudata, np.float32(max_vec_norm), np.int32(m), np.int32(n))
[ "def", "vector_normalize", "(", "mat", ",", "max_vec_norm", "=", "1.", ")", ":", "assert", "mat", ".", "flags", ".", "c_contiguous", "n", ",", "m", "=", "mat", ".", "shape", "vector_normalize_kernel", ".", "prepared_call", "(", "(", "m", ",", "1", ",", "1", ")", ",", "(", "32", ",", "1", ",", "1", ")", ",", "mat", ".", "gpudata", ",", "np", ".", "float32", "(", "max_vec_norm", ")", ",", "np", ".", "int32", "(", "m", ")", ",", "np", ".", "int32", "(", "n", ")", ")" ]
Normalize each column vector in mat to length max_vec_norm if it is longer than max_vec_norm
[ "Normalize", "each", "column", "vector", "in", "mat", "to", "length", "max_vec_norm", "if", "it", "is", "longer", "than", "max_vec_norm" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/matrix.py#L182-L194
hannes-brt/hebel
hebel/utils/string_utils.py
preprocess
def preprocess(string): """ Preprocesses a string, by replacing ${VARNAME} with os.environ['VARNAME'] Parameters ---------- string: the str object to preprocess Returns ------- the preprocessed string """ split = string.split('${') rval = [split[0]] for candidate in split[1:]: subsplit = candidate.split('}') if len(subsplit) < 2: raise ValueError('Open ${ not followed by } before ' \ + 'end of string or next ${ in "' \ + string + '"') varname = subsplit[0] if varname == 'PYLEARN2_TRAIN_FILE_NAME': warnings.warn("PYLEARN2_TRAIN_FILE_NAME is deprecated and may be " "removed from the library on or after Oct 22, 2013. Switch" " to PYLEARN2_TRAIN_FILE_FULL_STEM") try: val = os.environ[varname] except KeyError: if varname == 'PYLEARN2_DATA_PATH': raise NoDataPathError() if varname == 'PYLEARN2_VIEWER_COMMAND': raise EnvironmentVariableError(environment_variable_essay) raise ValueError('Unrecognized environment variable "' + varname + '". Did you mean ' + match(varname, os.environ.keys()) + '?') rval.append(val) rval.append('}'.join(subsplit[1:])) rval = ''.join(rval) return rval
python
def preprocess(string): """ Preprocesses a string, by replacing ${VARNAME} with os.environ['VARNAME'] Parameters ---------- string: the str object to preprocess Returns ------- the preprocessed string """ split = string.split('${') rval = [split[0]] for candidate in split[1:]: subsplit = candidate.split('}') if len(subsplit) < 2: raise ValueError('Open ${ not followed by } before ' \ + 'end of string or next ${ in "' \ + string + '"') varname = subsplit[0] if varname == 'PYLEARN2_TRAIN_FILE_NAME': warnings.warn("PYLEARN2_TRAIN_FILE_NAME is deprecated and may be " "removed from the library on or after Oct 22, 2013. Switch" " to PYLEARN2_TRAIN_FILE_FULL_STEM") try: val = os.environ[varname] except KeyError: if varname == 'PYLEARN2_DATA_PATH': raise NoDataPathError() if varname == 'PYLEARN2_VIEWER_COMMAND': raise EnvironmentVariableError(environment_variable_essay) raise ValueError('Unrecognized environment variable "' + varname + '". Did you mean ' + match(varname, os.environ.keys()) + '?') rval.append(val) rval.append('}'.join(subsplit[1:])) rval = ''.join(rval) return rval
[ "def", "preprocess", "(", "string", ")", ":", "split", "=", "string", ".", "split", "(", "'${'", ")", "rval", "=", "[", "split", "[", "0", "]", "]", "for", "candidate", "in", "split", "[", "1", ":", "]", ":", "subsplit", "=", "candidate", ".", "split", "(", "'}'", ")", "if", "len", "(", "subsplit", ")", "<", "2", ":", "raise", "ValueError", "(", "'Open ${ not followed by } before '", "+", "'end of string or next ${ in \"'", "+", "string", "+", "'\"'", ")", "varname", "=", "subsplit", "[", "0", "]", "if", "varname", "==", "'PYLEARN2_TRAIN_FILE_NAME'", ":", "warnings", ".", "warn", "(", "\"PYLEARN2_TRAIN_FILE_NAME is deprecated and may be \"", "\"removed from the library on or after Oct 22, 2013. Switch\"", "\" to PYLEARN2_TRAIN_FILE_FULL_STEM\"", ")", "try", ":", "val", "=", "os", ".", "environ", "[", "varname", "]", "except", "KeyError", ":", "if", "varname", "==", "'PYLEARN2_DATA_PATH'", ":", "raise", "NoDataPathError", "(", ")", "if", "varname", "==", "'PYLEARN2_VIEWER_COMMAND'", ":", "raise", "EnvironmentVariableError", "(", "environment_variable_essay", ")", "raise", "ValueError", "(", "'Unrecognized environment variable \"'", "+", "varname", "+", "'\". Did you mean '", "+", "match", "(", "varname", ",", "os", ".", "environ", ".", "keys", "(", ")", ")", "+", "'?'", ")", "rval", ".", "append", "(", "val", ")", "rval", ".", "append", "(", "'}'", ".", "join", "(", "subsplit", "[", "1", ":", "]", ")", ")", "rval", "=", "''", ".", "join", "(", "rval", ")", "return", "rval" ]
Preprocesses a string, by replacing ${VARNAME} with os.environ['VARNAME'] Parameters ---------- string: the str object to preprocess Returns ------- the preprocessed string
[ "Preprocesses", "a", "string", "by", "replacing", "$", "{", "VARNAME", "}", "with", "os", ".", "environ", "[", "VARNAME", "]" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/string_utils.py#L26-L77
hannes-brt/hebel
hebel/utils/string_utils.py
tokenize_by_number
def tokenize_by_number(s): """ splits a string into a list of tokens each is either a string containing no numbers or a float """ r = find_number(s) if r == None: return [ s ] else: tokens = [] if r[0] > 0: tokens.append(s[0:r[0]]) tokens.append( float(s[r[0]:r[1]]) ) if r[1] < len(s): tokens.extend(tokenize_by_number(s[r[1]:])) return tokens assert False
python
def tokenize_by_number(s): """ splits a string into a list of tokens each is either a string containing no numbers or a float """ r = find_number(s) if r == None: return [ s ] else: tokens = [] if r[0] > 0: tokens.append(s[0:r[0]]) tokens.append( float(s[r[0]:r[1]]) ) if r[1] < len(s): tokens.extend(tokenize_by_number(s[r[1]:])) return tokens assert False
[ "def", "tokenize_by_number", "(", "s", ")", ":", "r", "=", "find_number", "(", "s", ")", "if", "r", "==", "None", ":", "return", "[", "s", "]", "else", ":", "tokens", "=", "[", "]", "if", "r", "[", "0", "]", ">", "0", ":", "tokens", ".", "append", "(", "s", "[", "0", ":", "r", "[", "0", "]", "]", ")", "tokens", ".", "append", "(", "float", "(", "s", "[", "r", "[", "0", "]", ":", "r", "[", "1", "]", "]", ")", ")", "if", "r", "[", "1", "]", "<", "len", "(", "s", ")", ":", "tokens", ".", "extend", "(", "tokenize_by_number", "(", "s", "[", "r", "[", "1", "]", ":", "]", ")", ")", "return", "tokens", "assert", "False" ]
splits a string into a list of tokens each is either a string containing no numbers or a float
[ "splits", "a", "string", "into", "a", "list", "of", "tokens", "each", "is", "either", "a", "string", "containing", "no", "numbers", "or", "a", "float" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/string_utils.py#L93-L110
hannes-brt/hebel
hebel/utils/string_utils.py
number_aware_alphabetical_cmp
def number_aware_alphabetical_cmp(str1, str2): """ cmp function for sorting a list of strings by alphabetical order, but with numbers sorted numerically. i.e., foo1, foo2, foo10, foo11 instead of foo1, foo10 """ def flatten_tokens(tokens): l = [] for token in tokens: if isinstance(token, str): for char in token: l.append(char) else: assert isinstance(token, float) l.append(token) return l seq1 = flatten_tokens(tokenize_by_number(str1)) seq2 = flatten_tokens(tokenize_by_number(str2)) l = min(len(seq1),len(seq2)) i = 0 while i < l: if seq1[i] < seq2[i]: return -1 elif seq1[i] > seq2[i]: return 1 i += 1 if len(seq1) < len(seq2): return -1 elif len(seq1) > len(seq2): return 1 return 0
python
def number_aware_alphabetical_cmp(str1, str2): """ cmp function for sorting a list of strings by alphabetical order, but with numbers sorted numerically. i.e., foo1, foo2, foo10, foo11 instead of foo1, foo10 """ def flatten_tokens(tokens): l = [] for token in tokens: if isinstance(token, str): for char in token: l.append(char) else: assert isinstance(token, float) l.append(token) return l seq1 = flatten_tokens(tokenize_by_number(str1)) seq2 = flatten_tokens(tokenize_by_number(str2)) l = min(len(seq1),len(seq2)) i = 0 while i < l: if seq1[i] < seq2[i]: return -1 elif seq1[i] > seq2[i]: return 1 i += 1 if len(seq1) < len(seq2): return -1 elif len(seq1) > len(seq2): return 1 return 0
[ "def", "number_aware_alphabetical_cmp", "(", "str1", ",", "str2", ")", ":", "def", "flatten_tokens", "(", "tokens", ")", ":", "l", "=", "[", "]", "for", "token", "in", "tokens", ":", "if", "isinstance", "(", "token", ",", "str", ")", ":", "for", "char", "in", "token", ":", "l", ".", "append", "(", "char", ")", "else", ":", "assert", "isinstance", "(", "token", ",", "float", ")", "l", ".", "append", "(", "token", ")", "return", "l", "seq1", "=", "flatten_tokens", "(", "tokenize_by_number", "(", "str1", ")", ")", "seq2", "=", "flatten_tokens", "(", "tokenize_by_number", "(", "str2", ")", ")", "l", "=", "min", "(", "len", "(", "seq1", ")", ",", "len", "(", "seq2", ")", ")", "i", "=", "0", "while", "i", "<", "l", ":", "if", "seq1", "[", "i", "]", "<", "seq2", "[", "i", "]", ":", "return", "-", "1", "elif", "seq1", "[", "i", "]", ">", "seq2", "[", "i", "]", ":", "return", "1", "i", "+=", "1", "if", "len", "(", "seq1", ")", "<", "len", "(", "seq2", ")", ":", "return", "-", "1", "elif", "len", "(", "seq1", ")", ">", "len", "(", "seq2", ")", ":", "return", "1", "return", "0" ]
cmp function for sorting a list of strings by alphabetical order, but with numbers sorted numerically. i.e., foo1, foo2, foo10, foo11 instead of foo1, foo10
[ "cmp", "function", "for", "sorting", "a", "list", "of", "strings", "by", "alphabetical", "order", "but", "with", "numbers", "sorted", "numerically", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/string_utils.py#L113-L151
hannes-brt/hebel
hebel/utils/string_utils.py
match
def match(wrong, candidates): """ wrong: a mispelling candidates: a set of correct words returns a guess of which candidate is the right one This should be used with a small number of candidates and a high potential edit distance. ie, use it to correct a wrong filename in a directory, wrong class name in a module, etc. Don't use it to correct small typos of freeform natural language words. """ assert len(candidates) > 0 # Current implementation tries all candidates and outputs the one # with the min score # Could try to do something smarter def score(w1,w2): # Current implementation returns negative dot product of # the two words mapped into a feature space by mapping phi # w -> [ phi(w1), .1 phi(first letter of w), .1 phi(last letter of w) ] # Could try to do something smarter w1 = w1.lower() w2 = w2.lower() def phi(w): # Current feature mapping is to the vector of counts of # all letters and two-letter sequences # Could try to do something smarter rval = {} for i in xrange(len(w)): l = w[i] rval[l] = rval.get(l,0.) + 1. if i < len(w)-1: b = w[i:i+2] rval[b] = rval.get(b,0.) + 1. return rval d1 = phi(w1) d2 = phi(w2) def mul(d1, d2): rval = 0 for key in set(d1).union(d2): rval += d1.get(key,0) * d2.get(key,0) return rval tot_score = mul(phi(w1),phi(w2)) / float(len(w1)*len(w2)) + \ 0.1 * mul(phi(w1[0:1]), phi(w2[0:1])) + \ 0.1 * mul(phi(w1[-1:]), phi(w2[-1:])) return tot_score scored_candidates = [ (-score(wrong, candidate), candidate) for candidate in candidates ] scored_candidates.sort() return scored_candidates[0][1]
python
def match(wrong, candidates): """ wrong: a mispelling candidates: a set of correct words returns a guess of which candidate is the right one This should be used with a small number of candidates and a high potential edit distance. ie, use it to correct a wrong filename in a directory, wrong class name in a module, etc. Don't use it to correct small typos of freeform natural language words. """ assert len(candidates) > 0 # Current implementation tries all candidates and outputs the one # with the min score # Could try to do something smarter def score(w1,w2): # Current implementation returns negative dot product of # the two words mapped into a feature space by mapping phi # w -> [ phi(w1), .1 phi(first letter of w), .1 phi(last letter of w) ] # Could try to do something smarter w1 = w1.lower() w2 = w2.lower() def phi(w): # Current feature mapping is to the vector of counts of # all letters and two-letter sequences # Could try to do something smarter rval = {} for i in xrange(len(w)): l = w[i] rval[l] = rval.get(l,0.) + 1. if i < len(w)-1: b = w[i:i+2] rval[b] = rval.get(b,0.) + 1. return rval d1 = phi(w1) d2 = phi(w2) def mul(d1, d2): rval = 0 for key in set(d1).union(d2): rval += d1.get(key,0) * d2.get(key,0) return rval tot_score = mul(phi(w1),phi(w2)) / float(len(w1)*len(w2)) + \ 0.1 * mul(phi(w1[0:1]), phi(w2[0:1])) + \ 0.1 * mul(phi(w1[-1:]), phi(w2[-1:])) return tot_score scored_candidates = [ (-score(wrong, candidate), candidate) for candidate in candidates ] scored_candidates.sort() return scored_candidates[0][1]
[ "def", "match", "(", "wrong", ",", "candidates", ")", ":", "assert", "len", "(", "candidates", ")", ">", "0", "# Current implementation tries all candidates and outputs the one", "# with the min score", "# Could try to do something smarter", "def", "score", "(", "w1", ",", "w2", ")", ":", "# Current implementation returns negative dot product of", "# the two words mapped into a feature space by mapping phi", "# w -> [ phi(w1), .1 phi(first letter of w), .1 phi(last letter of w) ]", "# Could try to do something smarter", "w1", "=", "w1", ".", "lower", "(", ")", "w2", "=", "w2", ".", "lower", "(", ")", "def", "phi", "(", "w", ")", ":", "# Current feature mapping is to the vector of counts of", "# all letters and two-letter sequences", "# Could try to do something smarter", "rval", "=", "{", "}", "for", "i", "in", "xrange", "(", "len", "(", "w", ")", ")", ":", "l", "=", "w", "[", "i", "]", "rval", "[", "l", "]", "=", "rval", ".", "get", "(", "l", ",", "0.", ")", "+", "1.", "if", "i", "<", "len", "(", "w", ")", "-", "1", ":", "b", "=", "w", "[", "i", ":", "i", "+", "2", "]", "rval", "[", "b", "]", "=", "rval", ".", "get", "(", "b", ",", "0.", ")", "+", "1.", "return", "rval", "d1", "=", "phi", "(", "w1", ")", "d2", "=", "phi", "(", "w2", ")", "def", "mul", "(", "d1", ",", "d2", ")", ":", "rval", "=", "0", "for", "key", "in", "set", "(", "d1", ")", ".", "union", "(", "d2", ")", ":", "rval", "+=", "d1", ".", "get", "(", "key", ",", "0", ")", "*", "d2", ".", "get", "(", "key", ",", "0", ")", "return", "rval", "tot_score", "=", "mul", "(", "phi", "(", "w1", ")", ",", "phi", "(", "w2", ")", ")", "/", "float", "(", "len", "(", "w1", ")", "*", "len", "(", "w2", ")", ")", "+", "0.1", "*", "mul", "(", "phi", "(", "w1", "[", "0", ":", "1", "]", ")", ",", "phi", "(", "w2", "[", "0", ":", "1", "]", ")", ")", "+", "0.1", "*", "mul", "(", "phi", "(", "w1", "[", "-", "1", ":", "]", ")", ",", "phi", "(", "w2", "[", "-", "1", ":", "]", ")", ")", "return", "tot_score", 
"scored_candidates", "=", "[", "(", "-", "score", "(", "wrong", ",", "candidate", ")", ",", "candidate", ")", "for", "candidate", "in", "candidates", "]", "scored_candidates", ".", "sort", "(", ")", "return", "scored_candidates", "[", "0", "]", "[", "1", "]" ]
wrong: a mispelling candidates: a set of correct words returns a guess of which candidate is the right one This should be used with a small number of candidates and a high potential edit distance. ie, use it to correct a wrong filename in a directory, wrong class name in a module, etc. Don't use it to correct small typos of freeform natural language words.
[ "wrong", ":", "a", "mispelling", "candidates", ":", "a", "set", "of", "correct", "words" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/string_utils.py#L153-L219
hannes-brt/hebel
hebel/utils/string_utils.py
censor_non_alphanum
def censor_non_alphanum(s): """ Returns s with all non-alphanumeric characters replaced with * """ def censor(ch): if (ch >= 'A' and ch <= 'z') or (ch >= '0' and ch <= '9'): return ch return '*' return ''.join([censor(ch) for ch in s])
python
def censor_non_alphanum(s): """ Returns s with all non-alphanumeric characters replaced with * """ def censor(ch): if (ch >= 'A' and ch <= 'z') or (ch >= '0' and ch <= '9'): return ch return '*' return ''.join([censor(ch) for ch in s])
[ "def", "censor_non_alphanum", "(", "s", ")", ":", "def", "censor", "(", "ch", ")", ":", "if", "(", "ch", ">=", "'A'", "and", "ch", "<=", "'z'", ")", "or", "(", "ch", ">=", "'0'", "and", "ch", "<=", "'9'", ")", ":", "return", "ch", "return", "'*'", "return", "''", ".", "join", "(", "[", "censor", "(", "ch", ")", "for", "ch", "in", "s", "]", ")" ]
Returns s with all non-alphanumeric characters replaced with *
[ "Returns", "s", "with", "all", "non", "-", "alphanumeric", "characters", "replaced", "with", "*" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/string_utils.py#L221-L231
dwkim78/upsilon
upsilon/extract_features/is_period_alias.py
is_period_alias
def is_period_alias(period): """ Check if a given period is possibly an alias. Parameters ---------- period : float A period to test if it is a possible alias or not. Returns ------- is_alias : boolean True if the given period is in a range of period alias. """ # Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014). # Period alias occurs mostly at ~1 and ~30. # Check each 1, 2, 3, 4, 5 factors. for i in range(1, 6): # One-day and one-month alias if (.99 / float(i)) < period < (1.004 / float(i)): return True if (1.03 / float(i)) < period < (1.04 / float(i)): return True if (29.2 / float(i)) < period < (29.9 / float(i)): return True # From candidates from the two fields 01, 08. # All of them are close to one day (or sidereal) alias. if (0.96465 / float(i)) < period < (0.96485 / float(i)): return True if (0.96725 / float(i)) < period < (0.96745 / float(i)): return True if (0.98190 / float(i)) < period < (0.98230 / float(i)): return True if (1.01034 / float(i)) < period < (1.01076 / float(i)): return True if (1.01568 / float(i)) < period < (1.01604 / float(i)): return True if (1.01718 / float(i)) < period < (1.01742 / float(i)): return True # From the all candidates from the entire LMC fields. # Some of these could be overlapped with the above cuts. if (0.50776 / float(i)) < period < (0.50861 / float(i)): return True if (0.96434 / float(i)) < period < (0.9652 / float(i)): return True if (0.96688 / float(i)) < period < (0.96731 / float(i)): return True if (1.0722 / float(i)) < period < (1.0729 / float(i)): return True if (27.1 / float(i)) < period < (27.5 / float(i)): return True # Not in the range of any alias. return False
python
def is_period_alias(period): """ Check if a given period is possibly an alias. Parameters ---------- period : float A period to test if it is a possible alias or not. Returns ------- is_alias : boolean True if the given period is in a range of period alias. """ # Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014). # Period alias occurs mostly at ~1 and ~30. # Check each 1, 2, 3, 4, 5 factors. for i in range(1, 6): # One-day and one-month alias if (.99 / float(i)) < period < (1.004 / float(i)): return True if (1.03 / float(i)) < period < (1.04 / float(i)): return True if (29.2 / float(i)) < period < (29.9 / float(i)): return True # From candidates from the two fields 01, 08. # All of them are close to one day (or sidereal) alias. if (0.96465 / float(i)) < period < (0.96485 / float(i)): return True if (0.96725 / float(i)) < period < (0.96745 / float(i)): return True if (0.98190 / float(i)) < period < (0.98230 / float(i)): return True if (1.01034 / float(i)) < period < (1.01076 / float(i)): return True if (1.01568 / float(i)) < period < (1.01604 / float(i)): return True if (1.01718 / float(i)) < period < (1.01742 / float(i)): return True # From the all candidates from the entire LMC fields. # Some of these could be overlapped with the above cuts. if (0.50776 / float(i)) < period < (0.50861 / float(i)): return True if (0.96434 / float(i)) < period < (0.9652 / float(i)): return True if (0.96688 / float(i)) < period < (0.96731 / float(i)): return True if (1.0722 / float(i)) < period < (1.0729 / float(i)): return True if (27.1 / float(i)) < period < (27.5 / float(i)): return True # Not in the range of any alias. return False
[ "def", "is_period_alias", "(", "period", ")", ":", "# Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).", "# Period alias occurs mostly at ~1 and ~30.", "# Check each 1, 2, 3, 4, 5 factors.", "for", "i", "in", "range", "(", "1", ",", "6", ")", ":", "# One-day and one-month alias", "if", "(", ".99", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "1.004", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "1.03", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "1.04", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "29.2", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "29.9", "/", "float", "(", "i", ")", ")", ":", "return", "True", "# From candidates from the two fields 01, 08.", "# All of them are close to one day (or sidereal) alias.", "if", "(", "0.96465", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "0.96485", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "0.96725", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "0.96745", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "0.98190", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "0.98230", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "1.01034", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "1.01076", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "1.01568", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "1.01604", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "1.01718", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "1.01742", "/", "float", "(", "i", ")", ")", ":", "return", "True", "# From the all candidates from the entire LMC fields.", "# Some of these could be overlapped with the above cuts.", "if", "(", "0.50776", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "0.50861", "/", "float", "(", "i", ")", ")", ":", "return", "True", 
"if", "(", "0.96434", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "0.9652", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "0.96688", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "0.96731", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "1.0722", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "1.0729", "/", "float", "(", "i", ")", ")", ":", "return", "True", "if", "(", "27.1", "/", "float", "(", "i", ")", ")", "<", "period", "<", "(", "27.5", "/", "float", "(", "i", ")", ")", ":", "return", "True", "# Not in the range of any alias.", "return", "False" ]
Check if a given period is possibly an alias. Parameters ---------- period : float A period to test if it is a possible alias or not. Returns ------- is_alias : boolean True if the given period is in a range of period alias.
[ "Check", "if", "a", "given", "period", "is", "possibly", "an", "alias", "." ]
train
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/is_period_alias.py#L1-L57
hannes-brt/hebel
hebel/utils/serial.py
save
def save(filepath, obj, on_overwrite = 'ignore'): """ Serialize `object` to a file denoted by `filepath`. Parameters ---------- filepath : str A filename. If the suffix is `.joblib` and joblib can be imported, `joblib.dump` is used in place of the regular pickling mechanisms; this results in much faster saves by saving arrays as separate .npy files on disk. If the file suffix is `.npy` than `numpy.save` is attempted on `obj`. Otherwise, (c)pickle is used. obj : object A Python object to be serialized. on_overwrite: A string specifying what to do if the file already exists. ignore: just overwrite it backup: make a copy of the file (<filepath>.bak) and delete it when done saving the new copy. this allows recovery of the old version of the file if saving the new one fails """ filepath = preprocess(filepath) if os.path.exists(filepath): if on_overwrite == 'backup': backup = filepath + '.bak' shutil.move(filepath, backup) save(filepath, obj) try: os.remove(backup) except Exception, e: warnings.warn("Got an error while traing to remove "+backup+":"+str(e)) return else: assert on_overwrite == 'ignore' try: _save(filepath, obj) except RuntimeError, e: """ Sometimes for large theano graphs, pickle/cPickle exceed the maximum recursion depth. This seems to me like a fundamental design flaw in pickle/cPickle. The workaround I employ here is the one recommended to someone who had a similar problem on stackexchange: http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle Obviously this does not scale and could cause a crash but I don't see another solution short of writing our own implementation of pickle. 
""" if str(e).find('recursion') != -1: warnings.warn('pylearn2.utils.save encountered the following ' 'error: ' + str(e) + '\nAttempting to resolve this error by calling ' + 'sys.setrecusionlimit and retrying') old_limit = sys.getrecursionlimit() try: sys.setrecursionlimit(50000) _save(filepath, obj) finally: sys.setrecursionlimit(old_limit)
python
def save(filepath, obj, on_overwrite = 'ignore'): """ Serialize `object` to a file denoted by `filepath`. Parameters ---------- filepath : str A filename. If the suffix is `.joblib` and joblib can be imported, `joblib.dump` is used in place of the regular pickling mechanisms; this results in much faster saves by saving arrays as separate .npy files on disk. If the file suffix is `.npy` than `numpy.save` is attempted on `obj`. Otherwise, (c)pickle is used. obj : object A Python object to be serialized. on_overwrite: A string specifying what to do if the file already exists. ignore: just overwrite it backup: make a copy of the file (<filepath>.bak) and delete it when done saving the new copy. this allows recovery of the old version of the file if saving the new one fails """ filepath = preprocess(filepath) if os.path.exists(filepath): if on_overwrite == 'backup': backup = filepath + '.bak' shutil.move(filepath, backup) save(filepath, obj) try: os.remove(backup) except Exception, e: warnings.warn("Got an error while traing to remove "+backup+":"+str(e)) return else: assert on_overwrite == 'ignore' try: _save(filepath, obj) except RuntimeError, e: """ Sometimes for large theano graphs, pickle/cPickle exceed the maximum recursion depth. This seems to me like a fundamental design flaw in pickle/cPickle. The workaround I employ here is the one recommended to someone who had a similar problem on stackexchange: http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle Obviously this does not scale and could cause a crash but I don't see another solution short of writing our own implementation of pickle. 
""" if str(e).find('recursion') != -1: warnings.warn('pylearn2.utils.save encountered the following ' 'error: ' + str(e) + '\nAttempting to resolve this error by calling ' + 'sys.setrecusionlimit and retrying') old_limit = sys.getrecursionlimit() try: sys.setrecursionlimit(50000) _save(filepath, obj) finally: sys.setrecursionlimit(old_limit)
[ "def", "save", "(", "filepath", ",", "obj", ",", "on_overwrite", "=", "'ignore'", ")", ":", "filepath", "=", "preprocess", "(", "filepath", ")", "if", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "if", "on_overwrite", "==", "'backup'", ":", "backup", "=", "filepath", "+", "'.bak'", "shutil", ".", "move", "(", "filepath", ",", "backup", ")", "save", "(", "filepath", ",", "obj", ")", "try", ":", "os", ".", "remove", "(", "backup", ")", "except", "Exception", ",", "e", ":", "warnings", ".", "warn", "(", "\"Got an error while traing to remove \"", "+", "backup", "+", "\":\"", "+", "str", "(", "e", ")", ")", "return", "else", ":", "assert", "on_overwrite", "==", "'ignore'", "try", ":", "_save", "(", "filepath", ",", "obj", ")", "except", "RuntimeError", ",", "e", ":", "\"\"\" Sometimes for large theano graphs, pickle/cPickle exceed the\n maximum recursion depth. This seems to me like a fundamental\n design flaw in pickle/cPickle. The workaround I employ here\n is the one recommended to someone who had a similar problem\n on stackexchange:\n\n http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle\n\n Obviously this does not scale and could cause a crash\n but I don't see another solution short of writing our\n own implementation of pickle.\n \"\"\"", "if", "str", "(", "e", ")", ".", "find", "(", "'recursion'", ")", "!=", "-", "1", ":", "warnings", ".", "warn", "(", "'pylearn2.utils.save encountered the following '", "'error: '", "+", "str", "(", "e", ")", "+", "'\\nAttempting to resolve this error by calling '", "+", "'sys.setrecusionlimit and retrying'", ")", "old_limit", "=", "sys", ".", "getrecursionlimit", "(", ")", "try", ":", "sys", ".", "setrecursionlimit", "(", "50000", ")", "_save", "(", "filepath", ",", "obj", ")", "finally", ":", "sys", ".", "setrecursionlimit", "(", "old_limit", ")" ]
Serialize `object` to a file denoted by `filepath`. Parameters ---------- filepath : str A filename. If the suffix is `.joblib` and joblib can be imported, `joblib.dump` is used in place of the regular pickling mechanisms; this results in much faster saves by saving arrays as separate .npy files on disk. If the file suffix is `.npy` than `numpy.save` is attempted on `obj`. Otherwise, (c)pickle is used. obj : object A Python object to be serialized. on_overwrite: A string specifying what to do if the file already exists. ignore: just overwrite it backup: make a copy of the file (<filepath>.bak) and delete it when done saving the new copy. this allows recovery of the old version of the file if saving the new one fails
[ "Serialize", "object", "to", "a", "file", "denoted", "by", "filepath", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/serial.py#L177-L245
hannes-brt/hebel
hebel/utils/serial.py
get_pickle_protocol
def get_pickle_protocol(): """ Allow configuration of the pickle protocol on a per-machine basis. This way, if you use multiple platforms with different versions of pickle, you can configure each of them to use the highest protocol supported by all of the machines that you want to be able to communicate. """ try: protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL'] except KeyError: # If not defined, we default to 0 because this is the default # protocol used by cPickle.dump (and because it results in # maximum portability) protocol_str = '0' if protocol_str == 'pickle.HIGHEST_PROTOCOL': return pickle.HIGHEST_PROTOCOL return int(protocol_str)
python
def get_pickle_protocol(): """ Allow configuration of the pickle protocol on a per-machine basis. This way, if you use multiple platforms with different versions of pickle, you can configure each of them to use the highest protocol supported by all of the machines that you want to be able to communicate. """ try: protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL'] except KeyError: # If not defined, we default to 0 because this is the default # protocol used by cPickle.dump (and because it results in # maximum portability) protocol_str = '0' if protocol_str == 'pickle.HIGHEST_PROTOCOL': return pickle.HIGHEST_PROTOCOL return int(protocol_str)
[ "def", "get_pickle_protocol", "(", ")", ":", "try", ":", "protocol_str", "=", "os", ".", "environ", "[", "'PYLEARN2_PICKLE_PROTOCOL'", "]", "except", "KeyError", ":", "# If not defined, we default to 0 because this is the default", "# protocol used by cPickle.dump (and because it results in", "# maximum portability)", "protocol_str", "=", "'0'", "if", "protocol_str", "==", "'pickle.HIGHEST_PROTOCOL'", ":", "return", "pickle", ".", "HIGHEST_PROTOCOL", "return", "int", "(", "protocol_str", ")" ]
Allow configuration of the pickle protocol on a per-machine basis. This way, if you use multiple platforms with different versions of pickle, you can configure each of them to use the highest protocol supported by all of the machines that you want to be able to communicate.
[ "Allow", "configuration", "of", "the", "pickle", "protocol", "on", "a", "per", "-", "machine", "basis", ".", "This", "way", "if", "you", "use", "multiple", "platforms", "with", "different", "versions", "of", "pickle", "you", "can", "configure", "each", "of", "them", "to", "use", "the", "highest", "protocol", "supported", "by", "all", "of", "the", "machines", "that", "you", "want", "to", "be", "able", "to", "communicate", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/serial.py#L248-L265
hannes-brt/hebel
hebel/utils/serial.py
load_train_file
def load_train_file(config_file_path): """Loads and parses a yaml file for a Train object. Publishes the relevant training environment variables""" from pylearn2.config import yaml_parse suffix_to_strip = '.yaml' # publish environment variables related to file name if config_file_path.endswith(suffix_to_strip): config_file_full_stem = config_file_path[0:-len(suffix_to_strip)] else: config_file_full_stem = config_file_path for varname in ["PYLEARN2_TRAIN_FILE_NAME", #this one is deprecated "PYLEARN2_TRAIN_FILE_FULL_STEM"]: #this is the new, accepted name environ.putenv(varname, config_file_full_stem) directory = config_file_path.split('/')[:-1] directory = '/'.join(directory) if directory != '': directory += '/' environ.putenv("PYLEARN2_TRAIN_DIR", directory) environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1] ) environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1] ) return yaml_parse.load_path(config_file_path)
python
def load_train_file(config_file_path): """Loads and parses a yaml file for a Train object. Publishes the relevant training environment variables""" from pylearn2.config import yaml_parse suffix_to_strip = '.yaml' # publish environment variables related to file name if config_file_path.endswith(suffix_to_strip): config_file_full_stem = config_file_path[0:-len(suffix_to_strip)] else: config_file_full_stem = config_file_path for varname in ["PYLEARN2_TRAIN_FILE_NAME", #this one is deprecated "PYLEARN2_TRAIN_FILE_FULL_STEM"]: #this is the new, accepted name environ.putenv(varname, config_file_full_stem) directory = config_file_path.split('/')[:-1] directory = '/'.join(directory) if directory != '': directory += '/' environ.putenv("PYLEARN2_TRAIN_DIR", directory) environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1] ) environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1] ) return yaml_parse.load_path(config_file_path)
[ "def", "load_train_file", "(", "config_file_path", ")", ":", "from", "pylearn2", ".", "config", "import", "yaml_parse", "suffix_to_strip", "=", "'.yaml'", "# publish environment variables related to file name", "if", "config_file_path", ".", "endswith", "(", "suffix_to_strip", ")", ":", "config_file_full_stem", "=", "config_file_path", "[", "0", ":", "-", "len", "(", "suffix_to_strip", ")", "]", "else", ":", "config_file_full_stem", "=", "config_file_path", "for", "varname", "in", "[", "\"PYLEARN2_TRAIN_FILE_NAME\"", ",", "#this one is deprecated", "\"PYLEARN2_TRAIN_FILE_FULL_STEM\"", "]", ":", "#this is the new, accepted name", "environ", ".", "putenv", "(", "varname", ",", "config_file_full_stem", ")", "directory", "=", "config_file_path", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", "directory", "=", "'/'", ".", "join", "(", "directory", ")", "if", "directory", "!=", "''", ":", "directory", "+=", "'/'", "environ", ".", "putenv", "(", "\"PYLEARN2_TRAIN_DIR\"", ",", "directory", ")", "environ", ".", "putenv", "(", "\"PYLEARN2_TRAIN_BASE_NAME\"", ",", "config_file_path", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "environ", ".", "putenv", "(", "\"PYLEARN2_TRAIN_FILE_STEM\"", ",", "config_file_full_stem", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "return", "yaml_parse", ".", "load_path", "(", "config_file_path", ")" ]
Loads and parses a yaml file for a Train object. Publishes the relevant training environment variables
[ "Loads", "and", "parses", "a", "yaml", "file", "for", "a", "Train", "object", ".", "Publishes", "the", "relevant", "training", "environment", "variables" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/utils/serial.py#L426-L451
hannes-brt/hebel
hebel/layers/input_dropout.py
InputDropout.feed_forward
def feed_forward(self, input_data, prediction=False): """Propagate forward through the layer **Parameters:** input_data : ``GPUArray`` Inpute data to perform dropout on. prediction : bool, optional Whether to use prediction model. If true, then the data is scaled by ``1 - dropout_probability`` uses dropout. **Returns:** dropout_data : ``GPUArray`` The data after performing dropout. """ if input_data.shape[1] != self.n_in: raise ValueError('Number of outputs from previous layer (%d) ' 'does not match number of inputs to this layer (%d)' % (input_data.shape[1], self.n_in)) if not prediction: dropout_input = gpuarray.empty_like(input_data) dropout_mask = sample_dropout_mask(input_data, self.dropout_probability, target=dropout_input ) return dropout_input, dropout_mask else: return (input_data * (1 - self.dropout_probability),)
python
def feed_forward(self, input_data, prediction=False): """Propagate forward through the layer **Parameters:** input_data : ``GPUArray`` Inpute data to perform dropout on. prediction : bool, optional Whether to use prediction model. If true, then the data is scaled by ``1 - dropout_probability`` uses dropout. **Returns:** dropout_data : ``GPUArray`` The data after performing dropout. """ if input_data.shape[1] != self.n_in: raise ValueError('Number of outputs from previous layer (%d) ' 'does not match number of inputs to this layer (%d)' % (input_data.shape[1], self.n_in)) if not prediction: dropout_input = gpuarray.empty_like(input_data) dropout_mask = sample_dropout_mask(input_data, self.dropout_probability, target=dropout_input ) return dropout_input, dropout_mask else: return (input_data * (1 - self.dropout_probability),)
[ "def", "feed_forward", "(", "self", ",", "input_data", ",", "prediction", "=", "False", ")", ":", "if", "input_data", ".", "shape", "[", "1", "]", "!=", "self", ".", "n_in", ":", "raise", "ValueError", "(", "'Number of outputs from previous layer (%d) '", "'does not match number of inputs to this layer (%d)'", "%", "(", "input_data", ".", "shape", "[", "1", "]", ",", "self", ".", "n_in", ")", ")", "if", "not", "prediction", ":", "dropout_input", "=", "gpuarray", ".", "empty_like", "(", "input_data", ")", "dropout_mask", "=", "sample_dropout_mask", "(", "input_data", ",", "self", ".", "dropout_probability", ",", "target", "=", "dropout_input", ")", "return", "dropout_input", ",", "dropout_mask", "else", ":", "return", "(", "input_data", "*", "(", "1", "-", "self", ".", "dropout_probability", ")", ",", ")" ]
Propagate forward through the layer **Parameters:** input_data : ``GPUArray`` Inpute data to perform dropout on. prediction : bool, optional Whether to use prediction model. If true, then the data is scaled by ``1 - dropout_probability`` uses dropout. **Returns:** dropout_data : ``GPUArray`` The data after performing dropout.
[ "Propagate", "forward", "through", "the", "layer" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/input_dropout.py#L59-L89
hannes-brt/hebel
hebel/layers/input_dropout.py
InputDropout.backprop
def backprop(self, input_data, df_output, cache=None): """ Backpropagate through the hidden layer **Parameters:** input_data : ``GPUArray`` Inpute data to perform dropout on. df_output : ``GPUArray`` Gradients with respect to the output of this layer (received from the layer above). cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : empty tuple Gradients are empty since this layer has no parameters. df_input : ``GPUArray`` Gradients with respect to the input. """ if self.compute_input_gradients: apply_dropout_mask(df_output, dropout_mask) return tuple(), df_output
python
def backprop(self, input_data, df_output, cache=None): """ Backpropagate through the hidden layer **Parameters:** input_data : ``GPUArray`` Inpute data to perform dropout on. df_output : ``GPUArray`` Gradients with respect to the output of this layer (received from the layer above). cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : empty tuple Gradients are empty since this layer has no parameters. df_input : ``GPUArray`` Gradients with respect to the input. """ if self.compute_input_gradients: apply_dropout_mask(df_output, dropout_mask) return tuple(), df_output
[ "def", "backprop", "(", "self", ",", "input_data", ",", "df_output", ",", "cache", "=", "None", ")", ":", "if", "self", ".", "compute_input_gradients", ":", "apply_dropout_mask", "(", "df_output", ",", "dropout_mask", ")", "return", "tuple", "(", ")", ",", "df_output" ]
Backpropagate through the hidden layer **Parameters:** input_data : ``GPUArray`` Inpute data to perform dropout on. df_output : ``GPUArray`` Gradients with respect to the output of this layer (received from the layer above). cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : empty tuple Gradients are empty since this layer has no parameters. df_input : ``GPUArray`` Gradients with respect to the input.
[ "Backpropagate", "through", "the", "hidden", "layer" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/input_dropout.py#L91-L119
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
POINTER
def POINTER(obj): """ Create ctypes pointer to object. Notes ----- This function converts None to a real NULL pointer because of bug in how ctypes handles None on 64-bit platforms. """ p = ctypes.POINTER(obj) if not isinstance(p.from_param, classmethod): def from_param(cls, x): if x is None: return cls() else: return x p.from_param = classmethod(from_param) return p
python
def POINTER(obj): """ Create ctypes pointer to object. Notes ----- This function converts None to a real NULL pointer because of bug in how ctypes handles None on 64-bit platforms. """ p = ctypes.POINTER(obj) if not isinstance(p.from_param, classmethod): def from_param(cls, x): if x is None: return cls() else: return x p.from_param = classmethod(from_param) return p
[ "def", "POINTER", "(", "obj", ")", ":", "p", "=", "ctypes", ".", "POINTER", "(", "obj", ")", "if", "not", "isinstance", "(", "p", ".", "from_param", ",", "classmethod", ")", ":", "def", "from_param", "(", "cls", ",", "x", ")", ":", "if", "x", "is", "None", ":", "return", "cls", "(", ")", "else", ":", "return", "x", "p", ".", "from_param", "=", "classmethod", "(", "from_param", ")", "return", "p" ]
Create ctypes pointer to object. Notes ----- This function converts None to a real NULL pointer because of bug in how ctypes handles None on 64-bit platforms.
[ "Create", "ctypes", "pointer", "to", "object", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L63-L83
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
gpuarray_ptr
def gpuarray_ptr(g): """ Return ctypes pointer to data in GPUAarray object. """ addr = int(g.gpudata) if g.dtype == np.int8: return ctypes.cast(addr, POINTER(ctypes.c_byte)) if g.dtype == np.uint8: return ctypes.cast(addr, POINTER(ctypes.c_ubyte)) if g.dtype == np.int16: return ctypes.cast(addr, POINTER(ctypes.c_short)) if g.dtype == np.uint16: return ctypes.cast(addr, POINTER(ctypes.c_ushort)) if g.dtype == np.int32: return ctypes.cast(addr, POINTER(ctypes.c_int)) if g.dtype == np.uint32: return ctypes.cast(addr, POINTER(ctypes.c_uint)) if g.dtype == np.int64: return ctypes.cast(addr, POINTER(ctypes.c_long)) if g.dtype == np.uint64: return ctypes.cast(addr, POINTER(ctypes.c_ulong)) if g.dtype == np.float32: return ctypes.cast(addr, POINTER(ctypes.c_float)) elif g.dtype == np.float64: return ctypes.cast(addr, POINTER(ctypes.c_double)) elif g.dtype == np.complex64: return ctypes.cast(addr, POINTER(cuFloatComplex)) elif g.dtype == np.complex128: return ctypes.cast(addr, POINTER(cuDoubleComplex)) else: raise ValueError('unrecognized type')
python
def gpuarray_ptr(g): """ Return ctypes pointer to data in GPUAarray object. """ addr = int(g.gpudata) if g.dtype == np.int8: return ctypes.cast(addr, POINTER(ctypes.c_byte)) if g.dtype == np.uint8: return ctypes.cast(addr, POINTER(ctypes.c_ubyte)) if g.dtype == np.int16: return ctypes.cast(addr, POINTER(ctypes.c_short)) if g.dtype == np.uint16: return ctypes.cast(addr, POINTER(ctypes.c_ushort)) if g.dtype == np.int32: return ctypes.cast(addr, POINTER(ctypes.c_int)) if g.dtype == np.uint32: return ctypes.cast(addr, POINTER(ctypes.c_uint)) if g.dtype == np.int64: return ctypes.cast(addr, POINTER(ctypes.c_long)) if g.dtype == np.uint64: return ctypes.cast(addr, POINTER(ctypes.c_ulong)) if g.dtype == np.float32: return ctypes.cast(addr, POINTER(ctypes.c_float)) elif g.dtype == np.float64: return ctypes.cast(addr, POINTER(ctypes.c_double)) elif g.dtype == np.complex64: return ctypes.cast(addr, POINTER(cuFloatComplex)) elif g.dtype == np.complex128: return ctypes.cast(addr, POINTER(cuDoubleComplex)) else: raise ValueError('unrecognized type')
[ "def", "gpuarray_ptr", "(", "g", ")", ":", "addr", "=", "int", "(", "g", ".", "gpudata", ")", "if", "g", ".", "dtype", "==", "np", ".", "int8", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_byte", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "uint8", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_ubyte", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "int16", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_short", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "uint16", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_ushort", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "int32", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_int", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "uint32", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_uint", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "int64", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_long", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "uint64", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_ulong", ")", ")", "if", "g", ".", "dtype", "==", "np", ".", "float32", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_float", ")", ")", "elif", "g", ".", "dtype", "==", "np", ".", "float64", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "ctypes", ".", "c_double", ")", ")", "elif", "g", ".", "dtype", "==", "np", ".", "complex64", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "cuFloatComplex", ")", ")", "elif", "g", ".", "dtype", "==", "np", ".", "complex128", ":", "return", "ctypes", ".", "cast", "(", "addr", ",", "POINTER", "(", "cuDoubleComplex", 
")", ")", "else", ":", "raise", "ValueError", "(", "'unrecognized type'", ")" ]
Return ctypes pointer to data in GPUAarray object.
[ "Return", "ctypes", "pointer", "to", "data", "in", "GPUAarray", "object", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L108-L140
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaMalloc
def cudaMalloc(count, ctype=None): """ Allocate device memory. Allocate memory on the device associated with the current active context. Parameters ---------- count : int Number of bytes of memory to allocate ctype : _ctypes.SimpleType, optional ctypes type to cast returned pointer. Returns ------- ptr : ctypes pointer Pointer to allocated device memory. """ ptr = ctypes.c_void_p() status = _libcudart.cudaMalloc(ctypes.byref(ptr), count) cudaCheckStatus(status) if ctype != None: ptr = ctypes.cast(ptr, ctypes.POINTER(ctype)) return ptr
python
def cudaMalloc(count, ctype=None): """ Allocate device memory. Allocate memory on the device associated with the current active context. Parameters ---------- count : int Number of bytes of memory to allocate ctype : _ctypes.SimpleType, optional ctypes type to cast returned pointer. Returns ------- ptr : ctypes pointer Pointer to allocated device memory. """ ptr = ctypes.c_void_p() status = _libcudart.cudaMalloc(ctypes.byref(ptr), count) cudaCheckStatus(status) if ctype != None: ptr = ctypes.cast(ptr, ctypes.POINTER(ctype)) return ptr
[ "def", "cudaMalloc", "(", "count", ",", "ctype", "=", "None", ")", ":", "ptr", "=", "ctypes", ".", "c_void_p", "(", ")", "status", "=", "_libcudart", ".", "cudaMalloc", "(", "ctypes", ".", "byref", "(", "ptr", ")", ",", "count", ")", "cudaCheckStatus", "(", "status", ")", "if", "ctype", "!=", "None", ":", "ptr", "=", "ctypes", ".", "cast", "(", "ptr", ",", "ctypes", ".", "POINTER", "(", "ctype", ")", ")", "return", "ptr" ]
Allocate device memory. Allocate memory on the device associated with the current active context. Parameters ---------- count : int Number of bytes of memory to allocate ctype : _ctypes.SimpleType, optional ctypes type to cast returned pointer. Returns ------- ptr : ctypes pointer Pointer to allocated device memory.
[ "Allocate", "device", "memory", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L485-L511
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaMallocPitch
def cudaMallocPitch(pitch, rows, cols, elesize): """ Allocate pitched device memory. Allocate pitched memory on the device associated with the current active context. Parameters ---------- pitch : int Pitch for allocation. rows : int Requested pitched allocation height. cols : int Requested pitched allocation width. elesize : int Size of memory element. Returns ------- ptr : ctypes pointer Pointer to allocated device memory. """ ptr = ctypes.c_void_p() status = _libcudart.cudaMallocPitch(ctypes.byref(ptr), ctypes.c_size_t(pitch), cols*elesize, rows) cudaCheckStatus(status) return ptr, pitch
python
def cudaMallocPitch(pitch, rows, cols, elesize): """ Allocate pitched device memory. Allocate pitched memory on the device associated with the current active context. Parameters ---------- pitch : int Pitch for allocation. rows : int Requested pitched allocation height. cols : int Requested pitched allocation width. elesize : int Size of memory element. Returns ------- ptr : ctypes pointer Pointer to allocated device memory. """ ptr = ctypes.c_void_p() status = _libcudart.cudaMallocPitch(ctypes.byref(ptr), ctypes.c_size_t(pitch), cols*elesize, rows) cudaCheckStatus(status) return ptr, pitch
[ "def", "cudaMallocPitch", "(", "pitch", ",", "rows", ",", "cols", ",", "elesize", ")", ":", "ptr", "=", "ctypes", ".", "c_void_p", "(", ")", "status", "=", "_libcudart", ".", "cudaMallocPitch", "(", "ctypes", ".", "byref", "(", "ptr", ")", ",", "ctypes", ".", "c_size_t", "(", "pitch", ")", ",", "cols", "*", "elesize", ",", "rows", ")", "cudaCheckStatus", "(", "status", ")", "return", "ptr", ",", "pitch" ]
Allocate pitched device memory. Allocate pitched memory on the device associated with the current active context. Parameters ---------- pitch : int Pitch for allocation. rows : int Requested pitched allocation height. cols : int Requested pitched allocation width. elesize : int Size of memory element. Returns ------- ptr : ctypes pointer Pointer to allocated device memory.
[ "Allocate", "pitched", "device", "memory", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L536-L566
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaMemcpy_htod
def cudaMemcpy_htod(dst, src, count): """ Copy memory from host to device. Copy data from host memory to device memory. Parameters ---------- dst : ctypes pointer Device memory pointer. src : ctypes pointer Host memory pointer. count : int Number of bytes to copy. """ status = _libcudart.cudaMemcpy(dst, src, ctypes.c_size_t(count), cudaMemcpyHostToDevice) cudaCheckStatus(status)
python
def cudaMemcpy_htod(dst, src, count): """ Copy memory from host to device. Copy data from host memory to device memory. Parameters ---------- dst : ctypes pointer Device memory pointer. src : ctypes pointer Host memory pointer. count : int Number of bytes to copy. """ status = _libcudart.cudaMemcpy(dst, src, ctypes.c_size_t(count), cudaMemcpyHostToDevice) cudaCheckStatus(status)
[ "def", "cudaMemcpy_htod", "(", "dst", ",", "src", ",", "count", ")", ":", "status", "=", "_libcudart", ".", "cudaMemcpy", "(", "dst", ",", "src", ",", "ctypes", ".", "c_size_t", "(", "count", ")", ",", "cudaMemcpyHostToDevice", ")", "cudaCheckStatus", "(", "status", ")" ]
Copy memory from host to device. Copy data from host memory to device memory. Parameters ---------- dst : ctypes pointer Device memory pointer. src : ctypes pointer Host memory pointer. count : int Number of bytes to copy.
[ "Copy", "memory", "from", "host", "to", "device", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L578-L598
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaMemcpy_dtoh
def cudaMemcpy_dtoh(dst, src, count): """ Copy memory from device to host. Copy data from device memory to host memory. Parameters ---------- dst : ctypes pointer Host memory pointer. src : ctypes pointer Device memory pointer. count : int Number of bytes to copy. """ status = _libcudart.cudaMemcpy(dst, src, ctypes.c_size_t(count), cudaMemcpyDeviceToHost) cudaCheckStatus(status)
python
def cudaMemcpy_dtoh(dst, src, count): """ Copy memory from device to host. Copy data from device memory to host memory. Parameters ---------- dst : ctypes pointer Host memory pointer. src : ctypes pointer Device memory pointer. count : int Number of bytes to copy. """ status = _libcudart.cudaMemcpy(dst, src, ctypes.c_size_t(count), cudaMemcpyDeviceToHost) cudaCheckStatus(status)
[ "def", "cudaMemcpy_dtoh", "(", "dst", ",", "src", ",", "count", ")", ":", "status", "=", "_libcudart", ".", "cudaMemcpy", "(", "dst", ",", "src", ",", "ctypes", ".", "c_size_t", "(", "count", ")", ",", "cudaMemcpyDeviceToHost", ")", "cudaCheckStatus", "(", "status", ")" ]
Copy memory from device to host. Copy data from device memory to host memory. Parameters ---------- dst : ctypes pointer Host memory pointer. src : ctypes pointer Device memory pointer. count : int Number of bytes to copy.
[ "Copy", "memory", "from", "device", "to", "host", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L600-L620
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaMemGetInfo
def cudaMemGetInfo(): """ Return the amount of free and total device memory. Returns ------- free : long Free memory in bytes. total : long Total memory in bytes. """ free = ctypes.c_size_t() total = ctypes.c_size_t() status = _libcudart.cudaMemGetInfo(ctypes.byref(free), ctypes.byref(total)) cudaCheckStatus(status) return free.value, total.value
python
def cudaMemGetInfo(): """ Return the amount of free and total device memory. Returns ------- free : long Free memory in bytes. total : long Total memory in bytes. """ free = ctypes.c_size_t() total = ctypes.c_size_t() status = _libcudart.cudaMemGetInfo(ctypes.byref(free), ctypes.byref(total)) cudaCheckStatus(status) return free.value, total.value
[ "def", "cudaMemGetInfo", "(", ")", ":", "free", "=", "ctypes", ".", "c_size_t", "(", ")", "total", "=", "ctypes", ".", "c_size_t", "(", ")", "status", "=", "_libcudart", ".", "cudaMemGetInfo", "(", "ctypes", ".", "byref", "(", "free", ")", ",", "ctypes", ".", "byref", "(", "total", ")", ")", "cudaCheckStatus", "(", "status", ")", "return", "free", ".", "value", ",", "total", ".", "value" ]
Return the amount of free and total device memory. Returns ------- free : long Free memory in bytes. total : long Total memory in bytes.
[ "Return", "the", "amount", "of", "free", "and", "total", "device", "memory", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L625-L643
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaGetDevice
def cudaGetDevice(): """ Get current CUDA device. Return the identifying number of the device currently used to process CUDA operations. Returns ------- dev : int Device number. """ dev = ctypes.c_int() status = _libcudart.cudaGetDevice(ctypes.byref(dev)) cudaCheckStatus(status) return dev.value
python
def cudaGetDevice(): """ Get current CUDA device. Return the identifying number of the device currently used to process CUDA operations. Returns ------- dev : int Device number. """ dev = ctypes.c_int() status = _libcudart.cudaGetDevice(ctypes.byref(dev)) cudaCheckStatus(status) return dev.value
[ "def", "cudaGetDevice", "(", ")", ":", "dev", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcudart", ".", "cudaGetDevice", "(", "ctypes", ".", "byref", "(", "dev", ")", ")", "cudaCheckStatus", "(", "status", ")", "return", "dev", ".", "value" ]
Get current CUDA device. Return the identifying number of the device currently used to process CUDA operations. Returns ------- dev : int Device number.
[ "Get", "current", "CUDA", "device", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L665-L682
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaDriverGetVersion
def cudaDriverGetVersion(): """ Get installed CUDA driver version. Return the version of the installed CUDA driver as an integer. If no driver is detected, 0 is returned. Returns ------- version : int Driver version. """ version = ctypes.c_int() status = _libcudart.cudaDriverGetVersion(ctypes.byref(version)) cudaCheckStatus(status) return version.value
python
def cudaDriverGetVersion(): """ Get installed CUDA driver version. Return the version of the installed CUDA driver as an integer. If no driver is detected, 0 is returned. Returns ------- version : int Driver version. """ version = ctypes.c_int() status = _libcudart.cudaDriverGetVersion(ctypes.byref(version)) cudaCheckStatus(status) return version.value
[ "def", "cudaDriverGetVersion", "(", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcudart", ".", "cudaDriverGetVersion", "(", "ctypes", ".", "byref", "(", "version", ")", ")", "cudaCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Get installed CUDA driver version. Return the version of the installed CUDA driver as an integer. If no driver is detected, 0 is returned. Returns ------- version : int Driver version.
[ "Get", "installed", "CUDA", "driver", "version", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L686-L703
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
cudaPointerGetAttributes
def cudaPointerGetAttributes(ptr): """ Get memory pointer attributes. Returns attributes of the specified pointer. Parameters ---------- ptr : ctypes pointer Memory pointer to examine. Returns ------- memory_type : int Memory type; 1 indicates host memory, 2 indicates device memory. device : int Number of device associated with pointer. Notes ----- This function only works with CUDA 4.0 and later. """ attributes = cudaPointerAttributes() status = \ _libcudart.cudaPointerGetAttributes(ctypes.byref(attributes), ptr) cudaCheckStatus(status) return attributes.memoryType, attributes.device
python
def cudaPointerGetAttributes(ptr): """ Get memory pointer attributes. Returns attributes of the specified pointer. Parameters ---------- ptr : ctypes pointer Memory pointer to examine. Returns ------- memory_type : int Memory type; 1 indicates host memory, 2 indicates device memory. device : int Number of device associated with pointer. Notes ----- This function only works with CUDA 4.0 and later. """ attributes = cudaPointerAttributes() status = \ _libcudart.cudaPointerGetAttributes(ctypes.byref(attributes), ptr) cudaCheckStatus(status) return attributes.memoryType, attributes.device
[ "def", "cudaPointerGetAttributes", "(", "ptr", ")", ":", "attributes", "=", "cudaPointerAttributes", "(", ")", "status", "=", "_libcudart", ".", "cudaPointerGetAttributes", "(", "ctypes", ".", "byref", "(", "attributes", ")", ",", "ptr", ")", "cudaCheckStatus", "(", "status", ")", "return", "attributes", ".", "memoryType", ",", "attributes", ".", "device" ]
Get memory pointer attributes. Returns attributes of the specified pointer. Parameters ---------- ptr : ctypes pointer Memory pointer to examine. Returns ------- memory_type : int Memory type; 1 indicates host memory, 2 indicates device memory. device : int Number of device associated with pointer. Notes ----- This function only works with CUDA 4.0 and later.
[ "Get", "memory", "pointer", "attributes", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L720-L749
rix0rrr/gcl
gcl/framework.py
eval
def eval(thunk, env): """Evaluate a thunk in an environment. Will defer the actual evaluation to the thunk itself, but adds two things: caching and recursion detection. Since we have to use a global evaluation stack (because there is a variety of functions that may be invoked, not just eval() but also __getitem__, and not all of them can pass along a context object), GCL evaluation is not thread safe. With regard to schemas: - A schema can be passed in from outside. The returned object will be validated to see that it conforms to the schema. The schema will be attached to the value if possible. - Some objects may contain their own schema, such as tuples. This would be out of scope of the eval() function, were it not for: - Schema validation can be disabled in an evaluation call stack. This is useful if we're evaluating a tuple only for its schema information. At that point, we're not interested if the object is value-complete. """ key = Activation.key(thunk, env) if Activation.activated(key): raise exceptions.RecursionError('Reference cycle') with Activation(key): return eval_cache.get(key, thunk.eval, env)
python
def eval(thunk, env): """Evaluate a thunk in an environment. Will defer the actual evaluation to the thunk itself, but adds two things: caching and recursion detection. Since we have to use a global evaluation stack (because there is a variety of functions that may be invoked, not just eval() but also __getitem__, and not all of them can pass along a context object), GCL evaluation is not thread safe. With regard to schemas: - A schema can be passed in from outside. The returned object will be validated to see that it conforms to the schema. The schema will be attached to the value if possible. - Some objects may contain their own schema, such as tuples. This would be out of scope of the eval() function, were it not for: - Schema validation can be disabled in an evaluation call stack. This is useful if we're evaluating a tuple only for its schema information. At that point, we're not interested if the object is value-complete. """ key = Activation.key(thunk, env) if Activation.activated(key): raise exceptions.RecursionError('Reference cycle') with Activation(key): return eval_cache.get(key, thunk.eval, env)
[ "def", "eval", "(", "thunk", ",", "env", ")", ":", "key", "=", "Activation", ".", "key", "(", "thunk", ",", "env", ")", "if", "Activation", ".", "activated", "(", "key", ")", ":", "raise", "exceptions", ".", "RecursionError", "(", "'Reference cycle'", ")", "with", "Activation", "(", "key", ")", ":", "return", "eval_cache", ".", "get", "(", "key", ",", "thunk", ".", "eval", ",", "env", ")" ]
Evaluate a thunk in an environment. Will defer the actual evaluation to the thunk itself, but adds two things: caching and recursion detection. Since we have to use a global evaluation stack (because there is a variety of functions that may be invoked, not just eval() but also __getitem__, and not all of them can pass along a context object), GCL evaluation is not thread safe. With regard to schemas: - A schema can be passed in from outside. The returned object will be validated to see that it conforms to the schema. The schema will be attached to the value if possible. - Some objects may contain their own schema, such as tuples. This would be out of scope of the eval() function, were it not for: - Schema validation can be disabled in an evaluation call stack. This is useful if we're evaluating a tuple only for its schema information. At that point, we're not interested if the object is value-complete.
[ "Evaluate", "a", "thunk", "in", "an", "environment", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/framework.py#L30-L55
rix0rrr/gcl
gcl/framework.py
Environment.get_node
def get_node(self, key): """Delegate to our current "value provider" for the node belonging to this key.""" if key in self.names: return self.values.get_member_node(key) if hasattr(self.values, 'get_member_node') else None return self.parent.get_node(key)
python
def get_node(self, key): """Delegate to our current "value provider" for the node belonging to this key.""" if key in self.names: return self.values.get_member_node(key) if hasattr(self.values, 'get_member_node') else None return self.parent.get_node(key)
[ "def", "get_node", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "names", ":", "return", "self", ".", "values", ".", "get_member_node", "(", "key", ")", "if", "hasattr", "(", "self", ".", "values", ",", "'get_member_node'", ")", "else", "None", "return", "self", ".", "parent", ".", "get_node", "(", "key", ")" ]
Delegate to our current "value provider" for the node belonging to this key.
[ "Delegate", "to", "our", "current", "value", "provider", "for", "the", "node", "belonging", "to", "this", "key", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/framework.py#L170-L174
erm0l0v/django-fake-model
django_fake_model/models.py
FakeModel.create_table
def create_table(cls): """ create_table Manually create a temporary table for model in test data base. :return: """ schema_editor = getattr(connection, 'schema_editor', None) if schema_editor: with schema_editor() as schema_editor: schema_editor.create_model(cls) else: raw_sql, _ = connection.creation.sql_create_model( cls, no_style(), []) cls.delete_table() cursor = connection.cursor() try: cursor.execute(*raw_sql) finally: cursor.close()
python
def create_table(cls): """ create_table Manually create a temporary table for model in test data base. :return: """ schema_editor = getattr(connection, 'schema_editor', None) if schema_editor: with schema_editor() as schema_editor: schema_editor.create_model(cls) else: raw_sql, _ = connection.creation.sql_create_model( cls, no_style(), []) cls.delete_table() cursor = connection.cursor() try: cursor.execute(*raw_sql) finally: cursor.close()
[ "def", "create_table", "(", "cls", ")", ":", "schema_editor", "=", "getattr", "(", "connection", ",", "'schema_editor'", ",", "None", ")", "if", "schema_editor", ":", "with", "schema_editor", "(", ")", "as", "schema_editor", ":", "schema_editor", ".", "create_model", "(", "cls", ")", "else", ":", "raw_sql", ",", "_", "=", "connection", ".", "creation", ".", "sql_create_model", "(", "cls", ",", "no_style", "(", ")", ",", "[", "]", ")", "cls", ".", "delete_table", "(", ")", "cursor", "=", "connection", ".", "cursor", "(", ")", "try", ":", "cursor", ".", "execute", "(", "*", "raw_sql", ")", "finally", ":", "cursor", ".", "close", "(", ")" ]
create_table Manually create a temporary table for model in test data base. :return:
[ "create_table" ]
train
https://github.com/erm0l0v/django-fake-model/blob/42fb28ac3aa4db5f82b6cb97a7c2a92b83b36314/django_fake_model/models.py#L22-L43
erm0l0v/django-fake-model
django_fake_model/models.py
FakeModel.delete_table
def delete_table(cls): """ delete_table Manually delete a temporary table for model in test data base. :return: """ schema_editor = getattr(connection, 'schema_editor', None) if schema_editor: with connection.schema_editor() as schema_editor: schema_editor.delete_model(cls) else: cursor = connection.cursor() try: with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'unknown table') cursor.execute('DROP TABLE IF EXISTS {0}'.format(cls._meta.db_table)) finally: cursor.close()
python
def delete_table(cls): """ delete_table Manually delete a temporary table for model in test data base. :return: """ schema_editor = getattr(connection, 'schema_editor', None) if schema_editor: with connection.schema_editor() as schema_editor: schema_editor.delete_model(cls) else: cursor = connection.cursor() try: with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'unknown table') cursor.execute('DROP TABLE IF EXISTS {0}'.format(cls._meta.db_table)) finally: cursor.close()
[ "def", "delete_table", "(", "cls", ")", ":", "schema_editor", "=", "getattr", "(", "connection", ",", "'schema_editor'", ",", "None", ")", "if", "schema_editor", ":", "with", "connection", ".", "schema_editor", "(", ")", "as", "schema_editor", ":", "schema_editor", ".", "delete_model", "(", "cls", ")", "else", ":", "cursor", "=", "connection", ".", "cursor", "(", ")", "try", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "'unknown table'", ")", "cursor", ".", "execute", "(", "'DROP TABLE IF EXISTS {0}'", ".", "format", "(", "cls", ".", "_meta", ".", "db_table", ")", ")", "finally", ":", "cursor", ".", "close", "(", ")" ]
delete_table Manually delete a temporary table for model in test data base. :return:
[ "delete_table" ]
train
https://github.com/erm0l0v/django-fake-model/blob/42fb28ac3aa4db5f82b6cb97a7c2a92b83b36314/django_fake_model/models.py#L46-L64
erm0l0v/django-fake-model
django_fake_model/models.py
FakeModel.fake_me
def fake_me(cls, source): """ fake_me Class or method decorator Class decorator: create temporary table for all tests in SimpleTestCase. Method decorator: create temporary model only for given test method. :param source: SimpleTestCase or test function :return: """ if source and type(source) == type and issubclass(source, SimpleTestCase): return cls._class_extension(source) elif hasattr(source, '__call__'): return cls._decorator(source) else: raise AttributeError('source - must be a SimpleTestCase subclass of function')
python
def fake_me(cls, source): """ fake_me Class or method decorator Class decorator: create temporary table for all tests in SimpleTestCase. Method decorator: create temporary model only for given test method. :param source: SimpleTestCase or test function :return: """ if source and type(source) == type and issubclass(source, SimpleTestCase): return cls._class_extension(source) elif hasattr(source, '__call__'): return cls._decorator(source) else: raise AttributeError('source - must be a SimpleTestCase subclass of function')
[ "def", "fake_me", "(", "cls", ",", "source", ")", ":", "if", "source", "and", "type", "(", "source", ")", "==", "type", "and", "issubclass", "(", "source", ",", "SimpleTestCase", ")", ":", "return", "cls", ".", "_class_extension", "(", "source", ")", "elif", "hasattr", "(", "source", ",", "'__call__'", ")", ":", "return", "cls", ".", "_decorator", "(", "source", ")", "else", ":", "raise", "AttributeError", "(", "'source - must be a SimpleTestCase subclass of function'", ")" ]
fake_me Class or method decorator Class decorator: create temporary table for all tests in SimpleTestCase. Method decorator: create temporary model only for given test method. :param source: SimpleTestCase or test function :return:
[ "fake_me" ]
train
https://github.com/erm0l0v/django-fake-model/blob/42fb28ac3aa4db5f82b6cb97a7c2a92b83b36314/django_fake_model/models.py#L67-L83
obspy/vcr
vcr/core.py
vcr
def vcr(decorated_func=None, debug=False, overwrite=False, disabled=False, playback_only=False, tape_name=None): """ Decorator for capturing and simulating network communication ``debug`` : bool, optional Enables debug mode. ``overwrite`` : bool, optional Will run vcr in recording mode - overwrites any existing vcrtapes. ``playback_only`` : bool, optional Will run vcr in playback mode - will not create missing vcrtapes. ``disabled`` : bool, optional Completely disables vcr - same effect as removing the decorator. ``tape_name`` : str, optional Use given custom file name instead of an auto-generated name for the tape file. """ def _vcr_outer(func): """ Wrapper around _vcr_inner allowing optional arguments on decorator """ def _vcr_inner(*args, **kwargs): """ The actual decorator doing a lot of monkey patching and auto magic """ if disabled or VCRSystem.disabled: # execute decorated function without VCR return func(*args, **kwargs) # prepare VCR tape if func.__module__ == 'doctest': source_filename = func.__self__._dt_test.filename file_name = os.path.splitext( os.path.basename(source_filename))[0] # check if a tests directory exists path = os.path.join(os.path.dirname(source_filename), 'tests') if os.path.exists(path): # ./test/vcrtapes/tape_name.vcr path = os.path.join(os.path.dirname(source_filename), 'tests', 'vcrtapes') else: # ./vcrtapes/tape_name.vcr path = os.path.join(os.path.dirname(source_filename), 'vcrtapes') func_name = func.__self__._dt_test.name.split('.')[-1] else: source_filename = func.__code__.co_filename file_name = os.path.splitext( os.path.basename(source_filename))[0] path = os.path.join( os.path.dirname(source_filename), 'vcrtapes') func_name = func.__name__ if tape_name: # tape file name is given - either full path is given or use # 'vcrtapes' directory if os.sep in tape_name: temp = os.path.abspath(tape_name) path = os.path.dirname(temp) if not os.path.isdir(path): os.makedirs(path) tape = os.path.join(path, '%s' % (tape_name)) else: # make 
sure 'vcrtapes' directory exists if not os.path.isdir(path): os.makedirs(path) # auto-generated file name tape = os.path.join(path, '%s.%s.vcr' % (file_name, func_name)) # enable VCR with VCRSystem(debug=debug): # check for tape file and determine mode if not (playback_only or VCRSystem.playback_only) and ( not os.path.isfile(tape) or overwrite or VCRSystem.overwrite): # record mode if PY2: msg = 'VCR records only in PY3 to be backward ' + \ 'compatible with PY2 - skipping VCR ' + \ 'mechanics for %s' warnings.warn(msg % (func.__name__)) # disable VCR VCRSystem.stop() # execute decorated function without VCR return func(*args, **kwargs) if VCRSystem.debug: print('\nVCR RECORDING (%s) ...' % (func_name)) VCRSystem.status = VCR_RECORD # execute decorated function value = func(*args, **kwargs) # check if vcr is actually used at all if len(VCRSystem.playlist) == 0: msg = 'no socket activity - @vcr unneeded for %s' msg = msg % (func.__name__) if VCRSystem.raise_if_not_needed: raise Exception(msg) else: warnings.warn(msg) else: # remove existing tape try: os.remove(tape) except OSError: pass # write playlist to file with gzip.open(tape, 'wb') as fh: pickle.dump(VCRSystem.playlist, fh, protocol=2) else: # playback mode if VCRSystem.debug: print('\nVCR PLAYBACK (%s) ...' % (func_name)) VCRSystem.status = VCR_PLAYBACK # if playback is requested and tape is missing: raise! 
if not os.path.exists(tape): msg = 'Missing VCR tape file for playback: {}' raise IOError(msg.format(tape)) # load playlist try: with gzip.open(tape, 'rb') as fh: VCRSystem.playlist = pickle.load(fh) except OSError: # support for older uncompressed tapes with open(tape, 'rb') as fh: VCRSystem.playlist = pickle.load(fh) if VCRSystem.debug: print('Loaded playlist:') for i, item in enumerate(VCRSystem.playlist): print('{:3d}: {} {} {}'.format(i, *item)) print() # execute decorated function value = func(*args, **kwargs) return value return _vcr_inner if decorated_func is None: # without arguments return _vcr_outer else: # with arguments return _vcr_outer(decorated_func)
python
def vcr(decorated_func=None, debug=False, overwrite=False, disabled=False, playback_only=False, tape_name=None): """ Decorator for capturing and simulating network communication ``debug`` : bool, optional Enables debug mode. ``overwrite`` : bool, optional Will run vcr in recording mode - overwrites any existing vcrtapes. ``playback_only`` : bool, optional Will run vcr in playback mode - will not create missing vcrtapes. ``disabled`` : bool, optional Completely disables vcr - same effect as removing the decorator. ``tape_name`` : str, optional Use given custom file name instead of an auto-generated name for the tape file. """ def _vcr_outer(func): """ Wrapper around _vcr_inner allowing optional arguments on decorator """ def _vcr_inner(*args, **kwargs): """ The actual decorator doing a lot of monkey patching and auto magic """ if disabled or VCRSystem.disabled: # execute decorated function without VCR return func(*args, **kwargs) # prepare VCR tape if func.__module__ == 'doctest': source_filename = func.__self__._dt_test.filename file_name = os.path.splitext( os.path.basename(source_filename))[0] # check if a tests directory exists path = os.path.join(os.path.dirname(source_filename), 'tests') if os.path.exists(path): # ./test/vcrtapes/tape_name.vcr path = os.path.join(os.path.dirname(source_filename), 'tests', 'vcrtapes') else: # ./vcrtapes/tape_name.vcr path = os.path.join(os.path.dirname(source_filename), 'vcrtapes') func_name = func.__self__._dt_test.name.split('.')[-1] else: source_filename = func.__code__.co_filename file_name = os.path.splitext( os.path.basename(source_filename))[0] path = os.path.join( os.path.dirname(source_filename), 'vcrtapes') func_name = func.__name__ if tape_name: # tape file name is given - either full path is given or use # 'vcrtapes' directory if os.sep in tape_name: temp = os.path.abspath(tape_name) path = os.path.dirname(temp) if not os.path.isdir(path): os.makedirs(path) tape = os.path.join(path, '%s' % (tape_name)) else: # make 
sure 'vcrtapes' directory exists if not os.path.isdir(path): os.makedirs(path) # auto-generated file name tape = os.path.join(path, '%s.%s.vcr' % (file_name, func_name)) # enable VCR with VCRSystem(debug=debug): # check for tape file and determine mode if not (playback_only or VCRSystem.playback_only) and ( not os.path.isfile(tape) or overwrite or VCRSystem.overwrite): # record mode if PY2: msg = 'VCR records only in PY3 to be backward ' + \ 'compatible with PY2 - skipping VCR ' + \ 'mechanics for %s' warnings.warn(msg % (func.__name__)) # disable VCR VCRSystem.stop() # execute decorated function without VCR return func(*args, **kwargs) if VCRSystem.debug: print('\nVCR RECORDING (%s) ...' % (func_name)) VCRSystem.status = VCR_RECORD # execute decorated function value = func(*args, **kwargs) # check if vcr is actually used at all if len(VCRSystem.playlist) == 0: msg = 'no socket activity - @vcr unneeded for %s' msg = msg % (func.__name__) if VCRSystem.raise_if_not_needed: raise Exception(msg) else: warnings.warn(msg) else: # remove existing tape try: os.remove(tape) except OSError: pass # write playlist to file with gzip.open(tape, 'wb') as fh: pickle.dump(VCRSystem.playlist, fh, protocol=2) else: # playback mode if VCRSystem.debug: print('\nVCR PLAYBACK (%s) ...' % (func_name)) VCRSystem.status = VCR_PLAYBACK # if playback is requested and tape is missing: raise! 
if not os.path.exists(tape): msg = 'Missing VCR tape file for playback: {}' raise IOError(msg.format(tape)) # load playlist try: with gzip.open(tape, 'rb') as fh: VCRSystem.playlist = pickle.load(fh) except OSError: # support for older uncompressed tapes with open(tape, 'rb') as fh: VCRSystem.playlist = pickle.load(fh) if VCRSystem.debug: print('Loaded playlist:') for i, item in enumerate(VCRSystem.playlist): print('{:3d}: {} {} {}'.format(i, *item)) print() # execute decorated function value = func(*args, **kwargs) return value return _vcr_inner if decorated_func is None: # without arguments return _vcr_outer else: # with arguments return _vcr_outer(decorated_func)
[ "def", "vcr", "(", "decorated_func", "=", "None", ",", "debug", "=", "False", ",", "overwrite", "=", "False", ",", "disabled", "=", "False", ",", "playback_only", "=", "False", ",", "tape_name", "=", "None", ")", ":", "def", "_vcr_outer", "(", "func", ")", ":", "\"\"\"\n Wrapper around _vcr_inner allowing optional arguments on decorator\n \"\"\"", "def", "_vcr_inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n The actual decorator doing a lot of monkey patching and auto magic\n \"\"\"", "if", "disabled", "or", "VCRSystem", ".", "disabled", ":", "# execute decorated function without VCR", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# prepare VCR tape", "if", "func", ".", "__module__", "==", "'doctest'", ":", "source_filename", "=", "func", ".", "__self__", ".", "_dt_test", ".", "filename", "file_name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "source_filename", ")", ")", "[", "0", "]", "# check if a tests directory exists", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "source_filename", ")", ",", "'tests'", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "# ./test/vcrtapes/tape_name.vcr", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "source_filename", ")", ",", "'tests'", ",", "'vcrtapes'", ")", "else", ":", "# ./vcrtapes/tape_name.vcr", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "source_filename", ")", ",", "'vcrtapes'", ")", "func_name", "=", "func", ".", "__self__", ".", "_dt_test", ".", "name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "else", ":", "source_filename", "=", "func", ".", "__code__", ".", "co_filename", "file_name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "source_filename", ")", ")", "[", "0", "]", "path", "=", "os", ".", "path", ".", "join", "(", 
"os", ".", "path", ".", "dirname", "(", "source_filename", ")", ",", "'vcrtapes'", ")", "func_name", "=", "func", ".", "__name__", "if", "tape_name", ":", "# tape file name is given - either full path is given or use", "# 'vcrtapes' directory", "if", "os", ".", "sep", "in", "tape_name", ":", "temp", "=", "os", ".", "path", ".", "abspath", "(", "tape_name", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "temp", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "tape", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'%s'", "%", "(", "tape_name", ")", ")", "else", ":", "# make sure 'vcrtapes' directory exists", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "# auto-generated file name", "tape", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'%s.%s.vcr'", "%", "(", "file_name", ",", "func_name", ")", ")", "# enable VCR", "with", "VCRSystem", "(", "debug", "=", "debug", ")", ":", "# check for tape file and determine mode", "if", "not", "(", "playback_only", "or", "VCRSystem", ".", "playback_only", ")", "and", "(", "not", "os", ".", "path", ".", "isfile", "(", "tape", ")", "or", "overwrite", "or", "VCRSystem", ".", "overwrite", ")", ":", "# record mode", "if", "PY2", ":", "msg", "=", "'VCR records only in PY3 to be backward '", "+", "'compatible with PY2 - skipping VCR '", "+", "'mechanics for %s'", "warnings", ".", "warn", "(", "msg", "%", "(", "func", ".", "__name__", ")", ")", "# disable VCR", "VCRSystem", ".", "stop", "(", ")", "# execute decorated function without VCR", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "VCRSystem", ".", "debug", ":", "print", "(", "'\\nVCR RECORDING (%s) ...'", "%", "(", "func_name", ")", ")", "VCRSystem", ".", "status", "=", "VCR_RECORD", "# execute decorated function", "value", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# check if vcr is 
actually used at all", "if", "len", "(", "VCRSystem", ".", "playlist", ")", "==", "0", ":", "msg", "=", "'no socket activity - @vcr unneeded for %s'", "msg", "=", "msg", "%", "(", "func", ".", "__name__", ")", "if", "VCRSystem", ".", "raise_if_not_needed", ":", "raise", "Exception", "(", "msg", ")", "else", ":", "warnings", ".", "warn", "(", "msg", ")", "else", ":", "# remove existing tape", "try", ":", "os", ".", "remove", "(", "tape", ")", "except", "OSError", ":", "pass", "# write playlist to file", "with", "gzip", ".", "open", "(", "tape", ",", "'wb'", ")", "as", "fh", ":", "pickle", ".", "dump", "(", "VCRSystem", ".", "playlist", ",", "fh", ",", "protocol", "=", "2", ")", "else", ":", "# playback mode", "if", "VCRSystem", ".", "debug", ":", "print", "(", "'\\nVCR PLAYBACK (%s) ...'", "%", "(", "func_name", ")", ")", "VCRSystem", ".", "status", "=", "VCR_PLAYBACK", "# if playback is requested and tape is missing: raise!", "if", "not", "os", ".", "path", ".", "exists", "(", "tape", ")", ":", "msg", "=", "'Missing VCR tape file for playback: {}'", "raise", "IOError", "(", "msg", ".", "format", "(", "tape", ")", ")", "# load playlist", "try", ":", "with", "gzip", ".", "open", "(", "tape", ",", "'rb'", ")", "as", "fh", ":", "VCRSystem", ".", "playlist", "=", "pickle", ".", "load", "(", "fh", ")", "except", "OSError", ":", "# support for older uncompressed tapes", "with", "open", "(", "tape", ",", "'rb'", ")", "as", "fh", ":", "VCRSystem", ".", "playlist", "=", "pickle", ".", "load", "(", "fh", ")", "if", "VCRSystem", ".", "debug", ":", "print", "(", "'Loaded playlist:'", ")", "for", "i", ",", "item", "in", "enumerate", "(", "VCRSystem", ".", "playlist", ")", ":", "print", "(", "'{:3d}: {} {} {}'", ".", "format", "(", "i", ",", "*", "item", ")", ")", "print", "(", ")", "# execute decorated function", "value", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "value", "return", "_vcr_inner", "if", "decorated_func", "is", "None", ":", "# without 
arguments", "return", "_vcr_outer", "else", ":", "# with arguments", "return", "_vcr_outer", "(", "decorated_func", ")" ]
Decorator for capturing and simulating network communication ``debug`` : bool, optional Enables debug mode. ``overwrite`` : bool, optional Will run vcr in recording mode - overwrites any existing vcrtapes. ``playback_only`` : bool, optional Will run vcr in playback mode - will not create missing vcrtapes. ``disabled`` : bool, optional Completely disables vcr - same effect as removing the decorator. ``tape_name`` : str, optional Use given custom file name instead of an auto-generated name for the tape file.
[ "Decorator", "for", "capturing", "and", "simulating", "network", "communication" ]
train
https://github.com/obspy/vcr/blob/f961d3bffc57d1761b6de2fb1e67d5f464ebc6b6/vcr/core.py#L413-L555
obspy/vcr
vcr/core.py
VCRSystem.reset
def reset(cls): """ Reset to default settings """ cls.debug = False cls.disabled = False cls.overwrite = False cls.playback_only = False cls.recv_timeout = 5 cls.recv_endmarkers = [] cls.recv_size = None
python
def reset(cls): """ Reset to default settings """ cls.debug = False cls.disabled = False cls.overwrite = False cls.playback_only = False cls.recv_timeout = 5 cls.recv_endmarkers = [] cls.recv_size = None
[ "def", "reset", "(", "cls", ")", ":", "cls", ".", "debug", "=", "False", "cls", ".", "disabled", "=", "False", "cls", ".", "overwrite", "=", "False", "cls", ".", "playback_only", "=", "False", "cls", ".", "recv_timeout", "=", "5", "cls", ".", "recv_endmarkers", "=", "[", "]", "cls", ".", "recv_size", "=", "None" ]
Reset to default settings
[ "Reset", "to", "default", "settings" ]
train
https://github.com/obspy/vcr/blob/f961d3bffc57d1761b6de2fb1e67d5f464ebc6b6/vcr/core.py#L112-L122
rix0rrr/gcl
gcl/util.py
to_python
def to_python(value, seen=None): """Reify values to their Python equivalents. Does recursion detection, failing when that happens. """ seen = seen or set() if isinstance(value, framework.TupleLike): if value.ident in seen: raise RecursionException('to_python: infinite recursion while evaluating %r' % value) new_seen = seen.union([value.ident]) return {k: to_python(value[k], seen=new_seen) for k in value.exportable_keys()} if isinstance(value, dict): return {k: to_python(value[k], seen=seen) for k in value.keys()} if isinstance(value, list): return [to_python(x, seen=seen) for x in value] return value
python
def to_python(value, seen=None): """Reify values to their Python equivalents. Does recursion detection, failing when that happens. """ seen = seen or set() if isinstance(value, framework.TupleLike): if value.ident in seen: raise RecursionException('to_python: infinite recursion while evaluating %r' % value) new_seen = seen.union([value.ident]) return {k: to_python(value[k], seen=new_seen) for k in value.exportable_keys()} if isinstance(value, dict): return {k: to_python(value[k], seen=seen) for k in value.keys()} if isinstance(value, list): return [to_python(x, seen=seen) for x in value] return value
[ "def", "to_python", "(", "value", ",", "seen", "=", "None", ")", ":", "seen", "=", "seen", "or", "set", "(", ")", "if", "isinstance", "(", "value", ",", "framework", ".", "TupleLike", ")", ":", "if", "value", ".", "ident", "in", "seen", ":", "raise", "RecursionException", "(", "'to_python: infinite recursion while evaluating %r'", "%", "value", ")", "new_seen", "=", "seen", ".", "union", "(", "[", "value", ".", "ident", "]", ")", "return", "{", "k", ":", "to_python", "(", "value", "[", "k", "]", ",", "seen", "=", "new_seen", ")", "for", "k", "in", "value", ".", "exportable_keys", "(", ")", "}", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "{", "k", ":", "to_python", "(", "value", "[", "k", "]", ",", "seen", "=", "seen", ")", "for", "k", "in", "value", ".", "keys", "(", ")", "}", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "[", "to_python", "(", "x", ",", "seen", "=", "seen", ")", "for", "x", "in", "value", "]", "return", "value" ]
Reify values to their Python equivalents. Does recursion detection, failing when that happens.
[ "Reify", "values", "to", "their", "Python", "equivalents", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/util.py#L81-L96
rix0rrr/gcl
gcl/util.py
walk
def walk(value, walker, path=None, seen=None): """Walks the _evaluated_ tree of the given GCL tuple. The appropriate methods of walker will be invoked for every element in the tree. """ seen = seen or set() path = path or [] # Recursion if id(value) in seen: walker.visitRecursion(path) return # Error if isinstance(value, Exception): walker.visitError(path, value) return # List if isinstance(value, list): # Not actually a tuple, but okay recurse = walker.enterList(value, path) if not recurse: return next_walker = walker if recurse is True else recurse with TempSetAdd(seen, id(value)): for i, x in enumerate(value): walk(x, next_walker, path=path + ['[%d]' % i], seen=seen) walker.leaveList(value, path) return # Scalar if not isinstance(value, framework.TupleLike): walker.visitScalar(path, value) return # Tuple recurse = walker.enterTuple(value, path) if not recurse: return next_walker = walker if recurse is True else recurse with TempSetAdd(seen, id(value)): keys = sorted(value.keys()) for key in keys: key_path = path + [key] elm = get_or_error(value, key) walk(elm, next_walker, path=key_path, seen=seen) walker.leaveTuple(value, path)
python
def walk(value, walker, path=None, seen=None): """Walks the _evaluated_ tree of the given GCL tuple. The appropriate methods of walker will be invoked for every element in the tree. """ seen = seen or set() path = path or [] # Recursion if id(value) in seen: walker.visitRecursion(path) return # Error if isinstance(value, Exception): walker.visitError(path, value) return # List if isinstance(value, list): # Not actually a tuple, but okay recurse = walker.enterList(value, path) if not recurse: return next_walker = walker if recurse is True else recurse with TempSetAdd(seen, id(value)): for i, x in enumerate(value): walk(x, next_walker, path=path + ['[%d]' % i], seen=seen) walker.leaveList(value, path) return # Scalar if not isinstance(value, framework.TupleLike): walker.visitScalar(path, value) return # Tuple recurse = walker.enterTuple(value, path) if not recurse: return next_walker = walker if recurse is True else recurse with TempSetAdd(seen, id(value)): keys = sorted(value.keys()) for key in keys: key_path = path + [key] elm = get_or_error(value, key) walk(elm, next_walker, path=key_path, seen=seen) walker.leaveTuple(value, path)
[ "def", "walk", "(", "value", ",", "walker", ",", "path", "=", "None", ",", "seen", "=", "None", ")", ":", "seen", "=", "seen", "or", "set", "(", ")", "path", "=", "path", "or", "[", "]", "# Recursion", "if", "id", "(", "value", ")", "in", "seen", ":", "walker", ".", "visitRecursion", "(", "path", ")", "return", "# Error", "if", "isinstance", "(", "value", ",", "Exception", ")", ":", "walker", ".", "visitError", "(", "path", ",", "value", ")", "return", "# List", "if", "isinstance", "(", "value", ",", "list", ")", ":", "# Not actually a tuple, but okay", "recurse", "=", "walker", ".", "enterList", "(", "value", ",", "path", ")", "if", "not", "recurse", ":", "return", "next_walker", "=", "walker", "if", "recurse", "is", "True", "else", "recurse", "with", "TempSetAdd", "(", "seen", ",", "id", "(", "value", ")", ")", ":", "for", "i", ",", "x", "in", "enumerate", "(", "value", ")", ":", "walk", "(", "x", ",", "next_walker", ",", "path", "=", "path", "+", "[", "'[%d]'", "%", "i", "]", ",", "seen", "=", "seen", ")", "walker", ".", "leaveList", "(", "value", ",", "path", ")", "return", "# Scalar", "if", "not", "isinstance", "(", "value", ",", "framework", ".", "TupleLike", ")", ":", "walker", ".", "visitScalar", "(", "path", ",", "value", ")", "return", "# Tuple", "recurse", "=", "walker", ".", "enterTuple", "(", "value", ",", "path", ")", "if", "not", "recurse", ":", "return", "next_walker", "=", "walker", "if", "recurse", "is", "True", "else", "recurse", "with", "TempSetAdd", "(", "seen", ",", "id", "(", "value", ")", ")", ":", "keys", "=", "sorted", "(", "value", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "key_path", "=", "path", "+", "[", "key", "]", "elm", "=", "get_or_error", "(", "value", ",", "key", ")", "walk", "(", "elm", ",", "next_walker", ",", "path", "=", "key_path", ",", "seen", "=", "seen", ")", "walker", ".", "leaveTuple", "(", "value", ",", "path", ")" ]
Walks the _evaluated_ tree of the given GCL tuple. The appropriate methods of walker will be invoked for every element in the tree.
[ "Walks", "the", "_evaluated_", "tree", "of", "the", "given", "GCL", "tuple", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/util.py#L99-L149
rix0rrr/gcl
gcl/util.py
fingerprint
def fingerprint(value): """Return a hash value that uniquely identifies the GCL value.""" h = hashlib.sha256() _digest(value, h) return h.digest().encode('hex')
python
def fingerprint(value): """Return a hash value that uniquely identifies the GCL value.""" h = hashlib.sha256() _digest(value, h) return h.digest().encode('hex')
[ "def", "fingerprint", "(", "value", ")", ":", "h", "=", "hashlib", ".", "sha256", "(", ")", "_digest", "(", "value", ",", "h", ")", "return", "h", ".", "digest", "(", ")", ".", "encode", "(", "'hex'", ")" ]
Return a hash value that uniquely identifies the GCL value.
[ "Return", "a", "hash", "value", "that", "uniquely", "identifies", "the", "GCL", "value", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/util.py#L183-L187
rix0rrr/gcl
gcl/util.py
compact_error
def compact_error(err): """Return the the last 2 error messages from an error stack. These error messages turns out to be the most descriptive. """ def err2(e): if isinstance(e, exceptions.EvaluationError) and e.inner: message, i = err2(e.inner) if i == 1: return ', '.join([e.args[0], str(e.inner)]), i + 1 else: return message, i + 1 else: return str(e), 1 return err2(err)[0]
python
def compact_error(err): """Return the the last 2 error messages from an error stack. These error messages turns out to be the most descriptive. """ def err2(e): if isinstance(e, exceptions.EvaluationError) and e.inner: message, i = err2(e.inner) if i == 1: return ', '.join([e.args[0], str(e.inner)]), i + 1 else: return message, i + 1 else: return str(e), 1 return err2(err)[0]
[ "def", "compact_error", "(", "err", ")", ":", "def", "err2", "(", "e", ")", ":", "if", "isinstance", "(", "e", ",", "exceptions", ".", "EvaluationError", ")", "and", "e", ".", "inner", ":", "message", ",", "i", "=", "err2", "(", "e", ".", "inner", ")", "if", "i", "==", "1", ":", "return", "', '", ".", "join", "(", "[", "e", ".", "args", "[", "0", "]", ",", "str", "(", "e", ".", "inner", ")", "]", ")", ",", "i", "+", "1", "else", ":", "return", "message", ",", "i", "+", "1", "else", ":", "return", "str", "(", "e", ")", ",", "1", "return", "err2", "(", "err", ")", "[", "0", "]" ]
Return the the last 2 error messages from an error stack. These error messages turns out to be the most descriptive.
[ "Return", "the", "the", "last", "2", "error", "messages", "from", "an", "error", "stack", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/util.py#L280-L294
hannes-brt/hebel
hebel/layers/logistic_layer.py
LogisticLayer.backprop
def backprop(self, input_data, targets, cache=None): """ Backpropagate through the logistic layer. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input. """ if cache is not None: activations = cache else: activations = self.feed_forward(input_data, prediction=False) if activations.shape != targets.shape: raise ValueError('Activations (shape = %s) and targets (shape = %s) are different sizes' % (activations.shape, targets.shape)) delta = substract_matrix(activations, targets) nan_to_zeros(delta, delta) # Gradient wrt weights df_W = linalg.dot(input_data, delta, transa='T') # Gradient wrt bias df_b = matrix_sum_out_axis(delta, 0) # Gradient wrt input df_input = linalg.dot(delta, self.W, transb='T') # L1 penalty if self.l1_penalty_weight: df_W += self.l1_penalty_weight * sign(self.W) # L2 penalty if self.l2_penalty_weight: df_W += self.l2_penalty_weight * self.W return (df_W, df_b), df_input
python
def backprop(self, input_data, targets, cache=None): """ Backpropagate through the logistic layer. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input. """ if cache is not None: activations = cache else: activations = self.feed_forward(input_data, prediction=False) if activations.shape != targets.shape: raise ValueError('Activations (shape = %s) and targets (shape = %s) are different sizes' % (activations.shape, targets.shape)) delta = substract_matrix(activations, targets) nan_to_zeros(delta, delta) # Gradient wrt weights df_W = linalg.dot(input_data, delta, transa='T') # Gradient wrt bias df_b = matrix_sum_out_axis(delta, 0) # Gradient wrt input df_input = linalg.dot(delta, self.W, transb='T') # L1 penalty if self.l1_penalty_weight: df_W += self.l1_penalty_weight * sign(self.W) # L2 penalty if self.l2_penalty_weight: df_W += self.l2_penalty_weight * self.W return (df_W, df_b), df_input
[ "def", "backprop", "(", "self", ",", "input_data", ",", "targets", ",", "cache", "=", "None", ")", ":", "if", "cache", "is", "not", "None", ":", "activations", "=", "cache", "else", ":", "activations", "=", "self", ".", "feed_forward", "(", "input_data", ",", "prediction", "=", "False", ")", "if", "activations", ".", "shape", "!=", "targets", ".", "shape", ":", "raise", "ValueError", "(", "'Activations (shape = %s) and targets (shape = %s) are different sizes'", "%", "(", "activations", ".", "shape", ",", "targets", ".", "shape", ")", ")", "delta", "=", "substract_matrix", "(", "activations", ",", "targets", ")", "nan_to_zeros", "(", "delta", ",", "delta", ")", "# Gradient wrt weights", "df_W", "=", "linalg", ".", "dot", "(", "input_data", ",", "delta", ",", "transa", "=", "'T'", ")", "# Gradient wrt bias", "df_b", "=", "matrix_sum_out_axis", "(", "delta", ",", "0", ")", "# Gradient wrt input", "df_input", "=", "linalg", ".", "dot", "(", "delta", ",", "self", ".", "W", ",", "transb", "=", "'T'", ")", "# L1 penalty", "if", "self", ".", "l1_penalty_weight", ":", "df_W", "+=", "self", ".", "l1_penalty_weight", "*", "sign", "(", "self", ".", "W", ")", "# L2 penalty", "if", "self", ".", "l2_penalty_weight", ":", "df_W", "+=", "self", ".", "l2_penalty_weight", "*", "self", ".", "W", "return", "(", "df_W", ",", "df_b", ")", ",", "df_input" ]
Backpropagate through the logistic layer. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input.
[ "Backpropagate", "through", "the", "logistic", "layer", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/logistic_layer.py#L170-L224
hannes-brt/hebel
hebel/layers/logistic_layer.py
LogisticLayer.cross_entropy_error
def cross_entropy_error(self, input_data, targets, average=True, cache=None, prediction=False): """ Return the cross entropy error """ if cache is not None: activations = cache else: activations = \ self.feed_forward(input_data, prediction=prediction) loss = cross_entropy_logistic(activations, targets) if average: loss /= targets.shape[0] # assert np.isfinite(loss) return loss.get()
python
def cross_entropy_error(self, input_data, targets, average=True, cache=None, prediction=False): """ Return the cross entropy error """ if cache is not None: activations = cache else: activations = \ self.feed_forward(input_data, prediction=prediction) loss = cross_entropy_logistic(activations, targets) if average: loss /= targets.shape[0] # assert np.isfinite(loss) return loss.get()
[ "def", "cross_entropy_error", "(", "self", ",", "input_data", ",", "targets", ",", "average", "=", "True", ",", "cache", "=", "None", ",", "prediction", "=", "False", ")", ":", "if", "cache", "is", "not", "None", ":", "activations", "=", "cache", "else", ":", "activations", "=", "self", ".", "feed_forward", "(", "input_data", ",", "prediction", "=", "prediction", ")", "loss", "=", "cross_entropy_logistic", "(", "activations", ",", "targets", ")", "if", "average", ":", "loss", "/=", "targets", ".", "shape", "[", "0", "]", "# assert np.isfinite(loss)", "return", "loss", ".", "get", "(", ")" ]
Return the cross entropy error
[ "Return", "the", "cross", "entropy", "error" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/logistic_layer.py#L271-L286
rix0rrr/gcl
gcl/doc.py
stylize_comment_block
def stylize_comment_block(lines): """Parse comment lines and make subsequent indented lines into a code block block. """ normal, sep, in_code = range(3) state = normal for line in lines: indented = line.startswith(' ') empty_line = line.strip() == '' if state == normal and empty_line: state = sep elif state in [sep, normal] and indented: yield '' if indented: yield '.. code-block:: javascript' yield '' yield line state = in_code else: state = normal elif state == sep and not empty_line: yield '' yield line state = normal else: yield line if state == in_code and not (indented or empty_line): sep = normal
python
def stylize_comment_block(lines): """Parse comment lines and make subsequent indented lines into a code block block. """ normal, sep, in_code = range(3) state = normal for line in lines: indented = line.startswith(' ') empty_line = line.strip() == '' if state == normal and empty_line: state = sep elif state in [sep, normal] and indented: yield '' if indented: yield '.. code-block:: javascript' yield '' yield line state = in_code else: state = normal elif state == sep and not empty_line: yield '' yield line state = normal else: yield line if state == in_code and not (indented or empty_line): sep = normal
[ "def", "stylize_comment_block", "(", "lines", ")", ":", "normal", ",", "sep", ",", "in_code", "=", "range", "(", "3", ")", "state", "=", "normal", "for", "line", "in", "lines", ":", "indented", "=", "line", ".", "startswith", "(", "' '", ")", "empty_line", "=", "line", ".", "strip", "(", ")", "==", "''", "if", "state", "==", "normal", "and", "empty_line", ":", "state", "=", "sep", "elif", "state", "in", "[", "sep", ",", "normal", "]", "and", "indented", ":", "yield", "''", "if", "indented", ":", "yield", "'.. code-block:: javascript'", "yield", "''", "yield", "line", "state", "=", "in_code", "else", ":", "state", "=", "normal", "elif", "state", "==", "sep", "and", "not", "empty_line", ":", "yield", "''", "yield", "line", "state", "=", "normal", "else", ":", "yield", "line", "if", "state", "==", "in_code", "and", "not", "(", "indented", "or", "empty_line", ")", ":", "sep", "=", "normal" ]
Parse comment lines and make subsequent indented lines into a code block block.
[ "Parse", "comment", "lines", "and", "make", "subsequent", "indented", "lines", "into", "a", "code", "block", "block", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/doc.py#L194-L222
rix0rrr/gcl
gcl/doc.py
sort_members
def sort_members(tup, names): """Return two pairs of members, scalar and tuple members. The scalars will be sorted s.t. the unbound members are at the top. """ scalars, tuples = partition(lambda x: not is_tuple_node(tup.member[x].value), names) unbound, bound = partition(lambda x: tup.member[x].value.is_unbound(), scalars) return usorted(unbound) + usorted(bound), usorted(tuples)
python
def sort_members(tup, names): """Return two pairs of members, scalar and tuple members. The scalars will be sorted s.t. the unbound members are at the top. """ scalars, tuples = partition(lambda x: not is_tuple_node(tup.member[x].value), names) unbound, bound = partition(lambda x: tup.member[x].value.is_unbound(), scalars) return usorted(unbound) + usorted(bound), usorted(tuples)
[ "def", "sort_members", "(", "tup", ",", "names", ")", ":", "scalars", ",", "tuples", "=", "partition", "(", "lambda", "x", ":", "not", "is_tuple_node", "(", "tup", ".", "member", "[", "x", "]", ".", "value", ")", ",", "names", ")", "unbound", ",", "bound", "=", "partition", "(", "lambda", "x", ":", "tup", ".", "member", "[", "x", "]", ".", "value", ".", "is_unbound", "(", ")", ",", "scalars", ")", "return", "usorted", "(", "unbound", ")", "+", "usorted", "(", "bound", ")", ",", "usorted", "(", "tuples", ")" ]
Return two pairs of members, scalar and tuple members. The scalars will be sorted s.t. the unbound members are at the top.
[ "Return", "two", "pairs", "of", "members", "scalar", "and", "tuple", "members", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/doc.py#L229-L236
rix0rrr/gcl
gcl/doc.py
resolve_file
def resolve_file(fname, paths): """Resolve filename relatively against one of the given paths, if possible.""" fpath = path.abspath(fname) for p in paths: spath = path.abspath(p) if fpath.startswith(spath): return fpath[len(spath) + 1:] return fname
python
def resolve_file(fname, paths): """Resolve filename relatively against one of the given paths, if possible.""" fpath = path.abspath(fname) for p in paths: spath = path.abspath(p) if fpath.startswith(spath): return fpath[len(spath) + 1:] return fname
[ "def", "resolve_file", "(", "fname", ",", "paths", ")", ":", "fpath", "=", "path", ".", "abspath", "(", "fname", ")", "for", "p", "in", "paths", ":", "spath", "=", "path", ".", "abspath", "(", "p", ")", "if", "fpath", ".", "startswith", "(", "spath", ")", ":", "return", "fpath", "[", "len", "(", "spath", ")", "+", "1", ":", "]", "return", "fname" ]
Resolve filename relatively against one of the given paths, if possible.
[ "Resolve", "filename", "relatively", "against", "one", "of", "the", "given", "paths", "if", "possible", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/doc.py#L262-L269
rix0rrr/gcl
gcl/doc.py
RstTable.generate
def generate(self): """Generate a list of strings representing the table in RST format.""" header = ' '.join('=' * self.width[i] for i in range(self.w)) lines = [ ' '.join(row[i].ljust(self.width[i]) for i in range(self.w)) for row in self.rows] return [header] + lines + [header]
python
def generate(self): """Generate a list of strings representing the table in RST format.""" header = ' '.join('=' * self.width[i] for i in range(self.w)) lines = [ ' '.join(row[i].ljust(self.width[i]) for i in range(self.w)) for row in self.rows] return [header] + lines + [header]
[ "def", "generate", "(", "self", ")", ":", "header", "=", "' '", ".", "join", "(", "'='", "*", "self", ".", "width", "[", "i", "]", "for", "i", "in", "range", "(", "self", ".", "w", ")", ")", "lines", "=", "[", "' '", ".", "join", "(", "row", "[", "i", "]", ".", "ljust", "(", "self", ".", "width", "[", "i", "]", ")", "for", "i", "in", "range", "(", "self", ".", "w", ")", ")", "for", "row", "in", "self", ".", "rows", "]", "return", "[", "header", "]", "+", "lines", "+", "[", "header", "]" ]
Generate a list of strings representing the table in RST format.
[ "Generate", "a", "list", "of", "strings", "representing", "the", "table", "in", "RST", "format", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/doc.py#L44-L50
rix0rrr/gcl
gcl/query.py
partition
def partition(pred, iterable): 'Use a predicate to partition entries into false entries and true entries' # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 t1, t2 = itertools.tee(iterable) return list(filter(negate(pred), t1)), list(filter(pred, t2))
python
def partition(pred, iterable): 'Use a predicate to partition entries into false entries and true entries' # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9 t1, t2 = itertools.tee(iterable) return list(filter(negate(pred), t1)), list(filter(pred, t2))
[ "def", "partition", "(", "pred", ",", "iterable", ")", ":", "# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9", "t1", ",", "t2", "=", "itertools", ".", "tee", "(", "iterable", ")", "return", "list", "(", "filter", "(", "negate", "(", "pred", ")", ",", "t1", ")", ")", ",", "list", "(", "filter", "(", "pred", ",", "t2", ")", ")" ]
Use a predicate to partition entries into false entries and true entries
[ "Use", "a", "predicate", "to", "partition", "entries", "into", "false", "entries", "and", "true", "entries" ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L63-L67
rix0rrr/gcl
gcl/query.py
GPath.select
def select(self, model): """Select nodes according to the input selector. This can ALWAYS return multiple root elements. """ res = [] def doSelect(value, pre, remaining): if not remaining: res.append((pre, value)) else: # For the other selectors to work, value must be a Tuple or a list at this point. if not is_tuple(value) and not isinstance(value, list): return qhead, qtail = remaining[0], remaining[1:] if isinstance(qhead, tuple) and is_tuple(value): for alt in qhead: if alt in value: doSelect(value[alt], pre + [alt], qtail) elif qhead == '*': if isinstance(value, list): indices = range(len(value)) reprs = [listKey(i) for i in indices] else: indices = value.keys() reprs = indices for key, rep in zip(indices, reprs): doSelect(value[key], pre + [rep], qtail) elif isinstance(qhead, int) and isinstance(value, list): doSelect(value[qhead], pre + [listKey(qhead)], qtail) elif is_tuple(value): if qhead in value: doSelect(value[qhead], pre + [qhead], qtail) for selector in self.selectors: doSelect(model, [], selector) return QueryResult(res)
python
def select(self, model): """Select nodes according to the input selector. This can ALWAYS return multiple root elements. """ res = [] def doSelect(value, pre, remaining): if not remaining: res.append((pre, value)) else: # For the other selectors to work, value must be a Tuple or a list at this point. if not is_tuple(value) and not isinstance(value, list): return qhead, qtail = remaining[0], remaining[1:] if isinstance(qhead, tuple) and is_tuple(value): for alt in qhead: if alt in value: doSelect(value[alt], pre + [alt], qtail) elif qhead == '*': if isinstance(value, list): indices = range(len(value)) reprs = [listKey(i) for i in indices] else: indices = value.keys() reprs = indices for key, rep in zip(indices, reprs): doSelect(value[key], pre + [rep], qtail) elif isinstance(qhead, int) and isinstance(value, list): doSelect(value[qhead], pre + [listKey(qhead)], qtail) elif is_tuple(value): if qhead in value: doSelect(value[qhead], pre + [qhead], qtail) for selector in self.selectors: doSelect(model, [], selector) return QueryResult(res)
[ "def", "select", "(", "self", ",", "model", ")", ":", "res", "=", "[", "]", "def", "doSelect", "(", "value", ",", "pre", ",", "remaining", ")", ":", "if", "not", "remaining", ":", "res", ".", "append", "(", "(", "pre", ",", "value", ")", ")", "else", ":", "# For the other selectors to work, value must be a Tuple or a list at this point.", "if", "not", "is_tuple", "(", "value", ")", "and", "not", "isinstance", "(", "value", ",", "list", ")", ":", "return", "qhead", ",", "qtail", "=", "remaining", "[", "0", "]", ",", "remaining", "[", "1", ":", "]", "if", "isinstance", "(", "qhead", ",", "tuple", ")", "and", "is_tuple", "(", "value", ")", ":", "for", "alt", "in", "qhead", ":", "if", "alt", "in", "value", ":", "doSelect", "(", "value", "[", "alt", "]", ",", "pre", "+", "[", "alt", "]", ",", "qtail", ")", "elif", "qhead", "==", "'*'", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "indices", "=", "range", "(", "len", "(", "value", ")", ")", "reprs", "=", "[", "listKey", "(", "i", ")", "for", "i", "in", "indices", "]", "else", ":", "indices", "=", "value", ".", "keys", "(", ")", "reprs", "=", "indices", "for", "key", ",", "rep", "in", "zip", "(", "indices", ",", "reprs", ")", ":", "doSelect", "(", "value", "[", "key", "]", ",", "pre", "+", "[", "rep", "]", ",", "qtail", ")", "elif", "isinstance", "(", "qhead", ",", "int", ")", "and", "isinstance", "(", "value", ",", "list", ")", ":", "doSelect", "(", "value", "[", "qhead", "]", ",", "pre", "+", "[", "listKey", "(", "qhead", ")", "]", ",", "qtail", ")", "elif", "is_tuple", "(", "value", ")", ":", "if", "qhead", "in", "value", ":", "doSelect", "(", "value", "[", "qhead", "]", ",", "pre", "+", "[", "qhead", "]", ",", "qtail", ")", "for", "selector", "in", "self", ".", "selectors", ":", "doSelect", "(", "model", ",", "[", "]", ",", "selector", ")", "return", "QueryResult", "(", "res", ")" ]
Select nodes according to the input selector. This can ALWAYS return multiple root elements.
[ "Select", "nodes", "according", "to", "the", "input", "selector", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L85-L124
rix0rrr/gcl
gcl/query.py
QueryResult.deep
def deep(self): """Return a deep dict of the values selected. The leaf values may still be gcl Tuples. Use util.to_python() if you want to reify everything to real Python values. """ self.lists = {} ret = {} for path, value in self.paths_values(): self.recursiveSet(ret, path, value) self.removeMissingValuesFromLists() return ret
python
def deep(self): """Return a deep dict of the values selected. The leaf values may still be gcl Tuples. Use util.to_python() if you want to reify everything to real Python values. """ self.lists = {} ret = {} for path, value in self.paths_values(): self.recursiveSet(ret, path, value) self.removeMissingValuesFromLists() return ret
[ "def", "deep", "(", "self", ")", ":", "self", ".", "lists", "=", "{", "}", "ret", "=", "{", "}", "for", "path", ",", "value", "in", "self", ".", "paths_values", "(", ")", ":", "self", ".", "recursiveSet", "(", "ret", ",", "path", ",", "value", ")", "self", ".", "removeMissingValuesFromLists", "(", ")", "return", "ret" ]
Return a deep dict of the values selected. The leaf values may still be gcl Tuples. Use util.to_python() if you want to reify everything to real Python values.
[ "Return", "a", "deep", "dict", "of", "the", "values", "selected", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L145-L156
rix0rrr/gcl
gcl/query.py
QueryResult.ldSet
def ldSet(self, what, key, value): """List/dictionary-aware set.""" if isListKey(key): # Make sure we keep the indexes consistent, insert missing_values # as necessary. We do remember the lists, so that we can remove # missing values after inserting all values from all selectors. self.lists[id(what)] = what ix = listKeyIndex(key) while len(what) <= ix: what.append(missing_value) what[ix] = value else: what[key] = value return value
python
def ldSet(self, what, key, value): """List/dictionary-aware set.""" if isListKey(key): # Make sure we keep the indexes consistent, insert missing_values # as necessary. We do remember the lists, so that we can remove # missing values after inserting all values from all selectors. self.lists[id(what)] = what ix = listKeyIndex(key) while len(what) <= ix: what.append(missing_value) what[ix] = value else: what[key] = value return value
[ "def", "ldSet", "(", "self", ",", "what", ",", "key", ",", "value", ")", ":", "if", "isListKey", "(", "key", ")", ":", "# Make sure we keep the indexes consistent, insert missing_values", "# as necessary. We do remember the lists, so that we can remove", "# missing values after inserting all values from all selectors.", "self", ".", "lists", "[", "id", "(", "what", ")", "]", "=", "what", "ix", "=", "listKeyIndex", "(", "key", ")", "while", "len", "(", "what", ")", "<=", "ix", ":", "what", ".", "append", "(", "missing_value", ")", "what", "[", "ix", "]", "=", "value", "else", ":", "what", "[", "key", "]", "=", "value", "return", "value" ]
List/dictionary-aware set.
[ "List", "/", "dictionary", "-", "aware", "set", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L173-L186
rix0rrr/gcl
gcl/query.py
QueryResult.ldGet
def ldGet(self, what, key): """List-aware get.""" if isListKey(key): return what[listKeyIndex(key)] else: return what[key]
python
def ldGet(self, what, key): """List-aware get.""" if isListKey(key): return what[listKeyIndex(key)] else: return what[key]
[ "def", "ldGet", "(", "self", ",", "what", ",", "key", ")", ":", "if", "isListKey", "(", "key", ")", ":", "return", "what", "[", "listKeyIndex", "(", "key", ")", "]", "else", ":", "return", "what", "[", "key", "]" ]
List-aware get.
[ "List", "-", "aware", "get", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L188-L193
rix0rrr/gcl
gcl/query.py
QueryResult.ldContains
def ldContains(self, what, key): """List/dictinary/missing-aware contains. If the value is a "missing_value", we'll treat it as non-existent so it will be overwritten by an empty list/dict when necessary to assign child keys. """ if isListKey(key): i = listKeyIndex(key) return i < len(what) and what[i] != missing_value else: return key in what and what[key] != missing_value
python
def ldContains(self, what, key): """List/dictinary/missing-aware contains. If the value is a "missing_value", we'll treat it as non-existent so it will be overwritten by an empty list/dict when necessary to assign child keys. """ if isListKey(key): i = listKeyIndex(key) return i < len(what) and what[i] != missing_value else: return key in what and what[key] != missing_value
[ "def", "ldContains", "(", "self", ",", "what", ",", "key", ")", ":", "if", "isListKey", "(", "key", ")", ":", "i", "=", "listKeyIndex", "(", "key", ")", "return", "i", "<", "len", "(", "what", ")", "and", "what", "[", "i", "]", "!=", "missing_value", "else", ":", "return", "key", "in", "what", "and", "what", "[", "key", "]", "!=", "missing_value" ]
List/dictinary/missing-aware contains. If the value is a "missing_value", we'll treat it as non-existent so it will be overwritten by an empty list/dict when necessary to assign child keys.
[ "List", "/", "dictinary", "/", "missing", "-", "aware", "contains", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L195-L206
rix0rrr/gcl
gcl/query.py
TupleFinder.find_recursive_dependency
def find_recursive_dependency(self): """Return a list of nodes that have a recursive dependency.""" nodes_on_path = [] def helper(nodes): for node in nodes: cycle = node in nodes_on_path nodes_on_path.append(node) if cycle or helper(self.deps.get(node, [])): return True nodes_on_path.pop() return False helper(self.unordered) return nodes_on_path
python
def find_recursive_dependency(self): """Return a list of nodes that have a recursive dependency.""" nodes_on_path = [] def helper(nodes): for node in nodes: cycle = node in nodes_on_path nodes_on_path.append(node) if cycle or helper(self.deps.get(node, [])): return True nodes_on_path.pop() return False helper(self.unordered) return nodes_on_path
[ "def", "find_recursive_dependency", "(", "self", ")", ":", "nodes_on_path", "=", "[", "]", "def", "helper", "(", "nodes", ")", ":", "for", "node", "in", "nodes", ":", "cycle", "=", "node", "in", "nodes_on_path", "nodes_on_path", ".", "append", "(", "node", ")", "if", "cycle", "or", "helper", "(", "self", ".", "deps", ".", "get", "(", "node", ",", "[", "]", ")", ")", ":", "return", "True", "nodes_on_path", ".", "pop", "(", ")", "return", "False", "helper", "(", "self", ".", "unordered", ")", "return", "nodes_on_path" ]
Return a list of nodes that have a recursive dependency.
[ "Return", "a", "list", "of", "nodes", "that", "have", "a", "recursive", "dependency", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L307-L321
rix0rrr/gcl
gcl/query.py
TupleFinder.enterTuple
def enterTuple(self, tuple, path): """Called for every tuple. If this returns False, the elements of the tuple will not be recursed over and leaveTuple() will not be called. """ if skip_name(path): return False node = Node(path, tuple) if self.condition.matches(node): self.unordered.append(node) return False return True
python
def enterTuple(self, tuple, path): """Called for every tuple. If this returns False, the elements of the tuple will not be recursed over and leaveTuple() will not be called. """ if skip_name(path): return False node = Node(path, tuple) if self.condition.matches(node): self.unordered.append(node) return False return True
[ "def", "enterTuple", "(", "self", ",", "tuple", ",", "path", ")", ":", "if", "skip_name", "(", "path", ")", ":", "return", "False", "node", "=", "Node", "(", "path", ",", "tuple", ")", "if", "self", ".", "condition", ".", "matches", "(", "node", ")", ":", "self", ".", "unordered", ".", "append", "(", "node", ")", "return", "False", "return", "True" ]
Called for every tuple. If this returns False, the elements of the tuple will not be recursed over and leaveTuple() will not be called.
[ "Called", "for", "every", "tuple", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/query.py#L324-L336
rix0rrr/gcl
gcl/ast.py
convertAndMake
def convertAndMake(converter, handler): """Convert with location.""" def convertAction(loc, value): return handler(loc, converter(value)) return convertAction
python
def convertAndMake(converter, handler): """Convert with location.""" def convertAction(loc, value): return handler(loc, converter(value)) return convertAction
[ "def", "convertAndMake", "(", "converter", ",", "handler", ")", ":", "def", "convertAction", "(", "loc", ",", "value", ")", ":", "return", "handler", "(", "loc", ",", "converter", "(", "value", ")", ")", "return", "convertAction" ]
Convert with location.
[ "Convert", "with", "location", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L75-L79
rix0rrr/gcl
gcl/ast.py
mkApplications
def mkApplications(location, *atoms): """Make a sequence of applications from a list of tokens. atoms is a list of atoms, which will be handled left-associatively. E.g: ['foo', [], []] == foo()() ==> Application(Application('foo', []), []) """ atoms = list(atoms) while len(atoms) > 1: atoms[0:2] = [Application(location, atoms[0], atoms[1])] # Nothing left to apply return atoms[0]
python
def mkApplications(location, *atoms): """Make a sequence of applications from a list of tokens. atoms is a list of atoms, which will be handled left-associatively. E.g: ['foo', [], []] == foo()() ==> Application(Application('foo', []), []) """ atoms = list(atoms) while len(atoms) > 1: atoms[0:2] = [Application(location, atoms[0], atoms[1])] # Nothing left to apply return atoms[0]
[ "def", "mkApplications", "(", "location", ",", "*", "atoms", ")", ":", "atoms", "=", "list", "(", "atoms", ")", "while", "len", "(", "atoms", ")", ">", "1", ":", "atoms", "[", "0", ":", "2", "]", "=", "[", "Application", "(", "location", ",", "atoms", "[", "0", "]", ",", "atoms", "[", "1", "]", ")", "]", "# Nothing left to apply", "return", "atoms", "[", "0", "]" ]
Make a sequence of applications from a list of tokens. atoms is a list of atoms, which will be handled left-associatively. E.g: ['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
[ "Make", "a", "sequence", "of", "applications", "from", "a", "list", "of", "tokens", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L528-L540
rix0rrr/gcl
gcl/ast.py
call_fn
def call_fn(fn, arglist, env): """Call a function, respecting all the various types of functions that exist.""" if isinstance(fn, framework.LazyFunction): # The following looks complicated, but this is necessary because you can't # construct closures over the loop variable directly. thunks = [(lambda thunk: lambda: framework.eval(thunk, env))(th) for th in arglist.values] return fn(*thunks) evaled_args = framework.eval(arglist, env) if isinstance(fn, framework.EnvironmentFunction): return fn(*evaled_args, env=env) return fn(*evaled_args)
python
def call_fn(fn, arglist, env): """Call a function, respecting all the various types of functions that exist.""" if isinstance(fn, framework.LazyFunction): # The following looks complicated, but this is necessary because you can't # construct closures over the loop variable directly. thunks = [(lambda thunk: lambda: framework.eval(thunk, env))(th) for th in arglist.values] return fn(*thunks) evaled_args = framework.eval(arglist, env) if isinstance(fn, framework.EnvironmentFunction): return fn(*evaled_args, env=env) return fn(*evaled_args)
[ "def", "call_fn", "(", "fn", ",", "arglist", ",", "env", ")", ":", "if", "isinstance", "(", "fn", ",", "framework", ".", "LazyFunction", ")", ":", "# The following looks complicated, but this is necessary because you can't", "# construct closures over the loop variable directly.", "thunks", "=", "[", "(", "lambda", "thunk", ":", "lambda", ":", "framework", ".", "eval", "(", "thunk", ",", "env", ")", ")", "(", "th", ")", "for", "th", "in", "arglist", ".", "values", "]", "return", "fn", "(", "*", "thunks", ")", "evaled_args", "=", "framework", ".", "eval", "(", "arglist", ",", "env", ")", "if", "isinstance", "(", "fn", ",", "framework", ".", "EnvironmentFunction", ")", ":", "return", "fn", "(", "*", "evaled_args", ",", "env", "=", "env", ")", "return", "fn", "(", "*", "evaled_args", ")" ]
Call a function, respecting all the various types of functions that exist.
[ "Call", "a", "function", "respecting", "all", "the", "various", "types", "of", "functions", "that", "exist", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L595-L607
rix0rrr/gcl
gcl/ast.py
schema_spec_from_tuple
def schema_spec_from_tuple(tup): """Return the schema spec from a run-time tuple.""" if hasattr(tup, 'get_schema_spec'): # Tuples have a TupleSchema field that contains a model of the schema return schema.from_spec({ 'fields': TupleSchemaAccess(tup), 'required': tup.get_required_fields()}) return schema.AnySchema()
python
def schema_spec_from_tuple(tup): """Return the schema spec from a run-time tuple.""" if hasattr(tup, 'get_schema_spec'): # Tuples have a TupleSchema field that contains a model of the schema return schema.from_spec({ 'fields': TupleSchemaAccess(tup), 'required': tup.get_required_fields()}) return schema.AnySchema()
[ "def", "schema_spec_from_tuple", "(", "tup", ")", ":", "if", "hasattr", "(", "tup", ",", "'get_schema_spec'", ")", ":", "# Tuples have a TupleSchema field that contains a model of the schema", "return", "schema", ".", "from_spec", "(", "{", "'fields'", ":", "TupleSchemaAccess", "(", "tup", ")", ",", "'required'", ":", "tup", ".", "get_required_fields", "(", ")", "}", ")", "return", "schema", ".", "AnySchema", "(", ")" ]
Return the schema spec from a run-time tuple.
[ "Return", "the", "schema", "spec", "from", "a", "run", "-", "time", "tuple", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L847-L854
rix0rrr/gcl
gcl/ast.py
make_schema_from
def make_schema_from(value, env): """Make a Schema object from the given spec. The input and output types of this function are super unclear, and are held together by ponies, wishes, duct tape, and a load of tests. See the comments for horrific entertainment. """ # So this thing may not need to evaluate anything[0] if isinstance(value, framework.Thunk): value = framework.eval(value, env) # We're a bit messy. In general, this has evaluated to a Schema object, but not necessarily: # for tuples and lists, we still need to treat the objects as specs. if isinstance(value, schema.Schema): return value if framework.is_tuple(value): # If it so happens that the thing is a tuple, we need to pass in the data in a bit of a # different way into the schema factory (in a dictionary with {fields, required} keys). return schema_spec_from_tuple(value) if framework.is_list(value): # [0] This list may contain tuples, which oughta be treated as specs, or already-resolved schema # objects (as returned by 'int' and 'string' literals). make_schema_from # deals with both. return schema.from_spec([make_schema_from(x, env) for x in value]) raise exceptions.EvaluationError('Can\'t make a schema from %r' % value)
python
def make_schema_from(value, env): """Make a Schema object from the given spec. The input and output types of this function are super unclear, and are held together by ponies, wishes, duct tape, and a load of tests. See the comments for horrific entertainment. """ # So this thing may not need to evaluate anything[0] if isinstance(value, framework.Thunk): value = framework.eval(value, env) # We're a bit messy. In general, this has evaluated to a Schema object, but not necessarily: # for tuples and lists, we still need to treat the objects as specs. if isinstance(value, schema.Schema): return value if framework.is_tuple(value): # If it so happens that the thing is a tuple, we need to pass in the data in a bit of a # different way into the schema factory (in a dictionary with {fields, required} keys). return schema_spec_from_tuple(value) if framework.is_list(value): # [0] This list may contain tuples, which oughta be treated as specs, or already-resolved schema # objects (as returned by 'int' and 'string' literals). make_schema_from # deals with both. return schema.from_spec([make_schema_from(x, env) for x in value]) raise exceptions.EvaluationError('Can\'t make a schema from %r' % value)
[ "def", "make_schema_from", "(", "value", ",", "env", ")", ":", "# So this thing may not need to evaluate anything[0]", "if", "isinstance", "(", "value", ",", "framework", ".", "Thunk", ")", ":", "value", "=", "framework", ".", "eval", "(", "value", ",", "env", ")", "# We're a bit messy. In general, this has evaluated to a Schema object, but not necessarily:", "# for tuples and lists, we still need to treat the objects as specs.", "if", "isinstance", "(", "value", ",", "schema", ".", "Schema", ")", ":", "return", "value", "if", "framework", ".", "is_tuple", "(", "value", ")", ":", "# If it so happens that the thing is a tuple, we need to pass in the data in a bit of a", "# different way into the schema factory (in a dictionary with {fields, required} keys).", "return", "schema_spec_from_tuple", "(", "value", ")", "if", "framework", ".", "is_list", "(", "value", ")", ":", "# [0] This list may contain tuples, which oughta be treated as specs, or already-resolved schema", "# objects (as returned by 'int' and 'string' literals). make_schema_from", "# deals with both.", "return", "schema", ".", "from_spec", "(", "[", "make_schema_from", "(", "x", ",", "env", ")", "for", "x", "in", "value", "]", ")", "raise", "exceptions", ".", "EvaluationError", "(", "'Can\\'t make a schema from %r'", "%", "value", ")" ]
Make a Schema object from the given spec. The input and output types of this function are super unclear, and are held together by ponies, wishes, duct tape, and a load of tests. See the comments for horrific entertainment.
[ "Make", "a", "Schema", "object", "from", "the", "given", "spec", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L857-L884
rix0rrr/gcl
gcl/ast.py
bracketedList
def bracketedList(l, r, sep, expr, allow_missing_close=False): """Parse bracketed list. Empty list is possible, as is a trailing separator. """ # We may need to backtrack for lists, because of list comprehension, but not for # any of the other lists strict = l != '[' closer = sym(r) if not allow_missing_close else p.Optional(sym(r)) if strict: return sym(l) - listMembers(sep, expr) - closer else: return sym(l) + listMembers(sep, expr) + closer
python
def bracketedList(l, r, sep, expr, allow_missing_close=False): """Parse bracketed list. Empty list is possible, as is a trailing separator. """ # We may need to backtrack for lists, because of list comprehension, but not for # any of the other lists strict = l != '[' closer = sym(r) if not allow_missing_close else p.Optional(sym(r)) if strict: return sym(l) - listMembers(sep, expr) - closer else: return sym(l) + listMembers(sep, expr) + closer
[ "def", "bracketedList", "(", "l", ",", "r", ",", "sep", ",", "expr", ",", "allow_missing_close", "=", "False", ")", ":", "# We may need to backtrack for lists, because of list comprehension, but not for", "# any of the other lists", "strict", "=", "l", "!=", "'['", "closer", "=", "sym", "(", "r", ")", "if", "not", "allow_missing_close", "else", "p", ".", "Optional", "(", "sym", "(", "r", ")", ")", "if", "strict", ":", "return", "sym", "(", "l", ")", "-", "listMembers", "(", "sep", ",", "expr", ")", "-", "closer", "else", ":", "return", "sym", "(", "l", ")", "+", "listMembers", "(", "sep", ",", "expr", ")", "+", "closer" ]
Parse bracketed list. Empty list is possible, as is a trailing separator.
[ "Parse", "bracketed", "list", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L909-L921
rix0rrr/gcl
gcl/ast.py
unquote
def unquote(s): """Unquote the indicated string.""" # Ignore the left- and rightmost chars (which should be quotes). # Use the Python engine to decode the escape sequence i, N = 1, len(s) - 1 ret = [] while i < N: if s[i] == '\\' and i < N - 1: ret.append(UNQUOTE_MAP.get(s[i+1], s[i+1])) i += 2 else: ret.append(s[i]) i += 1 return ''.join(ret)
python
def unquote(s): """Unquote the indicated string.""" # Ignore the left- and rightmost chars (which should be quotes). # Use the Python engine to decode the escape sequence i, N = 1, len(s) - 1 ret = [] while i < N: if s[i] == '\\' and i < N - 1: ret.append(UNQUOTE_MAP.get(s[i+1], s[i+1])) i += 2 else: ret.append(s[i]) i += 1 return ''.join(ret)
[ "def", "unquote", "(", "s", ")", ":", "# Ignore the left- and rightmost chars (which should be quotes).", "# Use the Python engine to decode the escape sequence", "i", ",", "N", "=", "1", ",", "len", "(", "s", ")", "-", "1", "ret", "=", "[", "]", "while", "i", "<", "N", ":", "if", "s", "[", "i", "]", "==", "'\\\\'", "and", "i", "<", "N", "-", "1", ":", "ret", ".", "append", "(", "UNQUOTE_MAP", ".", "get", "(", "s", "[", "i", "+", "1", "]", ",", "s", "[", "i", "+", "1", "]", ")", ")", "i", "+=", "2", "else", ":", "ret", ".", "append", "(", "s", "[", "i", "]", ")", "i", "+=", "1", "return", "''", ".", "join", "(", "ret", ")" ]
Unquote the indicated string.
[ "Unquote", "the", "indicated", "string", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L924-L937
rix0rrr/gcl
gcl/ast.py
pattern
def pattern(name, pattern): """Function to put a name on a pyparsing pattern. Just for ease of debugging/tracing parse errors. """ pattern.setName(name) astracing.maybe_trace(pattern) return pattern
python
def pattern(name, pattern): """Function to put a name on a pyparsing pattern. Just for ease of debugging/tracing parse errors. """ pattern.setName(name) astracing.maybe_trace(pattern) return pattern
[ "def", "pattern", "(", "name", ",", "pattern", ")", ":", "pattern", ".", "setName", "(", "name", ")", "astracing", ".", "maybe_trace", "(", "pattern", ")", "return", "pattern" ]
Function to put a name on a pyparsing pattern. Just for ease of debugging/tracing parse errors.
[ "Function", "to", "put", "a", "name", "on", "a", "pyparsing", "pattern", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L947-L954
rix0rrr/gcl
gcl/ast.py
make_grammar
def make_grammar(allow_errors): """Make the part of the grammar that depends on whether we swallow errors or not.""" if allow_errors in GRAMMAR_CACHE: return GRAMMAR_CACHE[allow_errors] tuple = p.Forward() catch_errors = p.Forward() catch_errors << (p.Regex('[^{};]*') - p.Optional(tuple) - p.Regex('[^;}]*')) def swallow_remainder(): if allow_errors: return pattern('swallow_remainder', p.Suppress(catch_errors)) return p.Empty() def swallow_errors(rule): """Extend the production rule by potentially eating errors. This does not return a p.NoMatch() because that messes up the error messages. """ ret = rule if allow_errors: # Synchronize on the first semicolon or the first unbalanced closing curly ret = rule | pattern('catch_errors', parseWithLocation(p.Suppress(catch_errors), UnparseableNode)) return ret class Grammar: keywords = ['and', 'or', 'not', 'if', 'then', 'else', 'include', 'inherit', 'null', 'true', 'false', 'for', 'in'] # This is a hack: this condition helps uselessly recursing into the grammar for # juxtapositions. 
early_abort_scan = ~p.oneOf([';', ',', ']', '}', 'for' ]) expression = pattern('expression', p.Forward()) comment = p.Regex('#') + ~p.FollowedBy(sym('.')) + p.restOfLine doc_comment = pattern('doc_comment', (sym('#.') - p.restOfLine)) quotedIdentifier = pattern('quotedIdentifier', p.QuotedString('`', multiline=False)) # - Must start with an alphascore # - May contain alphanumericscores and special characters such as : and - # - Must not end in a special character identifier = pattern('identifier', parseWithLocation(quotedIdentifier | p.Regex(r'[a-zA-Z_]([a-zA-Z0-9_:-]*[a-zA-Z0-9_])?'), Identifier)) # Variable identifier (can't be any of the keywords, which may have lower matching priority) variable = pattern('variable', ~p.MatchFirst(p.oneOf(keywords)) + pattern('identifier', parseWithLocation(identifier.copy(), Var))) # Contants integer = pattern('integer', parseWithLocation(p.Word(p.nums), convertAndMake(int, Literal))) floating = pattern('floating', parseWithLocation(p.Regex(r'\d*\.\d+'), convertAndMake(float, Literal))) dq_string = pattern('dq_string', parseWithLocation(p.QuotedString('"', escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal))) sq_string = pattern('sq_string', parseWithLocation(p.QuotedString("'", escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal))) boolean = pattern('boolean', parseWithLocation(p.Keyword('true') | p.Keyword('false'), convertAndMake(mkBool, Literal))) null = pattern('null', parseWithLocation(p.Keyword('null'), Null)) # List list_ = pattern('list', parseWithLocation(bracketedList('[', ']', ',', expression), List)) # Tuple inherit = pattern('inherit', (kw('inherit') - p.ZeroOrMore(variable)).setParseAction(inheritNodes)) schema_spec = pattern('schema_spec', parseWithLocation(p.Optional(p.Keyword('private').setParseAction(lambda: True), default=False) - p.Optional(p.Keyword('required').setParseAction(lambda: True), default=False) - p.Optional(expression, 
default=any_schema_expr), MemberSchemaNode)) optional_schema = pattern('optional_schema', p.Optional(p.Suppress(':') - schema_spec, default=no_schema)) expression_value = pattern('expression_value', sym('=') - swallow_errors(expression)) void_value = pattern('void_value', parseWithLocation(p.FollowedBy(sym(';') | sym('}')), lambda loc: Void(loc, 'nonameyet'))) member_value = pattern('member_value', swallow_errors(expression_value | void_value)) named_member = pattern('named_member', parseWithLocation(identifier - optional_schema - member_value - swallow_remainder(), TupleMemberNode)) documented_member = pattern('documented_member', parseWithLocation(parseWithLocation(p.ZeroOrMore(doc_comment), DocComment) + named_member, attach_doc_comment)) tuple_member = early_abort_scan + pattern('tuple_member', swallow_errors(inherit | documented_member) - swallow_remainder()) ErrorAwareTupleNode = functools.partial(TupleNode, allow_errors) tuple_members = pattern('tuple_members', parseWithLocation(listMembers(';', tuple_member), ErrorAwareTupleNode)) tuple << pattern('tuple', parseWithLocation(bracketedList('{', '}', ';', tuple_member, allow_missing_close=allow_errors), ErrorAwareTupleNode)) # Argument list will live by itself as a atom. 
Actually, it's a tuple, but we # don't call it that because we use that term for something else already :) arg_list = pattern('arg_list', bracketedList('(', ')', ',', expression).setParseAction(ArgList)) parenthesized_expr = pattern('parenthesized_expr', (sym('(') - expression - ')').setParseAction(head)) unary_op = pattern('unary_op', (p.oneOf(' '.join(functions.unary_operators.keys())) - expression).setParseAction(mkUnOp)) if_then_else = pattern('if_then_else', parseWithLocation(kw('if') + expression + kw('then') + expression + kw('else') + expression, Condition)) list_comprehension = pattern('list_comprehension', parseWithLocation(sym('[') + expression + kw('for') + variable + kw('in') + expression + p.Optional(kw('if') + expression) + sym(']'), ListComprehension)) # We don't allow space-application here # Now our grammar is becoming very dirty and hackish deref = pattern('deref', p.Forward()) include = pattern('include', parseWithLocation(kw('include') - deref, Include)) atom = pattern('atom', (tuple | sq_string | dq_string | variable | floating | integer | boolean | list_ | null | unary_op | parenthesized_expr | if_then_else | include | list_comprehension )) # We have two different forms of function application, so they can have 2 # different precedences. This one: fn(args), which binds stronger than # dereferencing (fn(args).attr == (fn(args)).attr) applic1 = pattern('applic1', parseWithLocation(atom - p.ZeroOrMore(arg_list), mkApplications)) # Dereferencing of an expression (obj.bar) deref << parseWithLocation(applic1 - p.ZeroOrMore(p.Suppress('.') - swallow_errors(identifier)), mkDerefs) # All binary operators at various precedence levels go here: # This piece of code does the moral equivalent of: # # T = F*F | F/F | F # E = T+T | T-T | T # # etc. 
term = deref for op_level in functions.binary_operators_before_juxtaposition: operator_syms = list(op_level.keys()) term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps) # Juxtaposition function application (fn arg), must be 1-arg every time applic2 = pattern('applic2', parseWithLocation(term - p.ZeroOrMore(early_abort_scan + term), mkApplications)) term = applic2 for op_level in functions.binary_operators_after_juxtaposition: operator_syms = list(op_level.keys()) term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps) expression << term # Two entry points: start at an arbitrary expression, or expect the top-level # scope to be a tuple. start = pattern('start', expression.copy().ignore(comment)) start_tuple = tuple_members.ignore(comment) GRAMMAR_CACHE[allow_errors] = Grammar return Grammar
python
def make_grammar(allow_errors): """Make the part of the grammar that depends on whether we swallow errors or not.""" if allow_errors in GRAMMAR_CACHE: return GRAMMAR_CACHE[allow_errors] tuple = p.Forward() catch_errors = p.Forward() catch_errors << (p.Regex('[^{};]*') - p.Optional(tuple) - p.Regex('[^;}]*')) def swallow_remainder(): if allow_errors: return pattern('swallow_remainder', p.Suppress(catch_errors)) return p.Empty() def swallow_errors(rule): """Extend the production rule by potentially eating errors. This does not return a p.NoMatch() because that messes up the error messages. """ ret = rule if allow_errors: # Synchronize on the first semicolon or the first unbalanced closing curly ret = rule | pattern('catch_errors', parseWithLocation(p.Suppress(catch_errors), UnparseableNode)) return ret class Grammar: keywords = ['and', 'or', 'not', 'if', 'then', 'else', 'include', 'inherit', 'null', 'true', 'false', 'for', 'in'] # This is a hack: this condition helps uselessly recursing into the grammar for # juxtapositions. 
early_abort_scan = ~p.oneOf([';', ',', ']', '}', 'for' ]) expression = pattern('expression', p.Forward()) comment = p.Regex('#') + ~p.FollowedBy(sym('.')) + p.restOfLine doc_comment = pattern('doc_comment', (sym('#.') - p.restOfLine)) quotedIdentifier = pattern('quotedIdentifier', p.QuotedString('`', multiline=False)) # - Must start with an alphascore # - May contain alphanumericscores and special characters such as : and - # - Must not end in a special character identifier = pattern('identifier', parseWithLocation(quotedIdentifier | p.Regex(r'[a-zA-Z_]([a-zA-Z0-9_:-]*[a-zA-Z0-9_])?'), Identifier)) # Variable identifier (can't be any of the keywords, which may have lower matching priority) variable = pattern('variable', ~p.MatchFirst(p.oneOf(keywords)) + pattern('identifier', parseWithLocation(identifier.copy(), Var))) # Contants integer = pattern('integer', parseWithLocation(p.Word(p.nums), convertAndMake(int, Literal))) floating = pattern('floating', parseWithLocation(p.Regex(r'\d*\.\d+'), convertAndMake(float, Literal))) dq_string = pattern('dq_string', parseWithLocation(p.QuotedString('"', escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal))) sq_string = pattern('sq_string', parseWithLocation(p.QuotedString("'", escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal))) boolean = pattern('boolean', parseWithLocation(p.Keyword('true') | p.Keyword('false'), convertAndMake(mkBool, Literal))) null = pattern('null', parseWithLocation(p.Keyword('null'), Null)) # List list_ = pattern('list', parseWithLocation(bracketedList('[', ']', ',', expression), List)) # Tuple inherit = pattern('inherit', (kw('inherit') - p.ZeroOrMore(variable)).setParseAction(inheritNodes)) schema_spec = pattern('schema_spec', parseWithLocation(p.Optional(p.Keyword('private').setParseAction(lambda: True), default=False) - p.Optional(p.Keyword('required').setParseAction(lambda: True), default=False) - p.Optional(expression, 
default=any_schema_expr), MemberSchemaNode)) optional_schema = pattern('optional_schema', p.Optional(p.Suppress(':') - schema_spec, default=no_schema)) expression_value = pattern('expression_value', sym('=') - swallow_errors(expression)) void_value = pattern('void_value', parseWithLocation(p.FollowedBy(sym(';') | sym('}')), lambda loc: Void(loc, 'nonameyet'))) member_value = pattern('member_value', swallow_errors(expression_value | void_value)) named_member = pattern('named_member', parseWithLocation(identifier - optional_schema - member_value - swallow_remainder(), TupleMemberNode)) documented_member = pattern('documented_member', parseWithLocation(parseWithLocation(p.ZeroOrMore(doc_comment), DocComment) + named_member, attach_doc_comment)) tuple_member = early_abort_scan + pattern('tuple_member', swallow_errors(inherit | documented_member) - swallow_remainder()) ErrorAwareTupleNode = functools.partial(TupleNode, allow_errors) tuple_members = pattern('tuple_members', parseWithLocation(listMembers(';', tuple_member), ErrorAwareTupleNode)) tuple << pattern('tuple', parseWithLocation(bracketedList('{', '}', ';', tuple_member, allow_missing_close=allow_errors), ErrorAwareTupleNode)) # Argument list will live by itself as a atom. 
Actually, it's a tuple, but we # don't call it that because we use that term for something else already :) arg_list = pattern('arg_list', bracketedList('(', ')', ',', expression).setParseAction(ArgList)) parenthesized_expr = pattern('parenthesized_expr', (sym('(') - expression - ')').setParseAction(head)) unary_op = pattern('unary_op', (p.oneOf(' '.join(functions.unary_operators.keys())) - expression).setParseAction(mkUnOp)) if_then_else = pattern('if_then_else', parseWithLocation(kw('if') + expression + kw('then') + expression + kw('else') + expression, Condition)) list_comprehension = pattern('list_comprehension', parseWithLocation(sym('[') + expression + kw('for') + variable + kw('in') + expression + p.Optional(kw('if') + expression) + sym(']'), ListComprehension)) # We don't allow space-application here # Now our grammar is becoming very dirty and hackish deref = pattern('deref', p.Forward()) include = pattern('include', parseWithLocation(kw('include') - deref, Include)) atom = pattern('atom', (tuple | sq_string | dq_string | variable | floating | integer | boolean | list_ | null | unary_op | parenthesized_expr | if_then_else | include | list_comprehension )) # We have two different forms of function application, so they can have 2 # different precedences. This one: fn(args), which binds stronger than # dereferencing (fn(args).attr == (fn(args)).attr) applic1 = pattern('applic1', parseWithLocation(atom - p.ZeroOrMore(arg_list), mkApplications)) # Dereferencing of an expression (obj.bar) deref << parseWithLocation(applic1 - p.ZeroOrMore(p.Suppress('.') - swallow_errors(identifier)), mkDerefs) # All binary operators at various precedence levels go here: # This piece of code does the moral equivalent of: # # T = F*F | F/F | F # E = T+T | T-T | T # # etc. 
term = deref for op_level in functions.binary_operators_before_juxtaposition: operator_syms = list(op_level.keys()) term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps) # Juxtaposition function application (fn arg), must be 1-arg every time applic2 = pattern('applic2', parseWithLocation(term - p.ZeroOrMore(early_abort_scan + term), mkApplications)) term = applic2 for op_level in functions.binary_operators_after_juxtaposition: operator_syms = list(op_level.keys()) term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps) expression << term # Two entry points: start at an arbitrary expression, or expect the top-level # scope to be a tuple. start = pattern('start', expression.copy().ignore(comment)) start_tuple = tuple_members.ignore(comment) GRAMMAR_CACHE[allow_errors] = Grammar return Grammar
[ "def", "make_grammar", "(", "allow_errors", ")", ":", "if", "allow_errors", "in", "GRAMMAR_CACHE", ":", "return", "GRAMMAR_CACHE", "[", "allow_errors", "]", "tuple", "=", "p", ".", "Forward", "(", ")", "catch_errors", "=", "p", ".", "Forward", "(", ")", "catch_errors", "<<", "(", "p", ".", "Regex", "(", "'[^{};]*'", ")", "-", "p", ".", "Optional", "(", "tuple", ")", "-", "p", ".", "Regex", "(", "'[^;}]*'", ")", ")", "def", "swallow_remainder", "(", ")", ":", "if", "allow_errors", ":", "return", "pattern", "(", "'swallow_remainder'", ",", "p", ".", "Suppress", "(", "catch_errors", ")", ")", "return", "p", ".", "Empty", "(", ")", "def", "swallow_errors", "(", "rule", ")", ":", "\"\"\"Extend the production rule by potentially eating errors.\n\n This does not return a p.NoMatch() because that messes up the error messages.\n \"\"\"", "ret", "=", "rule", "if", "allow_errors", ":", "# Synchronize on the first semicolon or the first unbalanced closing curly", "ret", "=", "rule", "|", "pattern", "(", "'catch_errors'", ",", "parseWithLocation", "(", "p", ".", "Suppress", "(", "catch_errors", ")", ",", "UnparseableNode", ")", ")", "return", "ret", "class", "Grammar", ":", "keywords", "=", "[", "'and'", ",", "'or'", ",", "'not'", ",", "'if'", ",", "'then'", ",", "'else'", ",", "'include'", ",", "'inherit'", ",", "'null'", ",", "'true'", ",", "'false'", ",", "'for'", ",", "'in'", "]", "# This is a hack: this condition helps uselessly recursing into the grammar for", "# juxtapositions.", "early_abort_scan", "=", "~", "p", ".", "oneOf", "(", "[", "';'", ",", "','", ",", "']'", ",", "'}'", ",", "'for'", "]", ")", "expression", "=", "pattern", "(", "'expression'", ",", "p", ".", "Forward", "(", ")", ")", "comment", "=", "p", ".", "Regex", "(", "'#'", ")", "+", "~", "p", ".", "FollowedBy", "(", "sym", "(", "'.'", ")", ")", "+", "p", ".", "restOfLine", "doc_comment", "=", "pattern", "(", "'doc_comment'", ",", "(", "sym", "(", "'#.'", ")", "-", "p", ".", "restOfLine", ")", ")", 
"quotedIdentifier", "=", "pattern", "(", "'quotedIdentifier'", ",", "p", ".", "QuotedString", "(", "'`'", ",", "multiline", "=", "False", ")", ")", "# - Must start with an alphascore", "# - May contain alphanumericscores and special characters such as : and -", "# - Must not end in a special character", "identifier", "=", "pattern", "(", "'identifier'", ",", "parseWithLocation", "(", "quotedIdentifier", "|", "p", ".", "Regex", "(", "r'[a-zA-Z_]([a-zA-Z0-9_:-]*[a-zA-Z0-9_])?'", ")", ",", "Identifier", ")", ")", "# Variable identifier (can't be any of the keywords, which may have lower matching priority)", "variable", "=", "pattern", "(", "'variable'", ",", "~", "p", ".", "MatchFirst", "(", "p", ".", "oneOf", "(", "keywords", ")", ")", "+", "pattern", "(", "'identifier'", ",", "parseWithLocation", "(", "identifier", ".", "copy", "(", ")", ",", "Var", ")", ")", ")", "# Contants", "integer", "=", "pattern", "(", "'integer'", ",", "parseWithLocation", "(", "p", ".", "Word", "(", "p", ".", "nums", ")", ",", "convertAndMake", "(", "int", ",", "Literal", ")", ")", ")", "floating", "=", "pattern", "(", "'floating'", ",", "parseWithLocation", "(", "p", ".", "Regex", "(", "r'\\d*\\.\\d+'", ")", ",", "convertAndMake", "(", "float", ",", "Literal", ")", ")", ")", "dq_string", "=", "pattern", "(", "'dq_string'", ",", "parseWithLocation", "(", "p", ".", "QuotedString", "(", "'\"'", ",", "escChar", "=", "'\\\\'", ",", "unquoteResults", "=", "False", ",", "multiline", "=", "True", ")", ",", "convertAndMake", "(", "unquote", ",", "Literal", ")", ")", ")", "sq_string", "=", "pattern", "(", "'sq_string'", ",", "parseWithLocation", "(", "p", ".", "QuotedString", "(", "\"'\"", ",", "escChar", "=", "'\\\\'", ",", "unquoteResults", "=", "False", ",", "multiline", "=", "True", ")", ",", "convertAndMake", "(", "unquote", ",", "Literal", ")", ")", ")", "boolean", "=", "pattern", "(", "'boolean'", ",", "parseWithLocation", "(", "p", ".", "Keyword", "(", "'true'", ")", "|", "p", ".", 
"Keyword", "(", "'false'", ")", ",", "convertAndMake", "(", "mkBool", ",", "Literal", ")", ")", ")", "null", "=", "pattern", "(", "'null'", ",", "parseWithLocation", "(", "p", ".", "Keyword", "(", "'null'", ")", ",", "Null", ")", ")", "# List", "list_", "=", "pattern", "(", "'list'", ",", "parseWithLocation", "(", "bracketedList", "(", "'['", ",", "']'", ",", "','", ",", "expression", ")", ",", "List", ")", ")", "# Tuple", "inherit", "=", "pattern", "(", "'inherit'", ",", "(", "kw", "(", "'inherit'", ")", "-", "p", ".", "ZeroOrMore", "(", "variable", ")", ")", ".", "setParseAction", "(", "inheritNodes", ")", ")", "schema_spec", "=", "pattern", "(", "'schema_spec'", ",", "parseWithLocation", "(", "p", ".", "Optional", "(", "p", ".", "Keyword", "(", "'private'", ")", ".", "setParseAction", "(", "lambda", ":", "True", ")", ",", "default", "=", "False", ")", "-", "p", ".", "Optional", "(", "p", ".", "Keyword", "(", "'required'", ")", ".", "setParseAction", "(", "lambda", ":", "True", ")", ",", "default", "=", "False", ")", "-", "p", ".", "Optional", "(", "expression", ",", "default", "=", "any_schema_expr", ")", ",", "MemberSchemaNode", ")", ")", "optional_schema", "=", "pattern", "(", "'optional_schema'", ",", "p", ".", "Optional", "(", "p", ".", "Suppress", "(", "':'", ")", "-", "schema_spec", ",", "default", "=", "no_schema", ")", ")", "expression_value", "=", "pattern", "(", "'expression_value'", ",", "sym", "(", "'='", ")", "-", "swallow_errors", "(", "expression", ")", ")", "void_value", "=", "pattern", "(", "'void_value'", ",", "parseWithLocation", "(", "p", ".", "FollowedBy", "(", "sym", "(", "';'", ")", "|", "sym", "(", "'}'", ")", ")", ",", "lambda", "loc", ":", "Void", "(", "loc", ",", "'nonameyet'", ")", ")", ")", "member_value", "=", "pattern", "(", "'member_value'", ",", "swallow_errors", "(", "expression_value", "|", "void_value", ")", ")", "named_member", "=", "pattern", "(", "'named_member'", ",", "parseWithLocation", "(", "identifier", "-", 
"optional_schema", "-", "member_value", "-", "swallow_remainder", "(", ")", ",", "TupleMemberNode", ")", ")", "documented_member", "=", "pattern", "(", "'documented_member'", ",", "parseWithLocation", "(", "parseWithLocation", "(", "p", ".", "ZeroOrMore", "(", "doc_comment", ")", ",", "DocComment", ")", "+", "named_member", ",", "attach_doc_comment", ")", ")", "tuple_member", "=", "early_abort_scan", "+", "pattern", "(", "'tuple_member'", ",", "swallow_errors", "(", "inherit", "|", "documented_member", ")", "-", "swallow_remainder", "(", ")", ")", "ErrorAwareTupleNode", "=", "functools", ".", "partial", "(", "TupleNode", ",", "allow_errors", ")", "tuple_members", "=", "pattern", "(", "'tuple_members'", ",", "parseWithLocation", "(", "listMembers", "(", "';'", ",", "tuple_member", ")", ",", "ErrorAwareTupleNode", ")", ")", "tuple", "<<", "pattern", "(", "'tuple'", ",", "parseWithLocation", "(", "bracketedList", "(", "'{'", ",", "'}'", ",", "';'", ",", "tuple_member", ",", "allow_missing_close", "=", "allow_errors", ")", ",", "ErrorAwareTupleNode", ")", ")", "# Argument list will live by itself as a atom. 
Actually, it's a tuple, but we", "# don't call it that because we use that term for something else already :)", "arg_list", "=", "pattern", "(", "'arg_list'", ",", "bracketedList", "(", "'('", ",", "')'", ",", "','", ",", "expression", ")", ".", "setParseAction", "(", "ArgList", ")", ")", "parenthesized_expr", "=", "pattern", "(", "'parenthesized_expr'", ",", "(", "sym", "(", "'('", ")", "-", "expression", "-", "')'", ")", ".", "setParseAction", "(", "head", ")", ")", "unary_op", "=", "pattern", "(", "'unary_op'", ",", "(", "p", ".", "oneOf", "(", "' '", ".", "join", "(", "functions", ".", "unary_operators", ".", "keys", "(", ")", ")", ")", "-", "expression", ")", ".", "setParseAction", "(", "mkUnOp", ")", ")", "if_then_else", "=", "pattern", "(", "'if_then_else'", ",", "parseWithLocation", "(", "kw", "(", "'if'", ")", "+", "expression", "+", "kw", "(", "'then'", ")", "+", "expression", "+", "kw", "(", "'else'", ")", "+", "expression", ",", "Condition", ")", ")", "list_comprehension", "=", "pattern", "(", "'list_comprehension'", ",", "parseWithLocation", "(", "sym", "(", "'['", ")", "+", "expression", "+", "kw", "(", "'for'", ")", "+", "variable", "+", "kw", "(", "'in'", ")", "+", "expression", "+", "p", ".", "Optional", "(", "kw", "(", "'if'", ")", "+", "expression", ")", "+", "sym", "(", "']'", ")", ",", "ListComprehension", ")", ")", "# We don't allow space-application here", "# Now our grammar is becoming very dirty and hackish", "deref", "=", "pattern", "(", "'deref'", ",", "p", ".", "Forward", "(", ")", ")", "include", "=", "pattern", "(", "'include'", ",", "parseWithLocation", "(", "kw", "(", "'include'", ")", "-", "deref", ",", "Include", ")", ")", "atom", "=", "pattern", "(", "'atom'", ",", "(", "tuple", "|", "sq_string", "|", "dq_string", "|", "variable", "|", "floating", "|", "integer", "|", "boolean", "|", "list_", "|", "null", "|", "unary_op", "|", "parenthesized_expr", "|", "if_then_else", "|", "include", "|", "list_comprehension", ")", ")", "# We 
have two different forms of function application, so they can have 2", "# different precedences. This one: fn(args), which binds stronger than", "# dereferencing (fn(args).attr == (fn(args)).attr)", "applic1", "=", "pattern", "(", "'applic1'", ",", "parseWithLocation", "(", "atom", "-", "p", ".", "ZeroOrMore", "(", "arg_list", ")", ",", "mkApplications", ")", ")", "# Dereferencing of an expression (obj.bar)", "deref", "<<", "parseWithLocation", "(", "applic1", "-", "p", ".", "ZeroOrMore", "(", "p", ".", "Suppress", "(", "'.'", ")", "-", "swallow_errors", "(", "identifier", ")", ")", ",", "mkDerefs", ")", "# All binary operators at various precedence levels go here:", "# This piece of code does the moral equivalent of:", "#", "# T = F*F | F/F | F", "# E = T+T | T-T | T", "#", "# etc.", "term", "=", "deref", "for", "op_level", "in", "functions", ".", "binary_operators_before_juxtaposition", ":", "operator_syms", "=", "list", "(", "op_level", ".", "keys", "(", ")", ")", "term", "=", "(", "term", "-", "p", ".", "ZeroOrMore", "(", "p", ".", "oneOf", "(", "operator_syms", ")", "-", "term", ")", ")", ".", "setParseAction", "(", "mkBinOps", ")", "# Juxtaposition function application (fn arg), must be 1-arg every time", "applic2", "=", "pattern", "(", "'applic2'", ",", "parseWithLocation", "(", "term", "-", "p", ".", "ZeroOrMore", "(", "early_abort_scan", "+", "term", ")", ",", "mkApplications", ")", ")", "term", "=", "applic2", "for", "op_level", "in", "functions", ".", "binary_operators_after_juxtaposition", ":", "operator_syms", "=", "list", "(", "op_level", ".", "keys", "(", ")", ")", "term", "=", "(", "term", "-", "p", ".", "ZeroOrMore", "(", "p", ".", "oneOf", "(", "operator_syms", ")", "-", "term", ")", ")", ".", "setParseAction", "(", "mkBinOps", ")", "expression", "<<", "term", "# Two entry points: start at an arbitrary expression, or expect the top-level", "# scope to be a tuple.", "start", "=", "pattern", "(", "'start'", ",", "expression", ".", "copy", "(", ")", 
".", "ignore", "(", "comment", ")", ")", "start_tuple", "=", "tuple_members", ".", "ignore", "(", "comment", ")", "GRAMMAR_CACHE", "[", "allow_errors", "]", "=", "Grammar", "return", "Grammar" ]
Make the part of the grammar that depends on whether we swallow errors or not.
[ "Make", "the", "part", "of", "the", "grammar", "that", "depends", "on", "whether", "we", "swallow", "errors", "or", "not", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L958-L1107
rix0rrr/gcl
gcl/ast.py
reads
def reads(s, filename, loader, implicit_tuple, allow_errors): """Load but don't evaluate a GCL expression from a string.""" try: the_context.filename = filename the_context.loader = loader grammar = make_grammar(allow_errors=allow_errors) root = grammar.start_tuple if implicit_tuple else grammar.start return root.parseWithTabs().parseString(s, parseAll=True)[0] except (p.ParseException, p.ParseSyntaxException) as e: loc = SourceLocation(s, find_offset(s, e.lineno, e.col)) raise exceptions.ParseError(the_context.filename, loc, e.msg)
python
def reads(s, filename, loader, implicit_tuple, allow_errors): """Load but don't evaluate a GCL expression from a string.""" try: the_context.filename = filename the_context.loader = loader grammar = make_grammar(allow_errors=allow_errors) root = grammar.start_tuple if implicit_tuple else grammar.start return root.parseWithTabs().parseString(s, parseAll=True)[0] except (p.ParseException, p.ParseSyntaxException) as e: loc = SourceLocation(s, find_offset(s, e.lineno, e.col)) raise exceptions.ParseError(the_context.filename, loc, e.msg)
[ "def", "reads", "(", "s", ",", "filename", ",", "loader", ",", "implicit_tuple", ",", "allow_errors", ")", ":", "try", ":", "the_context", ".", "filename", "=", "filename", "the_context", ".", "loader", "=", "loader", "grammar", "=", "make_grammar", "(", "allow_errors", "=", "allow_errors", ")", "root", "=", "grammar", ".", "start_tuple", "if", "implicit_tuple", "else", "grammar", ".", "start", "return", "root", ".", "parseWithTabs", "(", ")", ".", "parseString", "(", "s", ",", "parseAll", "=", "True", ")", "[", "0", "]", "except", "(", "p", ".", "ParseException", ",", "p", ".", "ParseSyntaxException", ")", "as", "e", ":", "loc", "=", "SourceLocation", "(", "s", ",", "find_offset", "(", "s", ",", "e", ".", "lineno", ",", "e", ".", "col", ")", ")", "raise", "exceptions", ".", "ParseError", "(", "the_context", ".", "filename", ",", "loc", ",", "e", ".", "msg", ")" ]
Load but don't evaluate a GCL expression from a string.
[ "Load", "but", "don", "t", "evaluate", "a", "GCL", "expression", "from", "a", "string", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L1131-L1143
rix0rrr/gcl
gcl/ast.py
AstNode.find_tokens
def find_tokens(self, q): """Find all AST nodes at the given filename, line and column.""" found_me = [] if hasattr(self, 'location'): if self.location.contains(q): found_me = [self] elif self._found_by(q): found_me = [self] cs = [n.find_tokens(q) for n in self._children()] return found_me + list(itertools.chain(*cs))
python
def find_tokens(self, q): """Find all AST nodes at the given filename, line and column.""" found_me = [] if hasattr(self, 'location'): if self.location.contains(q): found_me = [self] elif self._found_by(q): found_me = [self] cs = [n.find_tokens(q) for n in self._children()] return found_me + list(itertools.chain(*cs))
[ "def", "find_tokens", "(", "self", ",", "q", ")", ":", "found_me", "=", "[", "]", "if", "hasattr", "(", "self", ",", "'location'", ")", ":", "if", "self", ".", "location", ".", "contains", "(", "q", ")", ":", "found_me", "=", "[", "self", "]", "elif", "self", ".", "_found_by", "(", "q", ")", ":", "found_me", "=", "[", "self", "]", "cs", "=", "[", "n", ".", "find_tokens", "(", "q", ")", "for", "n", "in", "self", ".", "_children", "(", ")", "]", "return", "found_me", "+", "list", "(", "itertools", ".", "chain", "(", "*", "cs", ")", ")" ]
Find all AST nodes at the given filename, line and column.
[ "Find", "all", "AST", "nodes", "at", "the", "given", "filename", "line", "and", "column", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L156-L166
rix0rrr/gcl
gcl/ast.py
TupleNode._make_tuple
def _make_tuple(self, env): """Instantiate the Tuple based on this TupleNode.""" t = runtime.Tuple(self, env, dict2tuple) # A tuple also provides its own schema spec schema = schema_spec_from_tuple(t) t.attach_schema(schema) return t
python
def _make_tuple(self, env): """Instantiate the Tuple based on this TupleNode.""" t = runtime.Tuple(self, env, dict2tuple) # A tuple also provides its own schema spec schema = schema_spec_from_tuple(t) t.attach_schema(schema) return t
[ "def", "_make_tuple", "(", "self", ",", "env", ")", ":", "t", "=", "runtime", ".", "Tuple", "(", "self", ",", "env", ",", "dict2tuple", ")", "# A tuple also provides its own schema spec", "schema", "=", "schema_spec_from_tuple", "(", "t", ")", "t", ".", "attach_schema", "(", "schema", ")", "return", "t" ]
Instantiate the Tuple based on this TupleNode.
[ "Instantiate", "the", "Tuple", "based", "on", "this", "TupleNode", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L433-L439
rix0rrr/gcl
gcl/ast.py
Application.applyTuple
def applyTuple(self, tuple, right, env): """Apply a tuple to something else.""" if len(right) != 1: raise exceptions.EvaluationError('Tuple (%r) can only be applied to one argument, got %r' % (self.left, self.right)) right = right[0] return tuple(right)
python
def applyTuple(self, tuple, right, env): """Apply a tuple to something else.""" if len(right) != 1: raise exceptions.EvaluationError('Tuple (%r) can only be applied to one argument, got %r' % (self.left, self.right)) right = right[0] return tuple(right)
[ "def", "applyTuple", "(", "self", ",", "tuple", ",", "right", ",", "env", ")", ":", "if", "len", "(", "right", ")", "!=", "1", ":", "raise", "exceptions", ".", "EvaluationError", "(", "'Tuple (%r) can only be applied to one argument, got %r'", "%", "(", "self", ".", "left", ",", "self", ".", "right", ")", ")", "right", "=", "right", "[", "0", "]", "return", "tuple", "(", "right", ")" ]
Apply a tuple to something else.
[ "Apply", "a", "tuple", "to", "something", "else", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L508-L514
rix0rrr/gcl
gcl/ast.py
Application.applyIndex
def applyIndex(self, lst, right): """Apply a list to something else.""" if len(right) != 1: raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right)) right = right[0] if isinstance(right, int): return lst[right] raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right))
python
def applyIndex(self, lst, right): """Apply a list to something else.""" if len(right) != 1: raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right)) right = right[0] if isinstance(right, int): return lst[right] raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right))
[ "def", "applyIndex", "(", "self", ",", "lst", ",", "right", ")", ":", "if", "len", "(", "right", ")", "!=", "1", ":", "raise", "exceptions", ".", "EvaluationError", "(", "'%r can only be applied to one argument, got %r'", "%", "(", "self", ".", "left", ",", "self", ".", "right", ")", ")", "right", "=", "right", "[", "0", "]", "if", "isinstance", "(", "right", ",", "int", ")", ":", "return", "lst", "[", "right", "]", "raise", "exceptions", ".", "EvaluationError", "(", "\"Can't apply %r to argument (%r): integer expected, got %r\"", "%", "(", "self", ".", "left", ",", "self", ".", "right", ",", "right", ")", ")" ]
Apply a list to something else.
[ "Apply", "a", "list", "to", "something", "else", "." ]
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast.py#L516-L525
hannes-brt/hebel
hebel/parameter_updaters.py
NesterovMomentumUpdate.pre_gradient_update
def pre_gradient_update(self): """ First step of Nesterov momentum method: take step in direction of accumulated gradient """ updates = zip(self.velocity, self.model.n_parameters * [1.]) self.model.update_parameters(updates)
python
def pre_gradient_update(self): """ First step of Nesterov momentum method: take step in direction of accumulated gradient """ updates = zip(self.velocity, self.model.n_parameters * [1.]) self.model.update_parameters(updates)
[ "def", "pre_gradient_update", "(", "self", ")", ":", "updates", "=", "zip", "(", "self", ".", "velocity", ",", "self", ".", "model", ".", "n_parameters", "*", "[", "1.", "]", ")", "self", ".", "model", ".", "update_parameters", "(", "updates", ")" ]
First step of Nesterov momentum method: take step in direction of accumulated gradient
[ "First", "step", "of", "Nesterov", "momentum", "method", ":", "take", "step", "in", "direction", "of", "accumulated", "gradient" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/parameter_updaters.py#L70-L76
hannes-brt/hebel
hebel/layers/softmax_layer.py
SoftmaxLayer.class_error
def class_error(self, input_data, targets, average=True, cache=None, prediction=False): """ Return the classification error rate """ if cache is not None: activations = cache else: activations = \ self.feed_forward(input_data, prediction=prediction) targets = targets.get().argmax(1) class_error = np.sum(activations.get().argmax(1) != targets) if average: class_error = float(class_error) / targets.shape[0] return class_error
python
def class_error(self, input_data, targets, average=True, cache=None, prediction=False): """ Return the classification error rate """ if cache is not None: activations = cache else: activations = \ self.feed_forward(input_data, prediction=prediction) targets = targets.get().argmax(1) class_error = np.sum(activations.get().argmax(1) != targets) if average: class_error = float(class_error) / targets.shape[0] return class_error
[ "def", "class_error", "(", "self", ",", "input_data", ",", "targets", ",", "average", "=", "True", ",", "cache", "=", "None", ",", "prediction", "=", "False", ")", ":", "if", "cache", "is", "not", "None", ":", "activations", "=", "cache", "else", ":", "activations", "=", "self", ".", "feed_forward", "(", "input_data", ",", "prediction", "=", "prediction", ")", "targets", "=", "targets", ".", "get", "(", ")", ".", "argmax", "(", "1", ")", "class_error", "=", "np", ".", "sum", "(", "activations", ".", "get", "(", ")", ".", "argmax", "(", "1", ")", "!=", "targets", ")", "if", "average", ":", "class_error", "=", "float", "(", "class_error", ")", "/", "targets", ".", "shape", "[", "0", "]", "return", "class_error" ]
Return the classification error rate
[ "Return", "the", "classification", "error", "rate" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/softmax_layer.py#L293-L308
hannes-brt/hebel
hebel/layers/softmax_layer.py
SoftmaxLayer.kl_error
def kl_error(self, input_data, targets, average=True, cache=None, prediction=True): """ The KL divergence error """ if cache is not None: activations = cache else: activations = \ self.feed_forward(input_data, prediction=prediction) targets_non_nan = gpuarray.empty_like(targets) nan_to_zeros(targets, targets_non_nan) kl_error = gpuarray.sum(targets_non_nan * (cumath.log(targets_non_nan + eps) - cumath.log(activations + eps))) if average: kl_error /= targets.shape[0] return kl_error.get()
python
def kl_error(self, input_data, targets, average=True, cache=None, prediction=True): """ The KL divergence error """ if cache is not None: activations = cache else: activations = \ self.feed_forward(input_data, prediction=prediction) targets_non_nan = gpuarray.empty_like(targets) nan_to_zeros(targets, targets_non_nan) kl_error = gpuarray.sum(targets_non_nan * (cumath.log(targets_non_nan + eps) - cumath.log(activations + eps))) if average: kl_error /= targets.shape[0] return kl_error.get()
[ "def", "kl_error", "(", "self", ",", "input_data", ",", "targets", ",", "average", "=", "True", ",", "cache", "=", "None", ",", "prediction", "=", "True", ")", ":", "if", "cache", "is", "not", "None", ":", "activations", "=", "cache", "else", ":", "activations", "=", "self", ".", "feed_forward", "(", "input_data", ",", "prediction", "=", "prediction", ")", "targets_non_nan", "=", "gpuarray", ".", "empty_like", "(", "targets", ")", "nan_to_zeros", "(", "targets", ",", "targets_non_nan", ")", "kl_error", "=", "gpuarray", ".", "sum", "(", "targets_non_nan", "*", "(", "cumath", ".", "log", "(", "targets_non_nan", "+", "eps", ")", "-", "cumath", ".", "log", "(", "activations", "+", "eps", ")", ")", ")", "if", "average", ":", "kl_error", "/=", "targets", ".", "shape", "[", "0", "]", "return", "kl_error", ".", "get", "(", ")" ]
The KL divergence error
[ "The", "KL", "divergence", "error" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/softmax_layer.py#L310-L328
hannes-brt/hebel
hebel/pycuda_ops/linalg.py
dot
def dot(x_gpu, y_gpu, transa='N', transb='N', handle=None, target=None): """ Dot product of two arrays. For 1D arrays, this function computes the inner product. For 2D arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix product; the result has shape `(m, n)`. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray Input array. y_gpu : pycuda.gpuarray.GPUArray Input array. transa : char If 'T', compute the product of the transpose of `x_gpu`. If 'C', compute the product of the Hermitian of `x_gpu`. transb : char If 'T', compute the product of the transpose of `y_gpu`. If 'C', compute the product of the Hermitian of `y_gpu`. handle : int CUBLAS context. If no context is specified, the default handle from `scikits.cuda.misc._global_cublas_handle` is used. Returns ------- c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128} Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D arrays, the result will be returned as a scalar. Notes ----- The input matrices must all contain elements of the same data type. 
Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import linalg >>> import misc >>> linalg.init() >>> a = np.asarray(np.random.rand(4, 2), np.float32) >>> b = np.asarray(np.random.rand(2, 2), np.float32) >>> a_gpu = gpuarray.to_gpu(a) >>> b_gpu = gpuarray.to_gpu(b) >>> c_gpu = linalg.dot(a_gpu, b_gpu) >>> np.allclose(np.dot(a, b), c_gpu.get()) True >>> d = np.asarray(np.random.rand(5), np.float32) >>> e = np.asarray(np.random.rand(5), np.float32) >>> d_gpu = gpuarray.to_gpu(d) >>> e_gpu = gpuarray.to_gpu(e) >>> f = linalg.dot(d_gpu, e_gpu) >>> np.allclose(np.dot(d, e), f) True """ if handle is None: handle = _global_cublas_handle if len(x_gpu.shape) == 1 and len(y_gpu.shape) == 1: if x_gpu.size != y_gpu.size: raise ValueError('arrays must be of same length: ' 'x_gpu.size = %d, y_gpu.size = %d' % (x_gpu.size, y_gpu.size)) # Compute inner product for 1D arrays: if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCdotu elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSdot elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZdotu elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDdot else: raise ValueError('unsupported combination of input types: ' 'x_gpu.dtype = %s, y_gpu.dtype = %s' % (str(x_gpu.dtype), str(y_gpu.dtype))) return cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1, y_gpu.gpudata, 1) else: # Get the shapes of the arguments (accounting for the # possibility that one of them may only have one dimension): x_shape = x_gpu.shape y_shape = y_gpu.shape if len(x_shape) == 1: x_shape = (1, x_shape[0]) if len(y_shape) == 1: y_shape = (1, y_shape[0]) # Perform matrix multiplication for 2D arrays: if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCgemm alpha = np.complex64(1.0) beta = 
np.complex64(0.0) elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSgemm alpha = np.float32(1.0) beta = np.float32(0.0) elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZgemm alpha = np.complex128(1.0) beta = np.complex128(0.0) elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDgemm alpha = np.float64(1.0) beta = np.float64(0.0) else: raise ValueError('unsupported combination of input types: ' 'x_gpu.dtype = %s, y_gpu.dtype = %s' % (str(x_gpu.dtype), str(y_gpu.dtype))) transa = lower(transa) transb = lower(transb) if transb in ['t', 'c']: m, k = y_shape elif transb in ['n']: k, m = y_shape else: raise ValueError('invalid value "%s" for transb' % transb) if transa in ['t', 'c']: l, n = x_shape elif transa in ['n']: n, l = x_shape else: raise ValueError('invalid value "%s" for transa' % transa) if l != k: raise ValueError('objects are not aligned: x_shape = %s, y_shape = %s' % (x_shape, y_shape)) if transb == 'n': lda = max(1, m) else: lda = max(1, k) if transa == 'n': ldb = max(1, k) else: ldb = max(1, n) ldc = max(1, m) # Note that the desired shape of the output matrix is the transpose # of what CUBLAS assumes: if target is None: target = gpuarray.empty((n, ldc), x_gpu.dtype, allocator=memory_pool.allocate) cublas_func(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata, lda, x_gpu.gpudata, ldb, beta, target.gpudata, ldc) return target
python
def dot(x_gpu, y_gpu, transa='N', transb='N', handle=None, target=None): """ Dot product of two arrays. For 1D arrays, this function computes the inner product. For 2D arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix product; the result has shape `(m, n)`. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray Input array. y_gpu : pycuda.gpuarray.GPUArray Input array. transa : char If 'T', compute the product of the transpose of `x_gpu`. If 'C', compute the product of the Hermitian of `x_gpu`. transb : char If 'T', compute the product of the transpose of `y_gpu`. If 'C', compute the product of the Hermitian of `y_gpu`. handle : int CUBLAS context. If no context is specified, the default handle from `scikits.cuda.misc._global_cublas_handle` is used. Returns ------- c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128} Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D arrays, the result will be returned as a scalar. Notes ----- The input matrices must all contain elements of the same data type. 
Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import linalg >>> import misc >>> linalg.init() >>> a = np.asarray(np.random.rand(4, 2), np.float32) >>> b = np.asarray(np.random.rand(2, 2), np.float32) >>> a_gpu = gpuarray.to_gpu(a) >>> b_gpu = gpuarray.to_gpu(b) >>> c_gpu = linalg.dot(a_gpu, b_gpu) >>> np.allclose(np.dot(a, b), c_gpu.get()) True >>> d = np.asarray(np.random.rand(5), np.float32) >>> e = np.asarray(np.random.rand(5), np.float32) >>> d_gpu = gpuarray.to_gpu(d) >>> e_gpu = gpuarray.to_gpu(e) >>> f = linalg.dot(d_gpu, e_gpu) >>> np.allclose(np.dot(d, e), f) True """ if handle is None: handle = _global_cublas_handle if len(x_gpu.shape) == 1 and len(y_gpu.shape) == 1: if x_gpu.size != y_gpu.size: raise ValueError('arrays must be of same length: ' 'x_gpu.size = %d, y_gpu.size = %d' % (x_gpu.size, y_gpu.size)) # Compute inner product for 1D arrays: if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCdotu elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSdot elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZdotu elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDdot else: raise ValueError('unsupported combination of input types: ' 'x_gpu.dtype = %s, y_gpu.dtype = %s' % (str(x_gpu.dtype), str(y_gpu.dtype))) return cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1, y_gpu.gpudata, 1) else: # Get the shapes of the arguments (accounting for the # possibility that one of them may only have one dimension): x_shape = x_gpu.shape y_shape = y_gpu.shape if len(x_shape) == 1: x_shape = (1, x_shape[0]) if len(y_shape) == 1: y_shape = (1, y_shape[0]) # Perform matrix multiplication for 2D arrays: if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64): cublas_func = cublas.cublasCgemm alpha = np.complex64(1.0) beta = 
np.complex64(0.0) elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32): cublas_func = cublas.cublasSgemm alpha = np.float32(1.0) beta = np.float32(0.0) elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128): cublas_func = cublas.cublasZgemm alpha = np.complex128(1.0) beta = np.complex128(0.0) elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64): cublas_func = cublas.cublasDgemm alpha = np.float64(1.0) beta = np.float64(0.0) else: raise ValueError('unsupported combination of input types: ' 'x_gpu.dtype = %s, y_gpu.dtype = %s' % (str(x_gpu.dtype), str(y_gpu.dtype))) transa = lower(transa) transb = lower(transb) if transb in ['t', 'c']: m, k = y_shape elif transb in ['n']: k, m = y_shape else: raise ValueError('invalid value "%s" for transb' % transb) if transa in ['t', 'c']: l, n = x_shape elif transa in ['n']: n, l = x_shape else: raise ValueError('invalid value "%s" for transa' % transa) if l != k: raise ValueError('objects are not aligned: x_shape = %s, y_shape = %s' % (x_shape, y_shape)) if transb == 'n': lda = max(1, m) else: lda = max(1, k) if transa == 'n': ldb = max(1, k) else: ldb = max(1, n) ldc = max(1, m) # Note that the desired shape of the output matrix is the transpose # of what CUBLAS assumes: if target is None: target = gpuarray.empty((n, ldc), x_gpu.dtype, allocator=memory_pool.allocate) cublas_func(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata, lda, x_gpu.gpudata, ldb, beta, target.gpudata, ldc) return target
[ "def", "dot", "(", "x_gpu", ",", "y_gpu", ",", "transa", "=", "'N'", ",", "transb", "=", "'N'", ",", "handle", "=", "None", ",", "target", "=", "None", ")", ":", "if", "handle", "is", "None", ":", "handle", "=", "_global_cublas_handle", "if", "len", "(", "x_gpu", ".", "shape", ")", "==", "1", "and", "len", "(", "y_gpu", ".", "shape", ")", "==", "1", ":", "if", "x_gpu", ".", "size", "!=", "y_gpu", ".", "size", ":", "raise", "ValueError", "(", "'arrays must be of same length: '", "'x_gpu.size = %d, y_gpu.size = %d'", "%", "(", "x_gpu", ".", "size", ",", "y_gpu", ".", "size", ")", ")", "# Compute inner product for 1D arrays:", "if", "(", "x_gpu", ".", "dtype", "==", "np", ".", "complex64", "and", "y_gpu", ".", "dtype", "==", "np", ".", "complex64", ")", ":", "cublas_func", "=", "cublas", ".", "cublasCdotu", "elif", "(", "x_gpu", ".", "dtype", "==", "np", ".", "float32", "and", "y_gpu", ".", "dtype", "==", "np", ".", "float32", ")", ":", "cublas_func", "=", "cublas", ".", "cublasSdot", "elif", "(", "x_gpu", ".", "dtype", "==", "np", ".", "complex128", "and", "y_gpu", ".", "dtype", "==", "np", ".", "complex128", ")", ":", "cublas_func", "=", "cublas", ".", "cublasZdotu", "elif", "(", "x_gpu", ".", "dtype", "==", "np", ".", "float64", "and", "y_gpu", ".", "dtype", "==", "np", ".", "float64", ")", ":", "cublas_func", "=", "cublas", ".", "cublasDdot", "else", ":", "raise", "ValueError", "(", "'unsupported combination of input types: '", "'x_gpu.dtype = %s, y_gpu.dtype = %s'", "%", "(", "str", "(", "x_gpu", ".", "dtype", ")", ",", "str", "(", "y_gpu", ".", "dtype", ")", ")", ")", "return", "cublas_func", "(", "handle", ",", "x_gpu", ".", "size", ",", "x_gpu", ".", "gpudata", ",", "1", ",", "y_gpu", ".", "gpudata", ",", "1", ")", "else", ":", "# Get the shapes of the arguments (accounting for the", "# possibility that one of them may only have one dimension):", "x_shape", "=", "x_gpu", ".", "shape", "y_shape", "=", "y_gpu", ".", "shape", "if", "len", "(", "x_shape", ")", 
"==", "1", ":", "x_shape", "=", "(", "1", ",", "x_shape", "[", "0", "]", ")", "if", "len", "(", "y_shape", ")", "==", "1", ":", "y_shape", "=", "(", "1", ",", "y_shape", "[", "0", "]", ")", "# Perform matrix multiplication for 2D arrays:", "if", "(", "x_gpu", ".", "dtype", "==", "np", ".", "complex64", "and", "y_gpu", ".", "dtype", "==", "np", ".", "complex64", ")", ":", "cublas_func", "=", "cublas", ".", "cublasCgemm", "alpha", "=", "np", ".", "complex64", "(", "1.0", ")", "beta", "=", "np", ".", "complex64", "(", "0.0", ")", "elif", "(", "x_gpu", ".", "dtype", "==", "np", ".", "float32", "and", "y_gpu", ".", "dtype", "==", "np", ".", "float32", ")", ":", "cublas_func", "=", "cublas", ".", "cublasSgemm", "alpha", "=", "np", ".", "float32", "(", "1.0", ")", "beta", "=", "np", ".", "float32", "(", "0.0", ")", "elif", "(", "x_gpu", ".", "dtype", "==", "np", ".", "complex128", "and", "y_gpu", ".", "dtype", "==", "np", ".", "complex128", ")", ":", "cublas_func", "=", "cublas", ".", "cublasZgemm", "alpha", "=", "np", ".", "complex128", "(", "1.0", ")", "beta", "=", "np", ".", "complex128", "(", "0.0", ")", "elif", "(", "x_gpu", ".", "dtype", "==", "np", ".", "float64", "and", "y_gpu", ".", "dtype", "==", "np", ".", "float64", ")", ":", "cublas_func", "=", "cublas", ".", "cublasDgemm", "alpha", "=", "np", ".", "float64", "(", "1.0", ")", "beta", "=", "np", ".", "float64", "(", "0.0", ")", "else", ":", "raise", "ValueError", "(", "'unsupported combination of input types: '", "'x_gpu.dtype = %s, y_gpu.dtype = %s'", "%", "(", "str", "(", "x_gpu", ".", "dtype", ")", ",", "str", "(", "y_gpu", ".", "dtype", ")", ")", ")", "transa", "=", "lower", "(", "transa", ")", "transb", "=", "lower", "(", "transb", ")", "if", "transb", "in", "[", "'t'", ",", "'c'", "]", ":", "m", ",", "k", "=", "y_shape", "elif", "transb", "in", "[", "'n'", "]", ":", "k", ",", "m", "=", "y_shape", "else", ":", "raise", "ValueError", "(", "'invalid value \"%s\" for transb'", "%", "transb", ")", "if", 
"transa", "in", "[", "'t'", ",", "'c'", "]", ":", "l", ",", "n", "=", "x_shape", "elif", "transa", "in", "[", "'n'", "]", ":", "n", ",", "l", "=", "x_shape", "else", ":", "raise", "ValueError", "(", "'invalid value \"%s\" for transa'", "%", "transa", ")", "if", "l", "!=", "k", ":", "raise", "ValueError", "(", "'objects are not aligned: x_shape = %s, y_shape = %s'", "%", "(", "x_shape", ",", "y_shape", ")", ")", "if", "transb", "==", "'n'", ":", "lda", "=", "max", "(", "1", ",", "m", ")", "else", ":", "lda", "=", "max", "(", "1", ",", "k", ")", "if", "transa", "==", "'n'", ":", "ldb", "=", "max", "(", "1", ",", "k", ")", "else", ":", "ldb", "=", "max", "(", "1", ",", "n", ")", "ldc", "=", "max", "(", "1", ",", "m", ")", "# Note that the desired shape of the output matrix is the transpose", "# of what CUBLAS assumes:", "if", "target", "is", "None", ":", "target", "=", "gpuarray", ".", "empty", "(", "(", "n", ",", "ldc", ")", ",", "x_gpu", ".", "dtype", ",", "allocator", "=", "memory_pool", ".", "allocate", ")", "cublas_func", "(", "handle", ",", "transb", ",", "transa", ",", "m", ",", "n", ",", "k", ",", "alpha", ",", "y_gpu", ".", "gpudata", ",", "lda", ",", "x_gpu", ".", "gpudata", ",", "ldb", ",", "beta", ",", "target", ".", "gpudata", ",", "ldc", ")", "return", "target" ]
Dot product of two arrays. For 1D arrays, this function computes the inner product. For 2D arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix product; the result has shape `(m, n)`. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray Input array. y_gpu : pycuda.gpuarray.GPUArray Input array. transa : char If 'T', compute the product of the transpose of `x_gpu`. If 'C', compute the product of the Hermitian of `x_gpu`. transb : char If 'T', compute the product of the transpose of `y_gpu`. If 'C', compute the product of the Hermitian of `y_gpu`. handle : int CUBLAS context. If no context is specified, the default handle from `scikits.cuda.misc._global_cublas_handle` is used. Returns ------- c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128} Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D arrays, the result will be returned as a scalar. Notes ----- The input matrices must all contain elements of the same data type. Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import linalg >>> import misc >>> linalg.init() >>> a = np.asarray(np.random.rand(4, 2), np.float32) >>> b = np.asarray(np.random.rand(2, 2), np.float32) >>> a_gpu = gpuarray.to_gpu(a) >>> b_gpu = gpuarray.to_gpu(b) >>> c_gpu = linalg.dot(a_gpu, b_gpu) >>> np.allclose(np.dot(a, b), c_gpu.get()) True >>> d = np.asarray(np.random.rand(5), np.float32) >>> e = np.asarray(np.random.rand(5), np.float32) >>> d_gpu = gpuarray.to_gpu(d) >>> e_gpu = gpuarray.to_gpu(e) >>> f = linalg.dot(d_gpu, e_gpu) >>> np.allclose(np.dot(d, e), f) True
[ "Dot", "product", "of", "two", "arrays", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/linalg.py#L39-L199
smartfile/django-transfer
django_transfer/views.py
make_tempfile
def make_tempfile(data=None): "Create a temp file, write our PID into it." with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp: temp.write(six.text_type(data if data is not None else os.getpid())) return temp.name
python
def make_tempfile(data=None): "Create a temp file, write our PID into it." with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp: temp.write(six.text_type(data if data is not None else os.getpid())) return temp.name
[ "def", "make_tempfile", "(", "data", "=", "None", ")", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w'", ",", "delete", "=", "False", ")", "as", "temp", ":", "temp", ".", "write", "(", "six", ".", "text_type", "(", "data", "if", "data", "is", "not", "None", "else", "os", ".", "getpid", "(", ")", ")", ")", "return", "temp", ".", "name" ]
Create a temp file, write our PID into it.
[ "Create", "a", "temp", "file", "write", "our", "PID", "into", "it", "." ]
train
https://github.com/smartfile/django-transfer/blob/65ef60e011c1b98d7f5a195debd81b3efde897dd/django_transfer/views.py#L13-L17
hannes-brt/hebel
hebel/layers/multitask_top_layer.py
MultitaskTopLayer.parameters
def parameters(self): """Return a list where each element contains the parameters for a task. """ parameters = [] for task in self.tasks: parameters.extend(task.parameters) return parameters
python
def parameters(self): """Return a list where each element contains the parameters for a task. """ parameters = [] for task in self.tasks: parameters.extend(task.parameters) return parameters
[ "def", "parameters", "(", "self", ")", ":", "parameters", "=", "[", "]", "for", "task", "in", "self", ".", "tasks", ":", "parameters", ".", "extend", "(", "task", ".", "parameters", ")", "return", "parameters" ]
Return a list where each element contains the parameters for a task.
[ "Return", "a", "list", "where", "each", "element", "contains", "the", "parameters", "for", "a", "task", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/multitask_top_layer.py#L178-L184
hannes-brt/hebel
hebel/layers/multitask_top_layer.py
MultitaskTopLayer.parameters
def parameters(self, value): """Update the parameters. ``value`` must be a list/tuple of length ``MultitaskTopLayer.n_tasks``, each element of which must have the correct number of parameters for the task. """ assert len(value) == self.n_parameters i = 0 for task in self.tasks: task.parameters = value[i:i + task.n_parameters] i += task.n_parameters
python
def parameters(self, value): """Update the parameters. ``value`` must be a list/tuple of length ``MultitaskTopLayer.n_tasks``, each element of which must have the correct number of parameters for the task. """ assert len(value) == self.n_parameters i = 0 for task in self.tasks: task.parameters = value[i:i + task.n_parameters] i += task.n_parameters
[ "def", "parameters", "(", "self", ",", "value", ")", ":", "assert", "len", "(", "value", ")", "==", "self", ".", "n_parameters", "i", "=", "0", "for", "task", "in", "self", ".", "tasks", ":", "task", ".", "parameters", "=", "value", "[", "i", ":", "i", "+", "task", ".", "n_parameters", "]", "i", "+=", "task", ".", "n_parameters" ]
Update the parameters. ``value`` must be a list/tuple of length ``MultitaskTopLayer.n_tasks``, each element of which must have the correct number of parameters for the task.
[ "Update", "the", "parameters", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/multitask_top_layer.py#L187-L199
hannes-brt/hebel
hebel/layers/multitask_top_layer.py
MultitaskTopLayer.feed_forward
def feed_forward(self, input_data, prediction=False): """Call ``feed_forward`` for each task and combine the activations. Passes ``input_data`` to all tasks and returns the activations as a list. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. prediction : bool, optional Whether to use prediction model. Only relevant when using dropout. If true, then weights are multiplied by 1 - dropout if the layer uses dropout. **Returns:** activations : list of ``GPUArray`` The activations of the output units, one element for each task. """ activations = [] for task in self.tasks: activations_task = task.feed_forward(input_data, prediction) activations.append(activations_task) return activations
python
def feed_forward(self, input_data, prediction=False): """Call ``feed_forward`` for each task and combine the activations. Passes ``input_data`` to all tasks and returns the activations as a list. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. prediction : bool, optional Whether to use prediction model. Only relevant when using dropout. If true, then weights are multiplied by 1 - dropout if the layer uses dropout. **Returns:** activations : list of ``GPUArray`` The activations of the output units, one element for each task. """ activations = [] for task in self.tasks: activations_task = task.feed_forward(input_data, prediction) activations.append(activations_task) return activations
[ "def", "feed_forward", "(", "self", ",", "input_data", ",", "prediction", "=", "False", ")", ":", "activations", "=", "[", "]", "for", "task", "in", "self", ".", "tasks", ":", "activations_task", "=", "task", ".", "feed_forward", "(", "input_data", ",", "prediction", ")", "activations", ".", "append", "(", "activations_task", ")", "return", "activations" ]
Call ``feed_forward`` for each task and combine the activations. Passes ``input_data`` to all tasks and returns the activations as a list. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. prediction : bool, optional Whether to use prediction model. Only relevant when using dropout. If true, then weights are multiplied by 1 - dropout if the layer uses dropout. **Returns:** activations : list of ``GPUArray`` The activations of the output units, one element for each task.
[ "Call", "feed_forward", "for", "each", "task", "and", "combine", "the", "activations", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/multitask_top_layer.py#L223-L251
hannes-brt/hebel
hebel/layers/multitask_top_layer.py
MultitaskTopLayer.backprop
def backprop(self, input_data, targets, cache=None): """Compute gradients for each task and combine the results. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : list Gradients with respect to the weights and biases for each task df_input : ``GPUArray`` Gradients with respect to the input, obtained by adding the gradients with respect to the inputs from each task, weighted by ``MultitaskTopLayer.task_weights``. """ df_input = gpuarray.zeros_like(input_data) if cache is None: cache = self.n_tasks * [None] gradients = [] for targets_task, cache_task, task, task_weight in \ izip(targets, cache, self.tasks, self.task_weights): gradients_task, df_input_task = \ task.backprop(input_data, targets_task, cache_task) df_input = df_input.mul_add(1., df_input_task, task_weight) gradients.extend(gradients_task) return gradients, df_input
python
def backprop(self, input_data, targets, cache=None): """Compute gradients for each task and combine the results. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : list Gradients with respect to the weights and biases for each task df_input : ``GPUArray`` Gradients with respect to the input, obtained by adding the gradients with respect to the inputs from each task, weighted by ``MultitaskTopLayer.task_weights``. """ df_input = gpuarray.zeros_like(input_data) if cache is None: cache = self.n_tasks * [None] gradients = [] for targets_task, cache_task, task, task_weight in \ izip(targets, cache, self.tasks, self.task_weights): gradients_task, df_input_task = \ task.backprop(input_data, targets_task, cache_task) df_input = df_input.mul_add(1., df_input_task, task_weight) gradients.extend(gradients_task) return gradients, df_input
[ "def", "backprop", "(", "self", ",", "input_data", ",", "targets", ",", "cache", "=", "None", ")", ":", "df_input", "=", "gpuarray", ".", "zeros_like", "(", "input_data", ")", "if", "cache", "is", "None", ":", "cache", "=", "self", ".", "n_tasks", "*", "[", "None", "]", "gradients", "=", "[", "]", "for", "targets_task", ",", "cache_task", ",", "task", ",", "task_weight", "in", "izip", "(", "targets", ",", "cache", ",", "self", ".", "tasks", ",", "self", ".", "task_weights", ")", ":", "gradients_task", ",", "df_input_task", "=", "task", ".", "backprop", "(", "input_data", ",", "targets_task", ",", "cache_task", ")", "df_input", "=", "df_input", ".", "mul_add", "(", "1.", ",", "df_input_task", ",", "task_weight", ")", "gradients", ".", "extend", "(", "gradients_task", ")", "return", "gradients", ",", "df_input" ]
Compute gradients for each task and combine the results. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : list Gradients with respect to the weights and biases for each task df_input : ``GPUArray`` Gradients with respect to the input, obtained by adding the gradients with respect to the inputs from each task, weighted by ``MultitaskTopLayer.task_weights``.
[ "Compute", "gradients", "for", "each", "task", "and", "combine", "the", "results", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/multitask_top_layer.py#L253-L294
hannes-brt/hebel
hebel/layers/multitask_top_layer.py
MultitaskTopLayer.cross_entropy_error
def cross_entropy_error(self, input_data, targets, average=True, cache=None, prediction=False, sum_errors=True): """ Computes the cross-entropy error for all tasks. """ loss = [] if cache is None: cache = self.n_tasks * [None] for targets_task, cache_task, task in \ izip(targets, cache, self.tasks): loss.append(task.cross_entropy_error( input_data, targets_task, average=average, cache=cache_task, prediction=prediction)) if sum_errors: return sum(loss) else: return loss
python
def cross_entropy_error(self, input_data, targets, average=True, cache=None, prediction=False, sum_errors=True): """ Computes the cross-entropy error for all tasks. """ loss = [] if cache is None: cache = self.n_tasks * [None] for targets_task, cache_task, task in \ izip(targets, cache, self.tasks): loss.append(task.cross_entropy_error( input_data, targets_task, average=average, cache=cache_task, prediction=prediction)) if sum_errors: return sum(loss) else: return loss
[ "def", "cross_entropy_error", "(", "self", ",", "input_data", ",", "targets", ",", "average", "=", "True", ",", "cache", "=", "None", ",", "prediction", "=", "False", ",", "sum_errors", "=", "True", ")", ":", "loss", "=", "[", "]", "if", "cache", "is", "None", ":", "cache", "=", "self", ".", "n_tasks", "*", "[", "None", "]", "for", "targets_task", ",", "cache_task", ",", "task", "in", "izip", "(", "targets", ",", "cache", ",", "self", ".", "tasks", ")", ":", "loss", ".", "append", "(", "task", ".", "cross_entropy_error", "(", "input_data", ",", "targets_task", ",", "average", "=", "average", ",", "cache", "=", "cache_task", ",", "prediction", "=", "prediction", ")", ")", "if", "sum_errors", ":", "return", "sum", "(", "loss", ")", "else", ":", "return", "loss" ]
Computes the cross-entropy error for all tasks.
[ "Computes", "the", "cross", "-", "entropy", "error", "for", "all", "tasks", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/multitask_top_layer.py#L348-L368
hannes-brt/hebel
hebel/layers/hidden_layer.py
HiddenLayer.parameters
def parameters(self, value): """Update the parameters. ``value`` must have the shape ``(weights, biases)``""" self.W = value[0] if isinstance(value[0], GPUArray) else \ gpuarray.to_gpu(value[0]) self.b = value[1] if isinstance(value[0], GPUArray) else \ gpuarray.to_gpu(value[1])
python
def parameters(self, value): """Update the parameters. ``value`` must have the shape ``(weights, biases)``""" self.W = value[0] if isinstance(value[0], GPUArray) else \ gpuarray.to_gpu(value[0]) self.b = value[1] if isinstance(value[0], GPUArray) else \ gpuarray.to_gpu(value[1])
[ "def", "parameters", "(", "self", ",", "value", ")", ":", "self", ".", "W", "=", "value", "[", "0", "]", "if", "isinstance", "(", "value", "[", "0", "]", ",", "GPUArray", ")", "else", "gpuarray", ".", "to_gpu", "(", "value", "[", "0", "]", ")", "self", ".", "b", "=", "value", "[", "1", "]", "if", "isinstance", "(", "value", "[", "0", "]", ",", "GPUArray", ")", "else", "gpuarray", ".", "to_gpu", "(", "value", "[", "1", "]", ")" ]
Update the parameters. ``value`` must have the shape ``(weights, biases)``
[ "Update", "the", "parameters", ".", "value", "must", "have", "the", "shape", "(", "weights", "biases", ")" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/hidden_layer.py#L161-L167
hannes-brt/hebel
hebel/layers/hidden_layer.py
HiddenLayer.architecture
def architecture(self): """Returns a dictionary describing the architecture of the layer.""" arch = {'class': self.__class__, 'n_in': self.n_in, 'n_units': self.n_units, 'activation_function': self.activation_function if hasattr(self, 'activation_function') else None} return arch
python
def architecture(self): """Returns a dictionary describing the architecture of the layer.""" arch = {'class': self.__class__, 'n_in': self.n_in, 'n_units': self.n_units, 'activation_function': self.activation_function if hasattr(self, 'activation_function') else None} return arch
[ "def", "architecture", "(", "self", ")", ":", "arch", "=", "{", "'class'", ":", "self", ".", "__class__", ",", "'n_in'", ":", "self", ".", "n_in", ",", "'n_units'", ":", "self", ".", "n_units", ",", "'activation_function'", ":", "self", ".", "activation_function", "if", "hasattr", "(", "self", ",", "'activation_function'", ")", "else", "None", "}", "return", "arch" ]
Returns a dictionary describing the architecture of the layer.
[ "Returns", "a", "dictionary", "describing", "the", "architecture", "of", "the", "layer", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/hidden_layer.py#L178-L185
hannes-brt/hebel
hebel/layers/hidden_layer.py
HiddenLayer.feed_forward
def feed_forward(self, input_data, prediction=False): """Propagate forward through the layer **Parameters:** input_data : ``GPUArray`` Input data to compute activations for. prediction : bool, optional Whether to use prediction model. Only relevant when using dropout. If true, then weights are multiplied by 1 - dropout if the layer uses dropout. **Returns:** activations : ``GPUArray`` The activations of the hidden units. """ if input_data.shape[1] != self.W.shape[0]: raise ValueError('Number of outputs from previous layer (%d) ' 'does not match number of inputs to this layer (%d)' % (input_data.shape[1], self.W.shape[0])) activations = linalg.dot(input_data, self.W) activations = add_vec_to_mat(activations, self.b, inplace=True) self.f(activations) if self.dropout > 0: if prediction: activations *= 1 - self.dropout else: dropout_mask = sample_dropout_mask(activations, self.dropout) return activations, dropout_mask return (activations,)
python
def feed_forward(self, input_data, prediction=False): """Propagate forward through the layer **Parameters:** input_data : ``GPUArray`` Input data to compute activations for. prediction : bool, optional Whether to use prediction model. Only relevant when using dropout. If true, then weights are multiplied by 1 - dropout if the layer uses dropout. **Returns:** activations : ``GPUArray`` The activations of the hidden units. """ if input_data.shape[1] != self.W.shape[0]: raise ValueError('Number of outputs from previous layer (%d) ' 'does not match number of inputs to this layer (%d)' % (input_data.shape[1], self.W.shape[0])) activations = linalg.dot(input_data, self.W) activations = add_vec_to_mat(activations, self.b, inplace=True) self.f(activations) if self.dropout > 0: if prediction: activations *= 1 - self.dropout else: dropout_mask = sample_dropout_mask(activations, self.dropout) return activations, dropout_mask return (activations,)
[ "def", "feed_forward", "(", "self", ",", "input_data", ",", "prediction", "=", "False", ")", ":", "if", "input_data", ".", "shape", "[", "1", "]", "!=", "self", ".", "W", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'Number of outputs from previous layer (%d) '", "'does not match number of inputs to this layer (%d)'", "%", "(", "input_data", ".", "shape", "[", "1", "]", ",", "self", ".", "W", ".", "shape", "[", "0", "]", ")", ")", "activations", "=", "linalg", ".", "dot", "(", "input_data", ",", "self", ".", "W", ")", "activations", "=", "add_vec_to_mat", "(", "activations", ",", "self", ".", "b", ",", "inplace", "=", "True", ")", "self", ".", "f", "(", "activations", ")", "if", "self", ".", "dropout", ">", "0", ":", "if", "prediction", ":", "activations", "*=", "1", "-", "self", ".", "dropout", "else", ":", "dropout_mask", "=", "sample_dropout_mask", "(", "activations", ",", "self", ".", "dropout", ")", "return", "activations", ",", "dropout_mask", "return", "(", "activations", ",", ")" ]
Propagate forward through the layer **Parameters:** input_data : ``GPUArray`` Input data to compute activations for. prediction : bool, optional Whether to use prediction model. Only relevant when using dropout. If true, then weights are multiplied by 1 - dropout if the layer uses dropout. **Returns:** activations : ``GPUArray`` The activations of the hidden units.
[ "Propagate", "forward", "through", "the", "layer" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/hidden_layer.py#L226-L262
hannes-brt/hebel
hebel/layers/hidden_layer.py
HiddenLayer.backprop
def backprop(self, input_data, df_output, cache=None): """ Backpropagate through the hidden layer **Parameters:** input_data : ``GPUArray`` Input data to compute activations for. df_output : ``GPUArray`` Gradients with respect to the activations of this layer (received from the layer above). cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input. """ # Get cache if it wasn't provided if cache is None: cache = self.feed_forward(input_data, prediction=False) if len(cache) == 2: activations, dropout_mask = cache else: activations = cache[0] # Multiply the binary mask with the incoming gradients if self.dropout > 0 and dropout_mask is not None: apply_dropout_mask(df_output, dropout_mask) # Get gradient wrt activation function df_activations = self.df(activations) delta = mult_matrix(df_activations, df_output) # Gradient wrt weights df_W = linalg.dot(input_data, delta, transa='T') # Gradient wrt bias df_b = matrix_sum_out_axis(delta, 0) # Gradient wrt inputs df_input = linalg.dot(delta, self.W, transb='T') # L1 weight decay if self.l1_penalty_weight: df_W += self.l1_penalty_weight * sign(self.W) # L2 weight decay if self.l2_penalty_weight: df_W += self.l2_penalty_weight * self.W return (df_W, df_b), df_input
python
def backprop(self, input_data, df_output, cache=None): """ Backpropagate through the hidden layer **Parameters:** input_data : ``GPUArray`` Input data to compute activations for. df_output : ``GPUArray`` Gradients with respect to the activations of this layer (received from the layer above). cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input. """ # Get cache if it wasn't provided if cache is None: cache = self.feed_forward(input_data, prediction=False) if len(cache) == 2: activations, dropout_mask = cache else: activations = cache[0] # Multiply the binary mask with the incoming gradients if self.dropout > 0 and dropout_mask is not None: apply_dropout_mask(df_output, dropout_mask) # Get gradient wrt activation function df_activations = self.df(activations) delta = mult_matrix(df_activations, df_output) # Gradient wrt weights df_W = linalg.dot(input_data, delta, transa='T') # Gradient wrt bias df_b = matrix_sum_out_axis(delta, 0) # Gradient wrt inputs df_input = linalg.dot(delta, self.W, transb='T') # L1 weight decay if self.l1_penalty_weight: df_W += self.l1_penalty_weight * sign(self.W) # L2 weight decay if self.l2_penalty_weight: df_W += self.l2_penalty_weight * self.W return (df_W, df_b), df_input
[ "def", "backprop", "(", "self", ",", "input_data", ",", "df_output", ",", "cache", "=", "None", ")", ":", "# Get cache if it wasn't provided", "if", "cache", "is", "None", ":", "cache", "=", "self", ".", "feed_forward", "(", "input_data", ",", "prediction", "=", "False", ")", "if", "len", "(", "cache", ")", "==", "2", ":", "activations", ",", "dropout_mask", "=", "cache", "else", ":", "activations", "=", "cache", "[", "0", "]", "# Multiply the binary mask with the incoming gradients", "if", "self", ".", "dropout", ">", "0", "and", "dropout_mask", "is", "not", "None", ":", "apply_dropout_mask", "(", "df_output", ",", "dropout_mask", ")", "# Get gradient wrt activation function", "df_activations", "=", "self", ".", "df", "(", "activations", ")", "delta", "=", "mult_matrix", "(", "df_activations", ",", "df_output", ")", "# Gradient wrt weights", "df_W", "=", "linalg", ".", "dot", "(", "input_data", ",", "delta", ",", "transa", "=", "'T'", ")", "# Gradient wrt bias", "df_b", "=", "matrix_sum_out_axis", "(", "delta", ",", "0", ")", "# Gradient wrt inputs", "df_input", "=", "linalg", ".", "dot", "(", "delta", ",", "self", ".", "W", ",", "transb", "=", "'T'", ")", "# L1 weight decay", "if", "self", ".", "l1_penalty_weight", ":", "df_W", "+=", "self", ".", "l1_penalty_weight", "*", "sign", "(", "self", ".", "W", ")", "# L2 weight decay", "if", "self", ".", "l2_penalty_weight", ":", "df_W", "+=", "self", ".", "l2_penalty_weight", "*", "self", ".", "W", "return", "(", "df_W", ",", "df_b", ")", ",", "df_input" ]
Backpropagate through the hidden layer **Parameters:** input_data : ``GPUArray`` Input data to compute activations for. df_output : ``GPUArray`` Gradients with respect to the activations of this layer (received from the layer above). cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input.
[ "Backpropagate", "through", "the", "hidden", "layer" ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/hidden_layer.py#L264-L323
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasCreate
def cublasCreate(): """ Initialize CUBLAS. Initializes CUBLAS and creates a handle to a structure holding the CUBLAS library context. Returns ------- handle : void_p CUBLAS context. """ handle = ctypes.c_void_p() status = _libcublas.cublasCreate_v2(ctypes.byref(handle)) cublasCheckStatus(status) return handle.value
python
def cublasCreate(): """ Initialize CUBLAS. Initializes CUBLAS and creates a handle to a structure holding the CUBLAS library context. Returns ------- handle : void_p CUBLAS context. """ handle = ctypes.c_void_p() status = _libcublas.cublasCreate_v2(ctypes.byref(handle)) cublasCheckStatus(status) return handle.value
[ "def", "cublasCreate", "(", ")", ":", "handle", "=", "ctypes", ".", "c_void_p", "(", ")", "status", "=", "_libcublas", ".", "cublasCreate_v2", "(", "ctypes", ".", "byref", "(", "handle", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "handle", ".", "value" ]
Initialize CUBLAS. Initializes CUBLAS and creates a handle to a structure holding the CUBLAS library context. Returns ------- handle : void_p CUBLAS context.
[ "Initialize", "CUBLAS", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L185-L202
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasDestroy
def cublasDestroy(handle): """ Release CUBLAS resources. Releases hardware resources used by CUBLAS. Parameters ---------- handle : void_p CUBLAS context. """ status = _libcublas.cublasDestroy_v2(ctypes.c_void_p(handle)) cublasCheckStatus(status)
python
def cublasDestroy(handle): """ Release CUBLAS resources. Releases hardware resources used by CUBLAS. Parameters ---------- handle : void_p CUBLAS context. """ status = _libcublas.cublasDestroy_v2(ctypes.c_void_p(handle)) cublasCheckStatus(status)
[ "def", "cublasDestroy", "(", "handle", ")", ":", "status", "=", "_libcublas", ".", "cublasDestroy_v2", "(", "ctypes", ".", "c_void_p", "(", "handle", ")", ")", "cublasCheckStatus", "(", "status", ")" ]
Release CUBLAS resources. Releases hardware resources used by CUBLAS. Parameters ---------- handle : void_p CUBLAS context.
[ "Release", "CUBLAS", "resources", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L206-L220
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasGetVersion
def cublasGetVersion(handle): """ Get CUBLAS version. Returns version number of installed CUBLAS libraries. Parameters ---------- handle : void_p CUBLAS context. Returns ------- version : int CUBLAS version. """ version = ctypes.c_int() status = _libcublas.cublasGetVersion_v2(handle, ctypes.byref(version)) cublasCheckStatus(status) return version.value
python
def cublasGetVersion(handle): """ Get CUBLAS version. Returns version number of installed CUBLAS libraries. Parameters ---------- handle : void_p CUBLAS context. Returns ------- version : int CUBLAS version. """ version = ctypes.c_int() status = _libcublas.cublasGetVersion_v2(handle, ctypes.byref(version)) cublasCheckStatus(status) return version.value
[ "def", "cublasGetVersion", "(", "handle", ")", ":", "version", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetVersion_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "version", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "version", ".", "value" ]
Get CUBLAS version. Returns version number of installed CUBLAS libraries. Parameters ---------- handle : void_p CUBLAS context. Returns ------- version : int CUBLAS version.
[ "Get", "CUBLAS", "version", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L225-L246
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasSetStream
def cublasSetStream(handle, id): """ Set current CUBLAS library stream. Parameters ---------- handle : id CUBLAS context. id : int Stream ID. """ status = _libcublas.cublasSetStream_v2(handle, id) cublasCheckStatus(status)
python
def cublasSetStream(handle, id): """ Set current CUBLAS library stream. Parameters ---------- handle : id CUBLAS context. id : int Stream ID. """ status = _libcublas.cublasSetStream_v2(handle, id) cublasCheckStatus(status)
[ "def", "cublasSetStream", "(", "handle", ",", "id", ")", ":", "status", "=", "_libcublas", ".", "cublasSetStream_v2", "(", "handle", ",", "id", ")", "cublasCheckStatus", "(", "status", ")" ]
Set current CUBLAS library stream. Parameters ---------- handle : id CUBLAS context. id : int Stream ID.
[ "Set", "current", "CUBLAS", "library", "stream", ".", "Parameters", "----------", "handle", ":", "id", "CUBLAS", "context", ".", "id", ":", "int", "Stream", "ID", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L251-L265
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasGetStream
def cublasGetStream(handle): """ Set current CUBLAS library stream. Parameters ---------- handle : void_p CUBLAS context. Returns ------- id : int Stream ID. """ id = ctypes.c_int() status = _libcublas.cublasGetStream_v2(handle, ctypes.byref(id)) cublasCheckStatus(status) return id.value
python
def cublasGetStream(handle): """ Set current CUBLAS library stream. Parameters ---------- handle : void_p CUBLAS context. Returns ------- id : int Stream ID. """ id = ctypes.c_int() status = _libcublas.cublasGetStream_v2(handle, ctypes.byref(id)) cublasCheckStatus(status) return id.value
[ "def", "cublasGetStream", "(", "handle", ")", ":", "id", "=", "ctypes", ".", "c_int", "(", ")", "status", "=", "_libcublas", ".", "cublasGetStream_v2", "(", "handle", ",", "ctypes", ".", "byref", "(", "id", ")", ")", "cublasCheckStatus", "(", "status", ")", "return", "id", ".", "value" ]
Set current CUBLAS library stream. Parameters ---------- handle : void_p CUBLAS context. Returns ------- id : int Stream ID.
[ "Set", "current", "CUBLAS", "library", "stream", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L270-L289
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasSgbmv
def cublasSgbmv(handle, trans, m, n, kl, ku, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for real general banded matrix. """ status = _libcublas.cublasSgbmv_v2(handle, trans, m, n, kl, ku, ctypes.byref(ctypes.c_float(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_float(beta)), int(y), incy) cublasCheckStatus(status)
python
def cublasSgbmv(handle, trans, m, n, kl, ku, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for real general banded matrix. """ status = _libcublas.cublasSgbmv_v2(handle, trans, m, n, kl, ku, ctypes.byref(ctypes.c_float(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_float(beta)), int(y), incy) cublasCheckStatus(status)
[ "def", "cublasSgbmv", "(", "handle", ",", "trans", ",", "m", ",", "n", ",", "kl", ",", "ku", ",", "alpha", ",", "A", ",", "lda", ",", "x", ",", "incx", ",", "beta", ",", "y", ",", "incy", ")", ":", "status", "=", "_libcublas", ".", "cublasSgbmv_v2", "(", "handle", ",", "trans", ",", "m", ",", "n", ",", "kl", ",", "ku", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_float", "(", "alpha", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "int", "(", "x", ")", ",", "incx", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_float", "(", "beta", ")", ")", ",", "int", "(", "y", ")", ",", "incy", ")", "cublasCheckStatus", "(", "status", ")" ]
Matrix-vector product for real general banded matrix.
[ "Matrix", "-", "vector", "product", "for", "real", "general", "banded", "matrix", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2071-L2085
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasCgbmv
def cublasCgbmv(handle, trans, m, n, kl, ku, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for complex general banded matrix. """ status = _libcublas.cublasCgbmv_v2(handle, trans, m, n, kl, ku, ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag)), int(A), lda, int(x), incx, ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag)), int(y), incy) cublasCheckStatus(status)
python
def cublasCgbmv(handle, trans, m, n, kl, ku, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for complex general banded matrix. """ status = _libcublas.cublasCgbmv_v2(handle, trans, m, n, kl, ku, ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag)), int(A), lda, int(x), incx, ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag)), int(y), incy) cublasCheckStatus(status)
[ "def", "cublasCgbmv", "(", "handle", ",", "trans", ",", "m", ",", "n", ",", "kl", ",", "ku", ",", "alpha", ",", "A", ",", "lda", ",", "x", ",", "incx", ",", "beta", ",", "y", ",", "incy", ")", ":", "status", "=", "_libcublas", ".", "cublasCgbmv_v2", "(", "handle", ",", "trans", ",", "m", ",", "n", ",", "kl", ",", "ku", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuFloatComplex", "(", "alpha", ".", "real", ",", "alpha", ".", "imag", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "int", "(", "x", ")", ",", "incx", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuFloatComplex", "(", "beta", ".", "real", ",", "beta", ".", "imag", ")", ")", ",", "int", "(", "y", ")", ",", "incy", ")", "cublasCheckStatus", "(", "status", ")" ]
Matrix-vector product for complex general banded matrix.
[ "Matrix", "-", "vector", "product", "for", "complex", "general", "banded", "matrix", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2132-L2147
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasZgbmv
def cublasZgbmv(handle, trans, m, n, kl, ku, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for complex general banded matrix. """ status = _libcublas.cublasZgbmv_v2(handle, trans, m, n, kl, ku, ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag)), int(A), lda, int(x), incx, ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag)), int(y), incy) cublasCheckStatus(status)
python
def cublasZgbmv(handle, trans, m, n, kl, ku, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for complex general banded matrix. """ status = _libcublas.cublasZgbmv_v2(handle, trans, m, n, kl, ku, ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag)), int(A), lda, int(x), incx, ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag)), int(y), incy) cublasCheckStatus(status)
[ "def", "cublasZgbmv", "(", "handle", ",", "trans", ",", "m", ",", "n", ",", "kl", ",", "ku", ",", "alpha", ",", "A", ",", "lda", ",", "x", ",", "incx", ",", "beta", ",", "y", ",", "incy", ")", ":", "status", "=", "_libcublas", ".", "cublasZgbmv_v2", "(", "handle", ",", "trans", ",", "m", ",", "n", ",", "kl", ",", "ku", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuDoubleComplex", "(", "alpha", ".", "real", ",", "alpha", ".", "imag", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "int", "(", "x", ")", ",", "incx", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuDoubleComplex", "(", "beta", ".", "real", ",", "beta", ".", "imag", ")", ")", ",", "int", "(", "y", ")", ",", "incy", ")", "cublasCheckStatus", "(", "status", ")" ]
Matrix-vector product for complex general banded matrix.
[ "Matrix", "-", "vector", "product", "for", "complex", "general", "banded", "matrix", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2163-L2178
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
cublasSgemv
def cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for real general matrix. """ status = _libcublas.cublasSgemv_v2(handle, _CUBLAS_OP[trans], m, n, ctypes.byref(ctypes.c_float(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_float(beta)), int(y), incy) cublasCheckStatus(status)
python
def cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for real general matrix. """ status = _libcublas.cublasSgemv_v2(handle, _CUBLAS_OP[trans], m, n, ctypes.byref(ctypes.c_float(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_float(beta)), int(y), incy) cublasCheckStatus(status)
[ "def", "cublasSgemv", "(", "handle", ",", "trans", ",", "m", ",", "n", ",", "alpha", ",", "A", ",", "lda", ",", "x", ",", "incx", ",", "beta", ",", "y", ",", "incy", ")", ":", "status", "=", "_libcublas", ".", "cublasSgemv_v2", "(", "handle", ",", "_CUBLAS_OP", "[", "trans", "]", ",", "m", ",", "n", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_float", "(", "alpha", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "int", "(", "x", ")", ",", "incx", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_float", "(", "beta", ")", ")", ",", "int", "(", "y", ")", ",", "incy", ")", "cublasCheckStatus", "(", "status", ")" ]
Matrix-vector product for real general matrix.
[ "Matrix", "-", "vector", "product", "for", "real", "general", "matrix", "." ]
train
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2253-L2264