body
stringlengths
26
98.2k
body_hash
int64
-9,222,864,604,528,158,000
9,221,803,474B
docstring
stringlengths
1
16.8k
path
stringlengths
5
230
name
stringlengths
1
96
repository_name
stringlengths
7
89
lang
stringclasses
1 value
body_without_docstring
stringlengths
20
98.2k
def check(self): ' Checks parameters and paths\n ' if ('UUID' not in PAR): setattr(PAR, 'UUID', str(uuid4())) if ('SCRATCH' not in PATH): setattr(PATH, 'SCRATCH', join('/scratch/gpfs', getuser(), 'seisflows', PAR.UUID)) if ('LOCAL' not in PATH): setattr(PATH, 'LOCAL', '') ...
-5,435,384,274,526,499,000
Checks parameters and paths
seisflows/system/tiger_md.py
check
chukren/seisflows
python
def check(self): ' \n ' if ('UUID' not in PAR): setattr(PAR, 'UUID', str(uuid4())) if ('SCRATCH' not in PATH): setattr(PATH, 'SCRATCH', join('/scratch/gpfs', getuser(), 'seisflows', PAR.UUID)) if ('LOCAL' not in PATH): setattr(PATH, 'LOCAL', ) super(tiger_md, self).che...
def submit(self, *args, **kwargs): ' Submits job\n ' if (not exists(((PATH.SUBMIT + '/') + 'scratch'))): unix.ln(PATH.SCRATCH, ((PATH.SUBMIT + '/') + 'scratch')) super(tiger_md, self).submit(*args, **kwargs)
-6,969,412,769,645,984,000
Submits job
seisflows/system/tiger_md.py
submit
chukren/seisflows
python
def submit(self, *args, **kwargs): ' \n ' if (not exists(((PATH.SUBMIT + '/') + 'scratch'))): unix.ln(PATH.SCRATCH, ((PATH.SUBMIT + '/') + 'scratch')) super(tiger_md, self).submit(*args, **kwargs)
def cltv_lock_to_height(node, tx, to_address, amount, height=(- 1)): 'Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make\n a transaction that spends it.\n\n This transforms the output script to anyone can spend (OP_TRUE) if the\n lock time condition is valid.\n\n Default height is -1 whi...
-6,207,897,528,851,743,000
Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make a transaction that spends it. This transforms the output script to anyone can spend (OP_TRUE) if the lock time condition is valid. Default height is -1 which leads CLTV to fail TODO: test more ways that transactions using CLTV could be invalid (eg lo...
test/functional/feature_cltv.py
cltv_lock_to_height
ComputerCraftr/devault
python
def cltv_lock_to_height(node, tx, to_address, amount, height=(- 1)): 'Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make\n a transaction that spends it.\n\n This transforms the output script to anyone can spend (OP_TRUE) if the\n lock time condition is valid.\n\n Default height is -1 whi...
def expand_dims(var, dim=0): ' Is similar to [numpy.expand_dims](https://docs.scipy.org/doc/numpy/reference/generated/numpy.expand_dims.html).\n var = torch.range(0, 9).view(-1, 2)\n torch.expand_dims(var, 0).size()\n # (1, 5, 2)\n ' sizes = list(var.size()) sizes.insert(dim, 1) ...
123,622,040,983,809,650
Is similar to [numpy.expand_dims](https://docs.scipy.org/doc/numpy/reference/generated/numpy.expand_dims.html). var = torch.range(0, 9).view(-1, 2) torch.expand_dims(var, 0).size() # (1, 5, 2)
losses/magnet_loss.py
expand_dims
jiajunhua/HaydenFaulkner-pytorch.repmet
python
def expand_dims(var, dim=0): ' Is similar to [numpy.expand_dims](https://docs.scipy.org/doc/numpy/reference/generated/numpy.expand_dims.html).\n var = torch.range(0, 9).view(-1, 2)\n torch.expand_dims(var, 0).size()\n # (1, 5, 2)\n ' sizes = list(var.size()) sizes.insert(dim, 1) ...
def comparison_mask(a_labels, b_labels): 'Computes boolean mask for distance comparisons' return torch.eq(expand_dims(a_labels, 1), expand_dims(b_labels, 0))
1,893,867,106,700,745,200
Computes boolean mask for distance comparisons
losses/magnet_loss.py
comparison_mask
jiajunhua/HaydenFaulkner-pytorch.repmet
python
def comparison_mask(a_labels, b_labels): return torch.eq(expand_dims(a_labels, 1), expand_dims(b_labels, 0))
def dynamic_partition(X, partitions, n_clusters): 'Partitions the data into the number of cluster bins' cluster_bin = torch.chunk(X, n_clusters) return cluster_bin
9,038,985,545,027,217,000
Partitions the data into the number of cluster bins
losses/magnet_loss.py
dynamic_partition
jiajunhua/HaydenFaulkner-pytorch.repmet
python
def dynamic_partition(X, partitions, n_clusters): cluster_bin = torch.chunk(X, n_clusters) return cluster_bin
def bool_from_env(var, default: bool=False) -> bool: "Helper for converting env string into boolean.\n\n Returns bool True for string values: '1' or 'true', False otherwise.\n " def str_to_bool(s: str) -> bool: return (s.lower() in ('1', 'true')) os_var = os.environ.get(var) if (os_var is...
5,744,022,469,535,834,000
Helper for converting env string into boolean. Returns bool True for string values: '1' or 'true', False otherwise.
src/ralph/settings/base.py
bool_from_env
p-bo/ralph
python
def bool_from_env(var, default: bool=False) -> bool: "Helper for converting env string into boolean.\n\n Returns bool True for string values: '1' or 'true', False otherwise.\n " def str_to_bool(s: str) -> bool: return (s.lower() in ('1', 'true')) os_var = os.environ.get(var) if (os_var is...
def _crypted_transfer(self, load, tries=3, timeout=60): '\n In case of authentication errors, try to renegotiate authentication\n and retry the method.\n Indeed, we can fail too early in case of a master restart during a\n minion state execution call\n ' def _do_transfer(): ...
4,214,069,522,247,119,400
In case of authentication errors, try to renegotiate authentication and retry the method. Indeed, we can fail too early in case of a master restart during a minion state execution call
salt/transport/__init__.py
_crypted_transfer
otrempe/salt
python
def _crypted_transfer(self, load, tries=3, timeout=60): '\n In case of authentication errors, try to renegotiate authentication\n and retry the method.\n Indeed, we can fail too early in case of a master restart during a\n minion state execution call\n ' def _do_transfer(): ...
def _identifier_split(identifier): 'Return (name, start, end) string tuple from an identifier (PRIVATE).' (id, loc, strand) = identifier.split(':') (start, end) = map(int, loc.split('-')) start -= 1 return (id, start, end, strand)
-346,816,607,895,191,600
Return (name, start, end) string tuple from an identifier (PRIVATE).
Bio/AlignIO/MauveIO.py
_identifier_split
BioinfoCat/biopython
python
def _identifier_split(identifier): (id, loc, strand) = identifier.split(':') (start, end) = map(int, loc.split('-')) start -= 1 return (id, start, end, strand)
def __init__(self, *args, **kwargs): 'Initialize.' super(MauveWriter, self).__init__(*args, **kwargs) self._wrote_header = False self._wrote_first = False
6,127,116,914,066,891,000
Initialize.
Bio/AlignIO/MauveIO.py
__init__
BioinfoCat/biopython
python
def __init__(self, *args, **kwargs): super(MauveWriter, self).__init__(*args, **kwargs) self._wrote_header = False self._wrote_first = False
def write_alignment(self, alignment): 'Use this to write (another) single alignment to an open file.\n\n Note that sequences and their annotation are recorded\n together (rather than having a block of annotation followed\n by a block of aligned sequences).\n ' count = len(alignment) ...
7,175,577,788,833,652,000
Use this to write (another) single alignment to an open file. Note that sequences and their annotation are recorded together (rather than having a block of annotation followed by a block of aligned sequences).
Bio/AlignIO/MauveIO.py
write_alignment
BioinfoCat/biopython
python
def write_alignment(self, alignment): 'Use this to write (another) single alignment to an open file.\n\n Note that sequences and their annotation are recorded\n together (rather than having a block of annotation followed\n by a block of aligned sequences).\n ' count = len(alignment) ...
def _write_record(self, record, record_idx=0): 'Write a single SeqRecord to the file (PRIVATE).' if (self._length_of_sequences != len(record.seq)): raise ValueError('Sequences must all be the same length') seq_name = record.name try: seq_name = str(int(record.name)) except ValueError...
5,108,774,003,558,236,000
Write a single SeqRecord to the file (PRIVATE).
Bio/AlignIO/MauveIO.py
_write_record
BioinfoCat/biopython
python
def _write_record(self, record, record_idx=0): if (self._length_of_sequences != len(record.seq)): raise ValueError('Sequences must all be the same length') seq_name = record.name try: seq_name = str(int(record.name)) except ValueError: seq_name = str((record_idx + 1)) if...
def __next__(self): 'Parse the next alignment from the handle.' handle = self.handle line = handle.readline() if (not line): raise StopIteration while (line and line.strip().startswith('#')): line = handle.readline() seqs = {} seq_regions = {} passed_end_alignment = False...
-5,048,572,972,203,061,000
Parse the next alignment from the handle.
Bio/AlignIO/MauveIO.py
__next__
BioinfoCat/biopython
python
def __next__(self): handle = self.handle line = handle.readline() if (not line): raise StopIteration while (line and line.strip().startswith('#')): line = handle.readline() seqs = {} seq_regions = {} passed_end_alignment = False latest_id = None while True: ...
def run_gaussian_dataset_montecarlo(iterations: int=30, m: int=10000, n: int=128, param_list=None, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='sgd', validation_split: float=0.2, shape_raw: List[int]=None, activation: t_activation='cart_relu', verbose: bool=False, do_all: bool=True, tensorboard...
-1,363,922,818,580,274,200
This function is used to compare CVNN vs RVNN performance over statistical non-circular data. 1. Generates a complex-valued gaussian correlated noise with the characteristics given by the inputs. 2. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model. 3. Sav...
cvnn/montecarlo.py
run_gaussian_dataset_montecarlo
NEGU93/cvnn
python
def run_gaussian_dataset_montecarlo(iterations: int=30, m: int=10000, n: int=128, param_list=None, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='sgd', validation_split: float=0.2, shape_raw: List[int]=None, activation: t_activation='cart_relu', verbose: bool=False, do_all: bool=True, tensorboard...
def run_montecarlo(models: List[Model], dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, validation_split: float=0.2, validation_data: Optional[Union[(Tuple, data.Dataset)]]=None, verbose: Union[(bool, int)]=False, do_conf...
8,390,725,974,427,718,000
This function is used to compare different neural networks performance. 1. Runs simulation and compares them. 2. Saves several files into ./log/montecarlo/date/of/run/ 2.1. run_summary.txt: Summary of the run models and data 2.2. run_data.csv: Full information of performance of iteration of each model at each e...
cvnn/montecarlo.py
run_montecarlo
NEGU93/cvnn
python
def run_montecarlo(models: List[Model], dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, validation_split: float=0.2, validation_data: Optional[Union[(Tuple, data.Dataset)]]=None, verbose: Union[(bool, int)]=False, do_conf...
def mlp_run_real_comparison_montecarlo(dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='adam', shape_raw=None, activation: t_activation='cart_relu', output_activation: t_activation=DEFAULT_OUTPUT_ACT, verbose: U...
-8,105,493,941,948,592,000
This function is used to compare CVNN vs RVNN performance over any dataset. 1. Automatically creates two Multi-Layer Perceptrons (MLP), one complex and one real. 2. Runs simulation and compares them. 3. Saves several files into ./log/montecarlo/date/of/run/ 3.1. run_summary.txt: Summary of the run models and data ...
cvnn/montecarlo.py
mlp_run_real_comparison_montecarlo
NEGU93/cvnn
python
def mlp_run_real_comparison_montecarlo(dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path]=None, iterations: int=30, epochs: int=300, batch_size: int=100, display_freq: int=1, optimizer='adam', shape_raw=None, activation: t_activation='cart_relu', output_activation: t_activation=DEFAULT_OUTPUT_ACT, verbose: U...
def __init__(self): '\n Class that allows the statistical comparison of several models on the same dataset\n ' self.models = [] self.pandas_full_data = pd.DataFrame() self.monte_carlo_analyzer = MonteCarloAnalyzer() self.verbose = 1 self.output_config = {'plot_all': False, 'confusi...
6,956,332,109,359,251,000
Class that allows the statistical comparison of several models on the same dataset
cvnn/montecarlo.py
__init__
NEGU93/cvnn
python
def __init__(self): '\n \n ' self.models = [] self.pandas_full_data = pd.DataFrame() self.monte_carlo_analyzer = MonteCarloAnalyzer() self.verbose = 1 self.output_config = {'plot_all': False, 'confusion_matrix': False, 'excel_summary': True, 'summary_of_run': True, 'tensorboard': F...
def add_model(self, model: Type[Model]): '\n Adds a cvnn.CvnnModel to the list to then compare between them\n ' self.models.append(model)
-5,031,837,254,433,821,000
Adds a cvnn.CvnnModel to the list to then compare between them
cvnn/montecarlo.py
add_model
NEGU93/cvnn
python
def add_model(self, model: Type[Model]): '\n \n ' self.models.append(model)
def run(self, x, y, data_summary: str='', real_cast_modes: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, validation_split: float=0.2, validation_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset)]]=None, test_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset...
3,118,653,797,893,880,300
This function is used to compare all models added with `self.add_model` method. Runs the iteration dataset (x, y). 1. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model. 2. Saves several files into ./log/montecarlo/date/of/run/ 2.1. run_summary.txt: Summary of the ...
cvnn/montecarlo.py
run
NEGU93/cvnn
python
def run(self, x, y, data_summary: str=, real_cast_modes: Optional[Union[(str, List[Optional[str]], Tuple[Optional[str]])]]=None, validation_split: float=0.2, validation_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset)]]=None, test_data: Optional[Union[(Tuple[(np.ndarray, np.ndarray)], data.Dataset)]...
def _save_summary_of_run(self, run_summary, data_summary): '\n Saves 2 files:\n - run_summary.txt: A user-friendly resume of the monte carlo run.\n - models_details.json: A full serialized version of the models.\n Contains info that lacks in the txt file like the loss or ...
2,147,652,564,802,560,800
Saves 2 files: - run_summary.txt: A user-friendly resume of the monte carlo run. - models_details.json: A full serialized version of the models. Contains info that lacks in the txt file like the loss or optimizer.
cvnn/montecarlo.py
_save_summary_of_run
NEGU93/cvnn
python
def _save_summary_of_run(self, run_summary, data_summary): '\n Saves 2 files:\n - run_summary.txt: A user-friendly resume of the monte carlo run.\n - models_details.json: A full serialized version of the models.\n Contains info that lacks in the txt file like the loss or ...
def __init__(self, complex_model: Type[Model], capacity_equivalent: bool=True, equiv_technique: str='ratio'): "\n :param complex_model: Complex keras model (ex: sequential)\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainab...
-8,439,983,612,307,735,000
:param complex_model: Complex keras model (ex: sequential) :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351) - True, it creates a capacity-equivalent model in terms ...
cvnn/montecarlo.py
__init__
NEGU93/cvnn
python
def __init__(self, complex_model: Type[Model], capacity_equivalent: bool=True, equiv_technique: str='ratio'): "\n :param complex_model: Complex keras model (ex: sequential)\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainab...
def require_collection_playable(handler): 'Decorator that checks if the user can play the given collection.' def test_can_play(self, collection_id, **kwargs): 'Check if the current user can play the collection.' actor = rights_manager.Actor(self.user_id) can_play = actor.can_play(rights...
9,035,655,413,190,753,000
Decorator that checks if the user can play the given collection.
core/controllers/collection_viewer.py
require_collection_playable
Himanshu1495/oppia
python
def require_collection_playable(handler): def test_can_play(self, collection_id, **kwargs): 'Check if the current user can play the collection.' actor = rights_manager.Actor(self.user_id) can_play = actor.can_play(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id) can_view...
def test_can_play(self, collection_id, **kwargs): 'Check if the current user can play the collection.' actor = rights_manager.Actor(self.user_id) can_play = actor.can_play(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id) can_view = actor.can_view(rights_manager.ACTIVITY_TYPE_COLLECTION, collectio...
8,139,909,462,468,710,000
Check if the current user can play the collection.
core/controllers/collection_viewer.py
test_can_play
Himanshu1495/oppia
python
def test_can_play(self, collection_id, **kwargs): actor = rights_manager.Actor(self.user_id) can_play = actor.can_play(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id) can_view = actor.can_view(rights_manager.ACTIVITY_TYPE_COLLECTION, collection_id) if (can_play and can_view): return...
@require_collection_playable def get(self, collection_id): 'Handles GET requests.' try: collection = collection_services.get_collection_by_id(collection_id) except Exception as e: raise self.PageNotFoundException(e) whitelisted_usernames = config_domain.WHITELISTED_COLLECTION_EDITOR_USER...
-5,600,260,206,156,374,000
Handles GET requests.
core/controllers/collection_viewer.py
get
Himanshu1495/oppia
python
@require_collection_playable def get(self, collection_id): try: collection = collection_services.get_collection_by_id(collection_id) except Exception as e: raise self.PageNotFoundException(e) whitelisted_usernames = config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value self.va...
def get(self, collection_id): 'Populates the data on the individual collection page.' allow_invalid_explorations = bool(self.request.get('allow_invalid_explorations')) try: collection_dict = collection_services.get_learner_collection_dict_by_id(collection_id, self.user_id, allow_invalid_explorations...
-5,690,707,837,412,497,000
Populates the data on the individual collection page.
core/controllers/collection_viewer.py
get
Himanshu1495/oppia
python
def get(self, collection_id): allow_invalid_explorations = bool(self.request.get('allow_invalid_explorations')) try: collection_dict = collection_services.get_learner_collection_dict_by_id(collection_id, self.user_id, allow_invalid_explorations=allow_invalid_explorations) except Exception as e:...
def to_getdist(nested_samples): 'Convert from anesthetic to getdist samples.\n\n Parameters\n ----------\n nested_samples: MCMCSamples or NestedSamples\n anesthetic samples to be converted\n\n Returns\n -------\n getdist_samples: getdist.mcsamples.MCSamples\n getdist equivalent sampl...
3,120,332,844,846,308,000
Convert from anesthetic to getdist samples. Parameters ---------- nested_samples: MCMCSamples or NestedSamples anesthetic samples to be converted Returns ------- getdist_samples: getdist.mcsamples.MCSamples getdist equivalent samples
anesthetic/convert.py
to_getdist
Stefan-Heimersheim/anesthetic
python
def to_getdist(nested_samples): 'Convert from anesthetic to getdist samples.\n\n Parameters\n ----------\n nested_samples: MCMCSamples or NestedSamples\n anesthetic samples to be converted\n\n Returns\n -------\n getdist_samples: getdist.mcsamples.MCSamples\n getdist equivalent sampl...
def __init__(self, cfg): '\n model: torch.nn.Module\n cfg: model-agnostic experiment configs\n ' super().__init__() self.cfg = cfg self.image = ('MAGNETOGRAM' in cfg.DATA.FEATURES) self.model = build_model(cfg) self.save_hyperparameters()
5,289,503,828,256,731,000
model: torch.nn.Module cfg: model-agnostic experiment configs
arnet/modeling/learner.py
__init__
ZeyuSun/flare-prediction-smarp
python
def __init__(self, cfg): '\n model: torch.nn.Module\n cfg: model-agnostic experiment configs\n ' super().__init__() self.cfg = cfg self.image = ('MAGNETOGRAM' in cfg.DATA.FEATURES) self.model = build_model(cfg) self.save_hyperparameters()
def grad_norm(self, norm_type: Union[(float, int, str)]) -> Dict[(str, float)]: "Compute each parameter's gradient's norm and their overall norm.\n\n The overall norm is computed over all gradients together, as if they\n were concatenated into a single vector.\n\n Args:\n norm_type: ...
6,898,759,577,125,780,000
Compute each parameter's gradient's norm and their overall norm. The overall norm is computed over all gradients together, as if they were concatenated into a single vector. Args: norm_type: The type of the used p-norm, cast to float if necessary. Can be ``'inf'`` for infinity norm. Return: norms: Th...
arnet/modeling/learner.py
grad_norm
ZeyuSun/flare-prediction-smarp
python
def grad_norm(self, norm_type: Union[(float, int, str)]) -> Dict[(str, float)]: "Compute each parameter's gradient's norm and their overall norm.\n\n The overall norm is computed over all gradients together, as if they\n were concatenated into a single vector.\n\n Args:\n norm_type: ...
def _set_by_path(tree, keys, value): 'Set a value in a nested object in tree by sequence of keys.' keys = keys.split(';') _get_by_path(tree, keys[:(- 1)])[keys[(- 1)]] = value
2,666,638,307,653,452,000
Set a value in a nested object in tree by sequence of keys.
code/utils/parse_config.py
_set_by_path
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
python
def _set_by_path(tree, keys, value): keys = keys.split(';') _get_by_path(tree, keys[:(- 1)])[keys[(- 1)]] = value
def _get_by_path(tree, keys): 'Access a nested object in tree by sequence of keys.' return reduce(getitem, keys, tree)
320,196,700,010,566,400
Access a nested object in tree by sequence of keys.
code/utils/parse_config.py
_get_by_path
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
python
def _get_by_path(tree, keys): return reduce(getitem, keys, tree)
def __init__(self, config, resume=None, modification=None, run_id=None): '\n class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving\n and logging module.\n :param config: Dict containing configurations, hyperparameters for tra...
-4,933,217,559,418,165,000
class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving and logging module. :param config: Dict containing configurations, hyperparameters for training. contents of `config.json` file for example. :param resume: String, path to the checkpoint being loa...
code/utils/parse_config.py
__init__
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
python
def __init__(self, config, resume=None, modification=None, run_id=None): '\n class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving\n and logging module.\n :param config: Dict containing configurations, hyperparameters for tra...
@classmethod def from_args(cls, args, options='', updates=dict()): '\n Initialize this class from some cli arguments. Used in train, test.\n ' for opt in options: args.add_argument(*opt.flags, default=None, type=opt.type) if (not isinstance(args, tuple)): args = args.parse_args...
1,624,221,919,907,510,500
Initialize this class from some cli arguments. Used in train, test.
code/utils/parse_config.py
from_args
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
python
@classmethod def from_args(cls, args, options=, updates=dict()): '\n \n ' for opt in options: args.add_argument(*opt.flags, default=None, type=opt.type) if (not isinstance(args, tuple)): args = args.parse_args() if (args.device is not None): os.environ['CUDA_VISIBLE...
def init_obj(self, name, module, *args, **kwargs): "\n Finds a function handle with the name given as 'type' in config, and returns the\n instance initialized with corresponding arguments given.\n\n `object = config.init_obj('name', module, a, b=1)`\n is equivalent to\n `object = ...
2,826,001,643,421,151,000
Finds a function handle with the name given as 'type' in config, and returns the instance initialized with corresponding arguments given. `object = config.init_obj('name', module, a, b=1)` is equivalent to `object = module.name(a, b=1)`
code/utils/parse_config.py
init_obj
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
python
def init_obj(self, name, module, *args, **kwargs): "\n Finds a function handle with the name given as 'type' in config, and returns the\n instance initialized with corresponding arguments given.\n\n `object = config.init_obj('name', module, a, b=1)`\n is equivalent to\n `object = ...
def init_ftn(self, name, module, *args, **kwargs): "\n Finds a function handle with the name given as 'type' in config, and returns the\n function with given arguments fixed with functools.partial.\n\n `function = config.init_ftn('name', module, a, b=1)`\n is equivalent to\n `func...
706,491,489,085,046,500
Finds a function handle with the name given as 'type' in config, and returns the function with given arguments fixed with functools.partial. `function = config.init_ftn('name', module, a, b=1)` is equivalent to `function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`.
code/utils/parse_config.py
init_ftn
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
python
def init_ftn(self, name, module, *args, **kwargs): "\n Finds a function handle with the name given as 'type' in config, and returns the\n function with given arguments fixed with functools.partial.\n\n `function = config.init_ftn('name', module, a, b=1)`\n is equivalent to\n `func...
def __getitem__(self, name): 'Access items like ordinary dict.' return self.config[name]
-3,024,819,929,258,913,300
Access items like ordinary dict.
code/utils/parse_config.py
__getitem__
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
python
def __getitem__(self, name): return self.config[name]
def _get_sorted_box_lims(boxes, box_init): 'Sort the uncertainties for each box in boxes based on a\n normalization given box_init. Unrestricted dimensions are dropped.\n The sorting is based on the normalization of the first box in boxes.\n\n Parameters\n ----------\n boxes : list of numpy structure...
-9,012,174,965,740,264,000
Sort the uncertainties for each box in boxes based on a normalization given box_init. Unrestricted dimensions are dropped. The sorting is based on the normalization of the first box in boxes. Parameters ---------- boxes : list of numpy structured arrays box_init : numpy structured array Returns ------- tuple with...
ema_workbench/analysis/scenario_discovery_util.py
_get_sorted_box_lims
brodderickrodriguez/EMA_lite
python
def _get_sorted_box_lims(boxes, box_init): 'Sort the uncertainties for each box in boxes based on a\n normalization given box_init. Unrestricted dimensions are dropped.\n The sorting is based on the normalization of the first box in boxes.\n\n Parameters\n ----------\n boxes : list of numpy structure...
def _make_box(x): '\n Make a box that encompasses all the data\n\n Parameters\n ----------\n x : DataFrame\n\n Returns\n -------\n DataFrame\n\n\n ' def limits(x): if (pd.api.types.is_integer_dtype(x.dtype) or pd.api.types.is_float_dtype(x.dtype)): return pd.Series([...
-2,165,776,446,668,643,600
Make a box that encompasses all the data Parameters ---------- x : DataFrame Returns ------- DataFrame
ema_workbench/analysis/scenario_discovery_util.py
_make_box
brodderickrodriguez/EMA_lite
python
def _make_box(x): '\n Make a box that encompasses all the data\n\n Parameters\n ----------\n x : DataFrame\n\n Returns\n -------\n DataFrame\n\n\n ' def limits(x): if (pd.api.types.is_integer_dtype(x.dtype) or pd.api.types.is_float_dtype(x.dtype)): return pd.Series([...
def _normalize(box_lim, box_init, uncertainties): 'Normalize the given box lim to the unit interval derived\n from box init for the specified uncertainties.\n\n Categorical uncertainties are normalized based on fractionated. So\n value specifies the fraction of categories in the box_lim.\n\n Parameters\...
-522,221,167,959,005,700
Normalize the given box lim to the unit interval derived from box init for the specified uncertainties. Categorical uncertainties are normalized based on fractionated. So value specifies the fraction of categories in the box_lim. Parameters ---------- box_lim : DataFrame box_init : DataFrame uncertainties : list of ...
ema_workbench/analysis/scenario_discovery_util.py
_normalize
brodderickrodriguez/EMA_lite
python
def _normalize(box_lim, box_init, uncertainties): 'Normalize the given box lim to the unit interval derived\n from box init for the specified uncertainties.\n\n Categorical uncertainties are normalized based on fractionated. So\n value specifies the fraction of categories in the box_lim.\n\n Parameters\...
def _determine_restricted_dims(box_limits, box_init): 'returns a list of dimensions that is restricted\n\n Parameters\n ----------\n box_limits : pd.DataFrame\n box_init : pd.DataFrame\n\n Returns\n -------\n list of str\n\n ' cols = box_init.columns.values restricted_dims = cols[(np...
4,333,435,090,552,701,000
returns a list of dimensions that is restricted Parameters ---------- box_limits : pd.DataFrame box_init : pd.DataFrame Returns ------- list of str
ema_workbench/analysis/scenario_discovery_util.py
_determine_restricted_dims
brodderickrodriguez/EMA_lite
python
def _determine_restricted_dims(box_limits, box_init): 'returns a list of dimensions that is restricted\n\n Parameters\n ----------\n box_limits : pd.DataFrame\n box_init : pd.DataFrame\n\n Returns\n -------\n list of str\n\n ' cols = box_init.columns.values restricted_dims = cols[(np...
def _determine_nr_restricted_dims(box_lims, box_init): '\n\n determine the number of restriced dimensions of a box given\n compared to the inital box that contains all the data\n\n Parameters\n ----------\n box_lims : structured numpy array\n a specific box limit\n box_init : structu...
-6,357,786,457,148,202,000
determine the number of restriced dimensions of a box given compared to the inital box that contains all the data Parameters ---------- box_lims : structured numpy array a specific box limit box_init : structured numpy array the initial box containing all data points Returns ------- int
ema_workbench/analysis/scenario_discovery_util.py
_determine_nr_restricted_dims
brodderickrodriguez/EMA_lite
python
def _determine_nr_restricted_dims(box_lims, box_init): '\n\n determine the number of restriced dimensions of a box given\n compared to the inital box that contains all the data\n\n Parameters\n ----------\n box_lims : structured numpy array\n a specific box limit\n box_init : structu...
def _compare(a, b): 'compare two boxes, for each dimension return True if the\n same and false otherwise' dtypesDesc = a.dtype.descr logical = np.ones(len(dtypesDesc), dtype=np.bool) for (i, entry) in enumerate(dtypesDesc): name = entry[0] logical[i] = ((logical[i] & (a[name][0] == b[...
7,529,231,224,578,261,000
compare two boxes, for each dimension return True if the same and false otherwise
ema_workbench/analysis/scenario_discovery_util.py
_compare
brodderickrodriguez/EMA_lite
python
def _compare(a, b): 'compare two boxes, for each dimension return True if the\n same and false otherwise' dtypesDesc = a.dtype.descr logical = np.ones(len(dtypesDesc), dtype=np.bool) for (i, entry) in enumerate(dtypesDesc): name = entry[0] logical[i] = ((logical[i] & (a[name][0] == b[...
def _in_box(x, boxlim): '\n\n returns the a boolean index indicated which data points are inside\n and which are outside of the given box_lims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxlim : pd.DataFrame\n\n Returns\n -------\n ndarray\n boolean 1D array\n\n Raises\n ...
-4,604,904,357,187,681,000
returns the a boolean index indicated which data points are inside and which are outside of the given box_lims Parameters ---------- x : pd.DataFrame boxlim : pd.DataFrame Returns ------- ndarray boolean 1D array Raises ------ Attribute error if not numbered columns are not pandas category dtype
ema_workbench/analysis/scenario_discovery_util.py
_in_box
brodderickrodriguez/EMA_lite
python
def _in_box(x, boxlim): '\n\n returns the a boolean index indicated which data points are inside\n and which are outside of the given box_lims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxlim : pd.DataFrame\n\n Returns\n -------\n ndarray\n boolean 1D array\n\n Raises\n ...
def _setup(results, classify, incl_unc=[]): 'helper function for setting up CART or PRIM\n\n Parameters\n ----------\n results : tuple of DataFrame and dict with numpy arrays\n the return from :meth:`perform_experiments`.\n classify : string, function or callable\n either a st...
6,892,984,485,061,205,000
helper function for setting up CART or PRIM Parameters ---------- results : tuple of DataFrame and dict with numpy arrays the return from :meth:`perform_experiments`. classify : string, function or callable either a string denoting the outcome of interest to use or a function. incl_unc ...
ema_workbench/analysis/scenario_discovery_util.py
_setup
brodderickrodriguez/EMA_lite
python
def _setup(results, classify, incl_unc=[]): 'helper function for setting up CART or PRIM\n\n Parameters\n ----------\n results : tuple of DataFrame and dict with numpy arrays\n the return from :meth:`perform_experiments`.\n classify : string, function or callable\n either a st...
def _calculate_quasip(x, y, box, Hbox, Tbox): '\n\n Parameters\n ----------\n x : DataFrame\n y : np.array\n box : DataFrame\n Hbox : int\n Tbox : int\n\n ' logical = _in_box(x, box) yi = y[logical] Tj = yi.shape[0] Hj = np.sum(yi) p = (Hj / Tj) Hbox = int(Hbox) T...
5,761,812,687,022,758,000
Parameters ---------- x : DataFrame y : np.array box : DataFrame Hbox : int Tbox : int
ema_workbench/analysis/scenario_discovery_util.py
_calculate_quasip
brodderickrodriguez/EMA_lite
python
def _calculate_quasip(x, y, box, Hbox, Tbox): '\n\n Parameters\n ----------\n x : DataFrame\n y : np.array\n box : DataFrame\n Hbox : int\n Tbox : int\n\n ' logical = _in_box(x, box) yi = y[logical] Tj = yi.shape[0] Hj = np.sum(yi) p = (Hj / Tj) Hbox = int(Hbox) T...
def plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims): ' helper function for pair wise scatter plotting\n\n Parameters\n ----------\n x : DataFrame\n the experiments\n y : numpy array\n the outcome of interest\n box_lim : DataFrame\n a boxlim\n box_init : D...
-772,414,657,591,447,600
helper function for pair wise scatter plotting Parameters ---------- x : DataFrame the experiments y : numpy array the outcome of interest box_lim : DataFrame a boxlim box_init : DataFrame restricted_dims : collection of strings list of uncertainties that define the boxlims
ema_workbench/analysis/scenario_discovery_util.py
plot_pair_wise_scatter
brodderickrodriguez/EMA_lite
python
def plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims): ' helper function for pair wise scatter plotting\n\n Parameters\n ----------\n x : DataFrame\n the experiments\n y : numpy array\n the outcome of interest\n box_lim : DataFrame\n a boxlim\n box_init : D...
def _setup_figure(uncs): '\n\n helper function for creating the basic layout for the figures that\n show the box lims.\n\n ' nr_unc = len(uncs) fig = plt.figure() ax = fig.add_subplot(111) rect = mpl.patches.Rectangle((0, (- 0.5)), 1, (nr_unc + 1.5), alpha=0.25, facecolor='#C0C0C0', edgecol...
6,302,861,723,883,544,000
helper function for creating the basic layout for the figures that show the box lims.
ema_workbench/analysis/scenario_discovery_util.py
_setup_figure
brodderickrodriguez/EMA_lite
python
def _setup_figure(uncs): '\n\n helper function for creating the basic layout for the figures that\n show the box lims.\n\n ' nr_unc = len(uncs) fig = plt.figure() ax = fig.add_subplot(111) rect = mpl.patches.Rectangle((0, (- 0.5)), 1, (nr_unc + 1.5), alpha=0.25, facecolor='#C0C0C0', edgecol...
def plot_box(boxlim, qp_values, box_init, uncs, coverage, density, ticklabel_formatter='{} ({})', boxlim_formatter='{: .2g}', table_formatter='{:.3g}'): 'Helper function for parallel coordinate style visualization\n of a box\n\n Parameters\n ----------\n boxlim : DataFrame\n qp_values : dict\n box...
3,736,344,928,917,715,000
Helper function for parallel coordinate style visualization of a box Parameters ---------- boxlim : DataFrame qp_values : dict box_init : DataFrame uncs : list coverage : float density : float ticklabel_formatter : str boxlim_formatter : str table_formatter : str Returns ------- a Figure instance
ema_workbench/analysis/scenario_discovery_util.py
plot_box
brodderickrodriguez/EMA_lite
python
def plot_box(boxlim, qp_values, box_init, uncs, coverage, density, ticklabel_formatter='{} ({})', boxlim_formatter='{: .2g}', table_formatter='{:.3g}'): 'Helper function for parallel coordinate style visualization\n of a box\n\n Parameters\n ----------\n boxlim : DataFrame\n qp_values : dict\n box...
def plot_ppt(peeling_trajectory): 'show the peeling and pasting trajectory in a figure' ax = host_subplot(111) ax.set_xlabel('peeling and pasting trajectory') par = ax.twinx() par.set_ylabel('nr. restricted dimensions') ax.plot(peeling_trajectory['mean'], label='mean') ax.plot(peeling_trajec...
-7,503,041,594,958,456,000
show the peeling and pasting trajectory in a figure
ema_workbench/analysis/scenario_discovery_util.py
plot_ppt
brodderickrodriguez/EMA_lite
python
def plot_ppt(peeling_trajectory): ax = host_subplot(111) ax.set_xlabel('peeling and pasting trajectory') par = ax.twinx() par.set_ylabel('nr. restricted dimensions') ax.plot(peeling_trajectory['mean'], label='mean') ax.plot(peeling_trajectory['mass'], label='mass') ax.plot(peeling_traje...
def plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis): 'Visualize the trade off between coverage and density. Color\n is used to denote the number of restricted dimensions.\n\n Parameters\n ----------\n cmap : valid matplotlib colormap\n\n Returns\n -------\n a Figure instance\n\n ' ...
-840,138,350,086,765,300
Visualize the trade off between coverage and density. Color is used to denote the number of restricted dimensions. Parameters ---------- cmap : valid matplotlib colormap Returns ------- a Figure instance
ema_workbench/analysis/scenario_discovery_util.py
plot_tradeoff
brodderickrodriguez/EMA_lite
python
def plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis): 'Visualize the trade off between coverage and density. Color\n is used to denote the number of restricted dimensions.\n\n Parameters\n ----------\n cmap : valid matplotlib colormap\n\n Returns\n -------\n a Figure instance\n\n ' ...
def plot_unc(box_init, xi, i, j, norm_box_lim, box_lim, u, ax, color=sns.color_palette()[0]): '\n\n Parameters:\n ----------\n xi : int\n the row at which to plot\n i : int\n the index of the uncertainty being plotted\n j : int\n the index of the box being plotted\n u : strin...
-4,350,063,930,766,987,000
Parameters: ---------- xi : int the row at which to plot i : int the index of the uncertainty being plotted j : int the index of the box being plotted u : string the uncertainty being plotted: ax : axes instance the ax on which to plot
ema_workbench/analysis/scenario_discovery_util.py
plot_unc
brodderickrodriguez/EMA_lite
python
def plot_unc(box_init, xi, i, j, norm_box_lim, box_lim, u, ax, color=sns.color_palette()[0]): '\n\n Parameters:\n ----------\n xi : int\n the row at which to plot\n i : int\n the index of the uncertainty being plotted\n j : int\n the index of the box being plotted\n u : strin...
def plot_boxes(x, boxes, together): 'Helper function for plotting multiple boxlims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxes : list of pd.DataFrame\n together : bool\n\n ' box_init = _make_box(x) (box_lims, uncs) = _get_sorted_box_lims(boxes, box_init) norm_box_lims = [_no...
6,861,939,631,656,800,000
Helper function for plotting multiple boxlims Parameters ---------- x : pd.DataFrame boxes : list of pd.DataFrame together : bool
ema_workbench/analysis/scenario_discovery_util.py
plot_boxes
brodderickrodriguez/EMA_lite
python
def plot_boxes(x, boxes, together): 'Helper function for plotting multiple boxlims\n\n Parameters\n ----------\n x : pd.DataFrame\n boxes : list of pd.DataFrame\n together : bool\n\n ' box_init = _make_box(x) (box_lims, uncs) = _get_sorted_box_lims(boxes, box_init) norm_box_lims = [_no...
@abc.abstractproperty def boxes(self): 'Property for getting a list of box limits' raise NotImplementedError
-4,880,963,140,910,533,000
Property for getting a list of box limits
ema_workbench/analysis/scenario_discovery_util.py
boxes
brodderickrodriguez/EMA_lite
python
@abc.abstractproperty def boxes(self): raise NotImplementedError
@abc.abstractproperty def stats(self): 'property for getting a list of dicts containing the statistics\n for each box' raise NotImplementedError
5,963,327,774,905,103,000
property for getting a list of dicts containing the statistics for each box
ema_workbench/analysis/scenario_discovery_util.py
stats
brodderickrodriguez/EMA_lite
python
@abc.abstractproperty def stats(self): 'property for getting a list of dicts containing the statistics\n for each box' raise NotImplementedError
def boxes_to_dataframe(self): 'convert boxes to pandas dataframe' boxes = self.boxes (box_lims, uncs) = _get_sorted_box_lims(boxes, _make_box(self.x)) nr_boxes = len(boxes) dtype = float index = ['box {}'.format((i + 1)) for i in range(nr_boxes)] for value in box_lims[0].dtypes: if (...
2,196,040,324,089,710,600
convert boxes to pandas dataframe
ema_workbench/analysis/scenario_discovery_util.py
boxes_to_dataframe
brodderickrodriguez/EMA_lite
python
def boxes_to_dataframe(self): boxes = self.boxes (box_lims, uncs) = _get_sorted_box_lims(boxes, _make_box(self.x)) nr_boxes = len(boxes) dtype = float index = ['box {}'.format((i + 1)) for i in range(nr_boxes)] for value in box_lims[0].dtypes: if (value == object): dtype...
def stats_to_dataframe(self): 'convert stats to pandas dataframe' stats = self.stats index = pd.Index(['box {}'.format((i + 1)) for i in range(len(stats))]) return pd.DataFrame(stats, index=index)
1,509,923,910,514,162,200
convert stats to pandas dataframe
ema_workbench/analysis/scenario_discovery_util.py
stats_to_dataframe
brodderickrodriguez/EMA_lite
python
def stats_to_dataframe(self): stats = self.stats index = pd.Index(['box {}'.format((i + 1)) for i in range(len(stats))]) return pd.DataFrame(stats, index=index)
def show_boxes(self, together=False): 'display boxes\n\n Parameters\n ----------\n together : bool, otional\n\n ' plot_boxes(self.x, self.boxes, together=together)
-1,717,583,655,820,089,600
display boxes Parameters ---------- together : bool, otional
ema_workbench/analysis/scenario_discovery_util.py
show_boxes
brodderickrodriguez/EMA_lite
python
def show_boxes(self, together=False): 'display boxes\n\n Parameters\n ----------\n together : bool, otional\n\n ' plot_boxes(self.x, self.boxes, together=together)
def read_code(filename): '读取code编码文件并输出为字典格式\n 1、支持json格式\n 2、支持本包规定的xlsx格式\n see alse to_code\n ' file_type = os.path.splitext(filename)[1][1:] if (file_type == 'json'): import json code = json.load(filename) return code d = pd.read_excel(filename, header=None) d...
2,747,203,315,166,959,600
读取code编码文件并输出为字典格式 1、支持json格式 2、支持本包规定的xlsx格式 see alse to_code
reportgen/questionnaire/questionnaire.py
read_code
brightgeng/reportgen
python
def read_code(filename): '读取code编码文件并输出为字典格式\n 1、支持json格式\n 2、支持本包规定的xlsx格式\n see alse to_code\n ' file_type = os.path.splitext(filename)[1][1:] if (file_type == 'json'): import json code = json.load(filename) return code d = pd.read_excel(filename, header=None) d...
def save_code(code, filename='code.xlsx'): 'code本地输出\n 1、输出为json格式,根据文件名自动识别\n 2、输出为Excel格式\n see also read_code\n ' save_type = os.path.splitext(filename)[1][1:] if (save_type == 'json'): code = pd.DataFrame(code) code.to_json(filename, force_ascii=False) return tmp ...
-741,657,556,108,953,100
code本地输出 1、输出为json格式,根据文件名自动识别 2、输出为Excel格式 see also read_code
reportgen/questionnaire/questionnaire.py
save_code
brightgeng/reportgen
python
def save_code(code, filename='code.xlsx'): 'code本地输出\n 1、输出为json格式,根据文件名自动识别\n 2、输出为Excel格式\n see also read_code\n ' save_type = os.path.splitext(filename)[1][1:] if (save_type == 'json'): code = pd.DataFrame(code) code.to_json(filename, force_ascii=False) return tmp ...
def dataText_to_code(df, sep, qqlist=None): '编码文本数据\n\n ' if (sep in [';', '┋']): qtype = '多选题' elif (sep in ['-->', '→']): qtype = '排序题' if (not qqlist): qqlist = df.columns code = {} for qq in qqlist: tmp = df[qq].map((lambda x: (x.split(sep) if isinstance(x,...
-3,276,474,069,112,958,000
编码文本数据
reportgen/questionnaire/questionnaire.py
dataText_to_code
brightgeng/reportgen
python
def dataText_to_code(df, sep, qqlist=None): '\n\n ' if (sep in [';', '┋']): qtype = '多选题' elif (sep in ['-->', '→']): qtype = '排序题' if (not qqlist): qqlist = df.columns code = {} for qq in qqlist: tmp = df[qq].map((lambda x: (x.split(sep) if isinstance(x, str) ...
def dataCode_to_text(df, code=None): '将按序号数据转换成文本\n\n ' if (df.max().max() > 1): sep = '→' else: sep = '┋' if code: df = df.rename(code) qlist = list(df.columns) df['text'] = np.nan if (sep in ['┋']): for i in df.index: w = (df.loc[i, :] == 1) ...
2,083,806,469,545,990,400
将按序号数据转换成文本
reportgen/questionnaire/questionnaire.py
dataCode_to_text
brightgeng/reportgen
python
def dataCode_to_text(df, code=None): '\n\n ' if (df.max().max() > 1): sep = '→' else: sep = '┋' if code: df = df.rename(code) qlist = list(df.columns) df['text'] = np.nan if (sep in ['┋']): for i in df.index: w = (df.loc[i, :] == 1) ...
def var_combine(data, code, qq1, qq2, sep=',', qnum_new=None, qname_new=None): "将两个变量组合成一个变量\n 例如:\n Q1:'性别',Q2: 年龄\n 组合后生成:\n 1、男_16~19岁\n 2、男_20岁~40岁\n 3、女_16~19岁\n 4、女_20~40岁\n " if (qnum_new is None): if ('Q' == qq2[0]): qnum_new = ((qq1 + '_') + qq2[1:]) ...
-7,023,054,160,902,175,000
将两个变量组合成一个变量 例如: Q1:'性别',Q2: 年龄 组合后生成: 1、男_16~19岁 2、男_20岁~40岁 3、女_16~19岁 4、女_20~40岁
reportgen/questionnaire/questionnaire.py
var_combine
brightgeng/reportgen
python
def var_combine(data, code, qq1, qq2, sep=',', qnum_new=None, qname_new=None): "将两个变量组合成一个变量\n 例如:\n Q1:'性别',Q2: 年龄\n 组合后生成:\n 1、男_16~19岁\n 2、男_20岁~40岁\n 3、女_16~19岁\n 4、女_20~40岁\n " if (qnum_new is None): if ('Q' == qq2[0]): qnum_new = ((qq1 + '_') + qq2[1:]) ...
def wenjuanwang(filepath='.\\data', encoding='gbk'): '问卷网数据导入和编码\n 输入:\n filepath:\n 列表,[0]为按文本数据路径,[1]为按序号文本,[2]为编码文件\n 文件夹路径,函数会自动在文件夹下搜寻相关数据\n 输出:\n (data,code):\n data为按序号的数据,题目都替换成了Q_n\n code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据\n ' if isinstance(filepath, list...
575,949,123,954,226,600
问卷网数据导入和编码 输入: filepath: 列表,[0]为按文本数据路径,[1]为按序号文本,[2]为编码文件 文件夹路径,函数会自动在文件夹下搜寻相关数据 输出: (data,code): data为按序号的数据,题目都替换成了Q_n code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据
reportgen/questionnaire/questionnaire.py
wenjuanwang
brightgeng/reportgen
python
def wenjuanwang(filepath='.\\data', encoding='gbk'): '问卷网数据导入和编码\n 输入:\n filepath:\n 列表,[0]为按文本数据路径,[1]为按序号文本,[2]为编码文件\n 文件夹路径,函数会自动在文件夹下搜寻相关数据\n 输出:\n (data,code):\n data为按序号的数据,题目都替换成了Q_n\n code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据\n ' if isinstance(filepath, list...
def wenjuanxing(filepath='.\\data', headlen=6): '问卷星数据导入和编码\n 输入:\n filepath:\n 列表, filepath[0]: (23_22_0.xls)为按文本数据路径,filepath[1]: (23_22_2.xls)为按序号文本\n 文件夹路径,函数会自动在文件夹下搜寻相关数据,优先为\\d+_\\d+_0.xls和\\d+_\\d+_2.xls\n headlen: 问卷星数据基础信息的列数\n 输出:\n (data,code):\n data为按序号的数据,题目都替换成了Q_...
-4,022,388,977,035,539,000
问卷星数据导入和编码 输入: filepath: 列表, filepath[0]: (23_22_0.xls)为按文本数据路径,filepath[1]: (23_22_2.xls)为按序号文本 文件夹路径,函数会自动在文件夹下搜寻相关数据,优先为\d+_\d+_0.xls和\d+_\d+_2.xls headlen: 问卷星数据基础信息的列数 输出: (data,code): data为按序号的数据,题目都替换成了Q_n code为数据编码,可利用函数to_code()导出为json格式或者Excel格式数据
reportgen/questionnaire/questionnaire.py
wenjuanxing
brightgeng/reportgen
python
def wenjuanxing(filepath='.\\data', headlen=6): '问卷星数据导入和编码\n 输入:\n filepath:\n 列表, filepath[0]: (23_22_0.xls)为按文本数据路径,filepath[1]: (23_22_2.xls)为按序号文本\n 文件夹路径,函数会自动在文件夹下搜寻相关数据,优先为\\d+_\\d+_0.xls和\\d+_\\d+_2.xls\n headlen: 问卷星数据基础信息的列数\n 输出:\n (data,code):\n data为按序号的数据,题目都替换成了Q_...
def load_data(method='filedialog', **kwargs): '导入问卷数据\n # 暂时只支持已编码的和问卷星数据\n 1、支持路径搜寻\n 2、支持自由选择文件\n method:\n -filedialog: 打开文件窗口选择\n -pathsearch:自带搜索路径,需提供filepath\n ' if (method == 'filedialog'): import tkinter as tk from tkinter.filedialog import askopenfilenames ...
4,276,380,133,315,128,000
导入问卷数据 # 暂时只支持已编码的和问卷星数据 1、支持路径搜寻 2、支持自由选择文件 method: -filedialog: 打开文件窗口选择 -pathsearch:自带搜索路径,需提供filepath
reportgen/questionnaire/questionnaire.py
load_data
brightgeng/reportgen
python
def load_data(method='filedialog', **kwargs): '导入问卷数据\n # 暂时只支持已编码的和问卷星数据\n 1、支持路径搜寻\n 2、支持自由选择文件\n method:\n -filedialog: 打开文件窗口选择\n -pathsearch:自带搜索路径,需提供filepath\n ' if (method == 'filedialog'): import tkinter as tk from tkinter.filedialog import askopenfilenames ...
def levenshtein(s, t): "'' From Wikipedia article; Iterative with two matrix rows. " if (s == t): return 0 elif (len(s) == 0): return len(t) elif (len(t) == 0): return len(s) v0 = ([None] * (len(t) + 1)) v1 = ([None] * (len(t) + 1)) for i in range(len(v0)): v0...
-6,058,263,141,906,959,000
'' From Wikipedia article; Iterative with two matrix rows.
reportgen/questionnaire/questionnaire.py
levenshtein
brightgeng/reportgen
python
def levenshtein(s, t): " " if (s == t): return 0 elif (len(s) == 0): return len(t) elif (len(t) == 0): return len(s) v0 = ([None] * (len(t) + 1)) v1 = ([None] * (len(t) + 1)) for i in range(len(v0)): v0[i] = i for i in range(len(s)): v1[0] = (i + 1...
def code_similar(code1, code2): '\n 题目内容相似度用最小编辑距离来度量\n 选项相似度分为几种\n 1、完全相同:1\n 2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2\n 2、多选题/排序题:不考虑序号,共同变量超过一半即可:3\n 3、矩阵单选题:code_r 暂时只考虑完全匹配\n 4、其他情况为0\n\n ' code_distance_min = pd.DataFrame(index=code1.keys(), columns=['qnum', 'similar_content', 'similar_code...
2,684,696,960,743,818,000
题目内容相似度用最小编辑距离来度量 选项相似度分为几种 1、完全相同:1 2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2 2、多选题/排序题:不考虑序号,共同变量超过一半即可:3 3、矩阵单选题:code_r 暂时只考虑完全匹配 4、其他情况为0
reportgen/questionnaire/questionnaire.py
code_similar
brightgeng/reportgen
python
def code_similar(code1, code2): '\n 题目内容相似度用最小编辑距离来度量\n 选项相似度分为几种\n 1、完全相同:1\n 2、单选题:暂时只考虑序号和值都相等的,且共同变量超过一半:2\n 2、多选题/排序题:不考虑序号,共同变量超过一半即可:3\n 3、矩阵单选题:code_r 暂时只考虑完全匹配\n 4、其他情况为0\n\n ' code_distance_min = pd.DataFrame(index=code1.keys(), columns=['qnum', 'similar_content', 'similar_code...
def data_merge(ques1, ques2, qlist1=None, qlist2=None, name1='ques1', name2='ques2', mergeqnum='Q0', similar_threshold=70): '合并两份数据\n ques1: 列表,[data1,code1]\n ques2: 列表,[data2,code2]\n ' (data1, code1) = ques1 (data2, code2) = ques2 if ((qlist1 is None) or (qlist2 is None)): qlist1 = [...
6,126,437,658,393,545,000
合并两份数据 ques1: 列表,[data1,code1] ques2: 列表,[data2,code2]
reportgen/questionnaire/questionnaire.py
data_merge
brightgeng/reportgen
python
def data_merge(ques1, ques2, qlist1=None, qlist2=None, name1='ques1', name2='ques2', mergeqnum='Q0', similar_threshold=70): '合并两份数据\n ques1: 列表,[data1,code1]\n ques2: 列表,[data2,code2]\n ' (data1, code1) = ques1 (data2, code2) = ques2 if ((qlist1 is None) or (qlist2 is None)): qlist1 = [...
def clean_ftime(ftime, cut_percent=0.25): '\n ftime 是完成问卷的秒数\n 思路:\n 1、只考虑截断问卷完成时间较小的样本\n 2、找到完成时间变化的拐点,即需要截断的时间点\n 返回:r\n 建议截断<r的样本\n ' t_min = int(ftime.min()) t_cut = int(ftime.quantile(cut_percent)) x = np.array(range(t_min, t_cut)) y = np.array([len(ftime[(ftime <= i)]) for...
-6,333,484,139,126,520,000
ftime 是完成问卷的秒数 思路: 1、只考虑截断问卷完成时间较小的样本 2、找到完成时间变化的拐点,即需要截断的时间点 返回:r 建议截断<r的样本
reportgen/questionnaire/questionnaire.py
clean_ftime
brightgeng/reportgen
python
def clean_ftime(ftime, cut_percent=0.25): '\n ftime 是完成问卷的秒数\n 思路:\n 1、只考虑截断问卷完成时间较小的样本\n 2、找到完成时间变化的拐点,即需要截断的时间点\n 返回:r\n 建议截断<r的样本\n ' t_min = int(ftime.min()) t_cut = int(ftime.quantile(cut_percent)) x = np.array(range(t_min, t_cut)) y = np.array([len(ftime[(ftime <= i)]) for...
def data_auto_code(data): '智能判断问卷数据\n 输入\n data: 数据框,列名需要满足Qi或者Qi_\n 输出:\n code: 自动编码\n ' data = pd.DataFrame(data) columns = data.columns columns = [c for c in columns if re.match('Q\\d+', c)] code = {} for cc in columns: if ('_' not in cc): key = cc e...
7,994,973,602,825,367,000
智能判断问卷数据 输入 data: 数据框,列名需要满足Qi或者Qi_ 输出: code: 自动编码
reportgen/questionnaire/questionnaire.py
data_auto_code
brightgeng/reportgen
python
def data_auto_code(data): '智能判断问卷数据\n 输入\n data: 数据框,列名需要满足Qi或者Qi_\n 输出:\n code: 自动编码\n ' data = pd.DataFrame(data) columns = data.columns columns = [c for c in columns if re.match('Q\\d+', c)] code = {} for cc in columns: if ('_' not in cc): key = cc e...
def save_data(data, filename=u'data.xlsx', code=None): '保存问卷数据到本地\n 根据filename后缀选择相应的格式保存\n 如果有code,则保存按文本数据\n ' savetype = os.path.splitext(filename)[1][1:] data1 = data.copy() if code: for qq in code.keys(): qtype = code[qq]['qtype'] qlist = code[qq]['qlist'] ...
5,844,349,184,456,264,000
保存问卷数据到本地 根据filename后缀选择相应的格式保存 如果有code,则保存按文本数据
reportgen/questionnaire/questionnaire.py
save_data
brightgeng/reportgen
python
def save_data(data, filename=u'data.xlsx', code=None): '保存问卷数据到本地\n 根据filename后缀选择相应的格式保存\n 如果有code,则保存按文本数据\n ' savetype = os.path.splitext(filename)[1][1:] data1 = data.copy() if code: for qq in code.keys(): qtype = code[qq]['qtype'] qlist = code[qq]['qlist'] ...
def sa_to_ma(data): '单选题数据转换成多选题数据\n data是单选题数据, 要求非有效列别为nan\n 可以使用内置函数pd.get_dummies()代替\n ' if isinstance(data, pd.core.frame.DataFrame): data = data[data.columns[0]] categorys = data[data.notnull()].unique() try: categorys = sorted(categorys) except: pass data...
-8,025,656,272,193,248,000
单选题数据转换成多选题数据 data是单选题数据, 要求非有效列别为nan 可以使用内置函数pd.get_dummies()代替
reportgen/questionnaire/questionnaire.py
sa_to_ma
brightgeng/reportgen
python
def sa_to_ma(data): '单选题数据转换成多选题数据\n data是单选题数据, 要求非有效列别为nan\n 可以使用内置函数pd.get_dummies()代替\n ' if isinstance(data, pd.core.frame.DataFrame): data = data[data.columns[0]] categorys = data[data.notnull()].unique() try: categorys = sorted(categorys) except: pass data...
def to_dummpy(data, code, qqlist=None, qtype_new='多选题', ignore_open=True): '转化成哑变量\n 将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题\n 返回一个很大的只有0和1的数据\n ' if (qqlist is None): qqlist = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0]))) bdata = pd.DataFrame() bcode = {} for qq in qqlist: ...
840,683,149,439,387,100
转化成哑变量 将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题 返回一个很大的只有0和1的数据
reportgen/questionnaire/questionnaire.py
to_dummpy
brightgeng/reportgen
python
def to_dummpy(data, code, qqlist=None, qtype_new='多选题', ignore_open=True): '转化成哑变量\n 将数据中所有的单选题全部转化成哑变量,另外剔除掉开放题和填空题\n 返回一个很大的只有0和1的数据\n ' if (qqlist is None): qqlist = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0]))) bdata = pd.DataFrame() bcode = {} for qq in qqlist: ...
def qdata_flatten(data, code, quesid=None, userid_begin=None): '将问卷数据展平,字段如下\n userid: 用户ID\n quesid: 问卷ID\n qnum: 题号\n qname: 题目内容\n qtype: 题目类型\n samplelen:题目的样本数\n itemnum: 选项序号\n itemname: 选项内容\n code: 用户的选择\n codename: 用户选择的具体值\n count: 计数\n percent(%): 计数占比(百分比)\n ' ...
8,205,630,172,292,489,000
将问卷数据展平,字段如下 userid: 用户ID quesid: 问卷ID qnum: 题号 qname: 题目内容 qtype: 题目类型 samplelen:题目的样本数 itemnum: 选项序号 itemname: 选项内容 code: 用户的选择 codename: 用户选择的具体值 count: 计数 percent(%): 计数占比(百分比)
reportgen/questionnaire/questionnaire.py
qdata_flatten
brightgeng/reportgen
python
def qdata_flatten(data, code, quesid=None, userid_begin=None): '将问卷数据展平,字段如下\n userid: 用户ID\n quesid: 问卷ID\n qnum: 题号\n qname: 题目内容\n qtype: 题目类型\n samplelen:题目的样本数\n itemnum: 选项序号\n itemname: 选项内容\n code: 用户的选择\n codename: 用户选择的具体值\n count: 计数\n percent(%): 计数占比(百分比)\n ' ...
def sample_size_cal(interval, N, alpha=0.05): '调研样本量的计算\n 参考:https://www.surveysystem.com/sscalc.htm\n sample_size_cal(interval,N,alpha=0.05)\n 输入:\n interval: 误差范围,例如0.03\n N: 总体的大小,一般1万以上就没啥差别啦\n alpha:置信水平,默认95%\n ' import scipy.stats as stats p = stats.norm.ppf((1 - (alpha / 2))) ...
906,193,507,839,740,700
调研样本量的计算 参考:https://www.surveysystem.com/sscalc.htm sample_size_cal(interval,N,alpha=0.05) 输入: interval: 误差范围,例如0.03 N: 总体的大小,一般1万以上就没啥差别啦 alpha:置信水平,默认95%
reportgen/questionnaire/questionnaire.py
sample_size_cal
brightgeng/reportgen
python
def sample_size_cal(interval, N, alpha=0.05): '调研样本量的计算\n 参考:https://www.surveysystem.com/sscalc.htm\n sample_size_cal(interval,N,alpha=0.05)\n 输入:\n interval: 误差范围,例如0.03\n N: 总体的大小,一般1万以上就没啥差别啦\n alpha:置信水平,默认95%\n ' import scipy.stats as stats p = stats.norm.ppf((1 - (alpha / 2))) ...
def gof_test(fo, fe=None, alpha=0.05): '拟合优度检验\n 输入:\n fo:观察频数\n fe:期望频数,缺省为平均数\n 返回:\n 1: 样本与总体有差异\n 0:样本与总体无差异\n 例子:\n gof_test(np.array([0.3,0.4,0.3])*222)\n ' import scipy.stats as stats fo = np.array(fo).flatten() C = len(fo) if (not fe): N = fo.sum() ...
-1,421,774,208,672,722,700
拟合优度检验 输入: fo:观察频数 fe:期望频数,缺省为平均数 返回: 1: 样本与总体有差异 0:样本与总体无差异 例子: gof_test(np.array([0.3,0.4,0.3])*222)
reportgen/questionnaire/questionnaire.py
gof_test
brightgeng/reportgen
python
def gof_test(fo, fe=None, alpha=0.05): '拟合优度检验\n 输入:\n fo:观察频数\n fe:期望频数,缺省为平均数\n 返回:\n 1: 样本与总体有差异\n 0:样本与总体无差异\n 例子:\n gof_test(np.array([0.3,0.4,0.3])*222)\n ' import scipy.stats as stats fo = np.array(fo).flatten() C = len(fo) if (not fe): N = fo.sum() ...
def fisher_exact(fo, alpha=0.05): 'fisher_exact 显著性检验函数\n 此处采用的是调用R的解决方案,需要安装包 pyper\n python解决方案参见\n https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/\n 但还有些问题,所以没用.\n ' import pyper as pr r = pr.R(use_pandas=True, use_numpy=True) r.assign('fo', fo) r('b<-fisher.test(f...
8,313,948,059,721,519,000
fisher_exact 显著性检验函数 此处采用的是调用R的解决方案,需要安装包 pyper python解决方案参见 https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/ 但还有些问题,所以没用.
reportgen/questionnaire/questionnaire.py
fisher_exact
brightgeng/reportgen
python
def fisher_exact(fo, alpha=0.05): 'fisher_exact 显著性检验函数\n 此处采用的是调用R的解决方案,需要安装包 pyper\n python解决方案参见\n https://mrnoutahi.com/2016/01/03/Fisher-exac-test-for-mxn-table/\n 但还有些问题,所以没用.\n ' import pyper as pr r = pr.R(use_pandas=True, use_numpy=True) r.assign('fo', fo) r('b<-fisher.test(f...
def anova(data, formula): '方差分析\n 输入\n --data: DataFrame格式,包含数值型变量和分类型变量\n --formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)]\n\n 返回[方差分析表]\n [总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]\n --df:自由度\n --sum_sq:误差平方和\n --mean_sq:误差平方和/对应的自由度\n --F:mean_sq之比\n --PR(>F):p值,...
3,996,825,466,765,998,600
方差分析 输入 --data: DataFrame格式,包含数值型变量和分类型变量 --formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)] 返回[方差分析表] [总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异] --df:自由度 --sum_sq:误差平方和 --mean_sq:误差平方和/对应的自由度 --F:mean_sq之比 --PR(>F):p值,比如<0.05则代表有显著性差异
reportgen/questionnaire/questionnaire.py
anova
brightgeng/reportgen
python
def anova(data, formula): '方差分析\n 输入\n --data: DataFrame格式,包含数值型变量和分类型变量\n --formula:变量之间的关系,如:数值型变量~C(分类型变量1)[+C(分类型变量1)[+C(分类型变量1):(分类型变量1)]\n\n 返回[方差分析表]\n [总体的方差来源于组内方差和组间方差,通过比较组间方差和组内方差的比来推断两者的差异]\n --df:自由度\n --sum_sq:误差平方和\n --mean_sq:误差平方和/对应的自由度\n --F:mean_sq之比\n --PR(>F):p值,...
def mca(X, N=2): "对应分析函数,暂时支持双因素\n X:观察频数表\n N:返回的维数,默认2维\n 可以通过scatter函数绘制:\n fig=scatter([pr,pc])\n fig.savefig('mca.png')\n " from scipy.linalg import diagsvd S = X.sum().sum() Z = (X / S) r = Z.sum(axis=1) c = Z.sum() D_r = np.diag((1 / np.sqrt(r))) Z_c = (Z - np.ou...
8,680,992,238,391,971,000
对应分析函数,暂时支持双因素 X:观察频数表 N:返回的维数,默认2维 可以通过scatter函数绘制: fig=scatter([pr,pc]) fig.savefig('mca.png')
reportgen/questionnaire/questionnaire.py
mca
brightgeng/reportgen
python
def mca(X, N=2): "对应分析函数,暂时支持双因素\n X:观察频数表\n N:返回的维数,默认2维\n 可以通过scatter函数绘制:\n fig=scatter([pr,pc])\n fig.savefig('mca.png')\n " from scipy.linalg import diagsvd S = X.sum().sum() Z = (X / S) r = Z.sum(axis=1) c = Z.sum() D_r = np.diag((1 / np.sqrt(r))) Z_c = (Z - np.ou...
def cluster(data, code, cluster_qq, n_clusters='auto', max_clusters=7): '对态度题进行聚类\n ' from sklearn.cluster import KMeans from sklearn import metrics qq_max = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0])))[(- 1)] new_cluster = 'Q{}'.format((int(re.findall('\\d+', qq_max)[0]) + 1)) ...
1,234,331,575,149,067,300
对态度题进行聚类
reportgen/questionnaire/questionnaire.py
cluster
brightgeng/reportgen
python
def cluster(data, code, cluster_qq, n_clusters='auto', max_clusters=7): '\n ' from sklearn.cluster import KMeans from sklearn import metrics qq_max = sorted(code, key=(lambda x: int(re.findall('\\d+', x)[0])))[(- 1)] new_cluster = 'Q{}'.format((int(re.findall('\\d+', qq_max)[0]) + 1)) qlist =...
def scatter(data, legend=False, title=None, font_ch=None, find_path=None): '\n 绘制带数据标签的散点图\n ' import matplotlib.font_manager as fm if (font_ch is None): fontlist = ['calibri.ttf', 'simfang.ttf', 'simkai.ttf', 'simhei.ttf', 'simsun.ttc', 'msyh.ttf', 'msyh.ttc'] myfont = '' if (...
-3,198,626,937,060,132,000
绘制带数据标签的散点图
reportgen/questionnaire/questionnaire.py
scatter
brightgeng/reportgen
python
def scatter(data, legend=False, title=None, font_ch=None, find_path=None): '\n \n ' import matplotlib.font_manager as fm if (font_ch is None): fontlist = ['calibri.ttf', 'simfang.ttf', 'simkai.ttf', 'simhei.ttf', 'simsun.ttc', 'msyh.ttf', 'msyh.ttc'] myfont = if (not find_path...
def sankey(df, filename=None): 'SanKey图绘制\n df的列是左节点,行是右节点\n 注:暂时没找到好的Python方法,所以只生成R语言所需数据\n 返回links 和 nodes\n # R code 参考\n library(networkD3)\n dd=read.csv(\'price_links.csv\')\n links<-data.frame(source=dd$from,target=dd$to,value=dd$value)\n nodes=read.csv(\'price_nodes.csv\',encoding = ...
-9,141,936,434,518,142,000
SanKey图绘制 df的列是左节点,行是右节点 注:暂时没找到好的Python方法,所以只生成R语言所需数据 返回links 和 nodes # R code 参考 library(networkD3) dd=read.csv('price_links.csv') links<-data.frame(source=dd$from,target=dd$to,value=dd$value) nodes=read.csv('price_nodes.csv',encoding = 'UTF-8') nodes<-nodes['name'] Energy=c(links=links,nodes=nodes) sankeyNetwork(Li...
reportgen/questionnaire/questionnaire.py
sankey
brightgeng/reportgen
python
def sankey(df, filename=None): 'SanKey图绘制\n df的列是左节点,行是右节点\n 注:暂时没找到好的Python方法,所以只生成R语言所需数据\n 返回links 和 nodes\n # R code 参考\n library(networkD3)\n dd=read.csv(\'price_links.csv\')\n links<-data.frame(source=dd$from,target=dd$to,value=dd$value)\n nodes=read.csv(\'price_nodes.csv\',encoding = ...
def table(data, code, total=True): "\n 单个题目描述统计\n code是data的编码,列数大于1\n 返回字典格式数据:\n 'fop':百分比, 对于单选题和为1,多选题分母为样本数\n 'fo': 观察频数表,其中添加了合计项\n 'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有\n " qtype = code['qtype'] index = code['qlist'] data = pd.DataFrame(data) sample_len = dat...
-7,405,818,918,458,051,000
单个题目描述统计 code是data的编码,列数大于1 返回字典格式数据: 'fop':百分比, 对于单选题和为1,多选题分母为样本数 'fo': 观察频数表,其中添加了合计项 'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有
reportgen/questionnaire/questionnaire.py
table
brightgeng/reportgen
python
def table(data, code, total=True): "\n 单个题目描述统计\n code是data的编码,列数大于1\n 返回字典格式数据:\n 'fop':百分比, 对于单选题和为1,多选题分母为样本数\n 'fo': 观察频数表,其中添加了合计项\n 'fw': 加权频数表,可实现平均值、T2B等功能,仅当code中存在关键词'weight'时才有\n " qtype = code['qtype'] index = code['qlist'] data = pd.DataFrame(data) sample_len = dat...
def crosstab(data_index, data_column, code_index=None, code_column=None, qtype=None, total=True): "适用于问卷数据的交叉统计\n 输入参数:\n data_index: 因变量,放在行中\n data_column:自变量,放在列中\n code_index: dict格式,指定data_index的编码等信息\n code_column: dict格式,指定data_column的编码等信息\n qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个...
-7,027,066,733,633,316,000
适用于问卷数据的交叉统计 输入参数: data_index: 因变量,放在行中 data_column:自变量,放在列中 code_index: dict格式,指定data_index的编码等信息 code_column: dict格式,指定data_column的编码等信息 qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个的 返回字典格式数据 'fop':默认的百分比表,行是data_index,列是data_column 'fo':原始频数表,且添加了总体项 'fw': 加权平均值 简要说明: 因为要处理各类题型,这里将单选题处理为多选题 fo:观察频数表 nij是同时选择了R...
reportgen/questionnaire/questionnaire.py
crosstab
brightgeng/reportgen
python
def crosstab(data_index, data_column, code_index=None, code_column=None, qtype=None, total=True): "适用于问卷数据的交叉统计\n 输入参数:\n data_index: 因变量,放在行中\n data_column:自变量,放在列中\n code_index: dict格式,指定data_index的编码等信息\n code_column: dict格式,指定data_column的编码等信息\n qtype: 给定两个数据的题目类型,若为字符串则给定data_index,若为列表,则给定两个...
def qtable(data, *args, **kwargs): "简易频数统计函数\n 输入\n data:数据框,可以是所有的数据\n code:数据编码\n q1: 题目序号\n q2: 题目序号\n # 单个变量的频数统计\n qtable(data,code,'Q1')\n # 两个变量的交叉统计\n qtable(data,code,'Q1','Q2')\n\n " code = None q1 = None q2 = None for a in args: if (isinstance(a, str)...
-5,886,488,093,965,830,000
简易频数统计函数 输入 data:数据框,可以是所有的数据 code:数据编码 q1: 题目序号 q2: 题目序号 # 单个变量的频数统计 qtable(data,code,'Q1') # 两个变量的交叉统计 qtable(data,code,'Q1','Q2')
reportgen/questionnaire/questionnaire.py
qtable
brightgeng/reportgen
python
def qtable(data, *args, **kwargs): "简易频数统计函数\n 输入\n data:数据框,可以是所有的数据\n code:数据编码\n q1: 题目序号\n q2: 题目序号\n # 单个变量的频数统计\n qtable(data,code,'Q1')\n # 两个变量的交叉统计\n qtable(data,code,'Q1','Q2')\n\n " code = None q1 = None q2 = None for a in args: if (isinstance(a, str)...
def association_rules(df, minSup=0.08, minConf=0.4, Y=None): '关联规则分析\n df: DataFrame,bool 类型。是一个类似购物篮数据 \n\n ' try: df = df.astype(bool) except: print('df 必须为 bool 类型') return (None, None, None) columns = np.array(df.columns) gen = associate.frequent_itemsets(np.array...
-8,448,162,522,908,191,000
关联规则分析 df: DataFrame,bool 类型。是一个类似购物篮数据
reportgen/questionnaire/questionnaire.py
association_rules
brightgeng/reportgen
python
def association_rules(df, minSup=0.08, minConf=0.4, Y=None): '关联规则分析\n df: DataFrame,bool 类型。是一个类似购物篮数据 \n\n ' try: df = df.astype(bool) except: print('df 必须为 bool 类型') return (None, None, None) columns = np.array(df.columns) gen = associate.frequent_itemsets(np.array...
def contingency(fo, alpha=0.05): " 列联表分析:(观察频数表分析)\n # 预增加一个各类别之间的距离\n 1、生成TGI指数、TWI指数、CHI指数\n 2、独立性检验\n 3、当两个变量不显著时,考虑单个之间的显著性\n 返回字典格式\n chi_test: 卡方检验结果,1:显著;0:不显著;-1:期望值不满足条件\n coef: 包含chi2、p值、V相关系数\n log: 记录一些异常情况\n FO: 观察频数\n FE: 期望频数\n TGI:fo/fe\n TWI:fo-fe\n CHI:sqrt((...
-9,149,343,796,111,427,000
列联表分析:(观察频数表分析) # 预增加一个各类别之间的距离 1、生成TGI指数、TWI指数、CHI指数 2、独立性检验 3、当两个变量不显著时,考虑单个之间的显著性 返回字典格式 chi_test: 卡方检验结果,1:显著;0:不显著;-1:期望值不满足条件 coef: 包含chi2、p值、V相关系数 log: 记录一些异常情况 FO: 观察频数 FE: 期望频数 TGI:fo/fe TWI:fo-fe CHI:sqrt((fo-fe)(fo/fe-1))*sign(fo-fe) significant:{ .'result': 显著性结果[1(显著),0(不显著),-1(fe小于5的过多)] .'pvalue': .'meth...
reportgen/questionnaire/questionnaire.py
contingency
brightgeng/reportgen
python
def contingency(fo, alpha=0.05): " 列联表分析:(观察频数表分析)\n # 预增加一个各类别之间的距离\n 1、生成TGI指数、TWI指数、CHI指数\n 2、独立性检验\n 3、当两个变量不显著时,考虑单个之间的显著性\n 返回字典格式\n chi_test: 卡方检验结果,1:显著;0:不显著;-1:期望值不满足条件\n coef: 包含chi2、p值、V相关系数\n log: 记录一些异常情况\n FO: 观察频数\n FE: 期望频数\n TGI:fo/fe\n TWI:fo-fe\n CHI:sqrt((...
def pre_cross_qlist(data, code): '自适应给出可以进行交叉分析的变量和相应选项\n 满足以下条件的将一键交叉分析:\n 1、单选题\n 2、如果选项是文本,则平均长度应小于10\n ...\n 返回:\n cross_qlist: [[题目序号,变量选项],]\n ' cross_qlist = [] for qq in code: qtype = code[qq]['qtype'] qlist = code[qq]['qlist'] content = code[qq]['content...
8,495,738,686,203,361,000
自适应给出可以进行交叉分析的变量和相应选项 满足以下条件的将一键交叉分析: 1、单选题 2、如果选项是文本,则平均长度应小于10 ... 返回: cross_qlist: [[题目序号,变量选项],]
reportgen/questionnaire/questionnaire.py
pre_cross_qlist
brightgeng/reportgen
python
def pre_cross_qlist(data, code): '自适应给出可以进行交叉分析的变量和相应选项\n 满足以下条件的将一键交叉分析:\n 1、单选题\n 2、如果选项是文本,则平均长度应小于10\n ...\n 返回:\n cross_qlist: [[题目序号,变量选项],]\n ' cross_qlist = [] for qq in code: qtype = code[qq]['qtype'] qlist = code[qq]['qlist'] content = code[qq]['content...
def cross_chart(data, code, cross_class, filename=u'交叉分析报告', cross_qlist=None, delclass=None, plt_dstyle=None, cross_order=None, reverse_display=False, total_display=True, max_column_chart=20, save_dstyle=None, template=None): "使用帮助\n data: 问卷数据,包含交叉变量和所有的因变量\n code: 数据编码\n cross_class: 交叉变量,单选题或者多选题,例如:Q1...
-7,219,620,354,715,659,000
使用帮助 data: 问卷数据,包含交叉变量和所有的因变量 code: 数据编码 cross_class: 交叉变量,单选题或者多选题,例如:Q1 filename:文件名,用于PPT和保存相关数据 cross_list: 需要进行交叉分析的变量,缺省为code中的所有变量 delclass: 交叉变量中需要删除的单个变量,缺省空 plt_dstyle: 绘制图表需要用的数据类型,默认为百分比表,可以选择['TGI'、'CHI'、'TWI']等 save_dstyle: 需要保存的数据类型,格式为列表。 cross_order: 交叉变量中各个类别的顺序,可以缺少 total_display: PPT绘制图表中是否显示总体情况 ma...
reportgen/questionnaire/questionnaire.py
cross_chart
brightgeng/reportgen
python
def cross_chart(data, code, cross_class, filename=u'交叉分析报告', cross_qlist=None, delclass=None, plt_dstyle=None, cross_order=None, reverse_display=False, total_display=True, max_column_chart=20, save_dstyle=None, template=None): "使用帮助\n data: 问卷数据,包含交叉变量和所有的因变量\n code: 数据编码\n cross_class: 交叉变量,单选题或者多选题,例如:Q1...
def onekey_gen(data, code, filename=u'reprotgen 报告自动生成', template=None): '一键生成所有可能需要的报告\n 包括\n 描述统计报告\n 单选题的交叉分析报告\n ' try: summary_chart(data, code, filename=filename, template=template) except: print('整体报告生成过程中出现错误,将跳过..') pass print(('已生成 ' + filename)) cross_q...
-4,211,264,381,394,803,000
一键生成所有可能需要的报告 包括 描述统计报告 单选题的交叉分析报告
reportgen/questionnaire/questionnaire.py
onekey_gen
brightgeng/reportgen
python
def onekey_gen(data, code, filename=u'reprotgen 报告自动生成', template=None): '一键生成所有可能需要的报告\n 包括\n 描述统计报告\n 单选题的交叉分析报告\n ' try: summary_chart(data, code, filename=filename, template=template) except: print('整体报告生成过程中出现错误,将跳过..') pass print(('已生成 ' + filename)) cross_q...
def scorpion(data, code, filename='scorpion'): '天蝎X计划\n 返回一个excel文件\n 1、索引\n 2、各个题目的频数表\n 3、所有可能的交叉分析\n ' if (not os.path.exists('.\\out')): os.mkdir('.\\out') Writer = pd.ExcelWriter((('.\\out\\' + filename) + '.xlsx')) try: qqlist = list(sorted(code, key=(lambda c: int(r...
-6,475,010,946,498,080,000
天蝎X计划 返回一个excel文件 1、索引 2、各个题目的频数表 3、所有可能的交叉分析
reportgen/questionnaire/questionnaire.py
scorpion
brightgeng/reportgen
python
def scorpion(data, code, filename='scorpion'): '天蝎X计划\n 返回一个excel文件\n 1、索引\n 2、各个题目的频数表\n 3、所有可能的交叉分析\n ' if (not os.path.exists('.\\out')): os.mkdir('.\\out') Writer = pd.ExcelWriter((('.\\out\\' + filename) + '.xlsx')) try: qqlist = list(sorted(code, key=(lambda c: int(r...
def timer_callback(self): ' Calculate Mx1, My1, ...... Mx6, My6 ' if (self.t == 0): self.Phix1 = 0 self.Phiy1 = 0 self.Phix3 = 0 self.Phiy3 = 0 self.t += 1 Mx1 = (self.x3 - self.x1) My1 = (self.y3 - self.y1) Mx3 = (self.x1 - self.x3) My3 = (self.y1 - self....
-1,594,632,542,043,036,000
Calculate Mx1, My1, ...... Mx6, My6
Real Topology Graph/GNN Model 2/Cyclic Graph/test_n2_robot3.py
timer_callback
HusseinLezzaik/Consensus-Algorithm-for-2-Mobile-Robots
python
def timer_callback(self): ' ' if (self.t == 0): self.Phix1 = 0 self.Phiy1 = 0 self.Phix3 = 0 self.Phiy3 = 0 self.t += 1 Mx1 = (self.x3 - self.x1) My1 = (self.y3 - self.y1) Mx3 = (self.x1 - self.x3) My3 = (self.y1 - self.y3) ' Use MLP to Predict contro...
def get_samplesheet(self):
    """Return the absolute path to the annotation samplesheet fixture.

    The samplesheet ``annotation_spreadsheet.xlsm`` lives in the ``files``
    directory two levels above this module.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    files_dir = os.path.normpath(os.path.join(here, '..', '..', 'files'))
    return os.path.join(files_dir, 'annotation_spreadsheet.xlsm')
-6,796,047,194,800,837,000
Return path of an annotation samplesheet.
resdk/tests/functional/data_upload/e2e_upload.py
get_samplesheet
tristanbrown/resolwe-bio-py
python
def get_samplesheet(self): files_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'files')) samplesheet_name = 'annotation_spreadsheet.xlsm' return os.path.join(files_path, samplesheet_name)
def r2(self):
    """Return the coefficient of determination, R^2 = 1 - SSE/SST.

    Works for either the train model or the test model, depending on
    which sums of squares ``self.sse()`` / ``self.sst()`` report.
    """
    return 1 - self.sse() / self.sst()
9,113,696,639,111,318,000
Calculate R2 for either the train model or the test model
metrics/__init__.py
r2
nathan-bennett/skellam
python
def r2(self): sse_sst = (self.sse() / self.sst()) return (1 - sse_sst)
def adjusted_r2(self):
    """Return adjusted R^2, which penalizes R^2 for the number of predictors.

    Uses the standard formula 1 - (1 - R^2) * (n - 1) / (n - k - 1), where
    ``n`` is ``self.train_length`` and ``k`` is ``self.coeff_size``.
    """
    n = self.train_length
    k = self.coeff_size
    return 1 - (1 - self.r2()) * (n - 1) / (n - k - 1)
-1,238,627,807,371,270,000
Calculate adjusted R2 for either the train model or the test model
metrics/__init__.py
adjusted_r2
nathan-bennett/skellam
python
def adjusted_r2(self): r2 = self.r2() return (1 - (((1 - r2) * (self.train_length - 1)) / ((self.train_length - self.coeff_size) - 1)))
def log_likelihood(self):
    """Return the maximum of the log-likelihood function.

    ``self.max_ll`` is presumably populated by the model-fitting routine
    elsewhere in the class — TODO confirm against the fitter.
    """
    return self.max_ll
-512,140,616,830,212,860
Returns the maximum of the log likelihood function
metrics/__init__.py
log_likelihood
nathan-bennett/skellam
python
def log_likelihood(self): return self.max_ll
def _calculate_lambda(self):
    """Predict the rates (lambdas) of the two Poisson distributions.

    Returns a tuple ``(lambda0, lambda1)``: for each component, the
    exponential of the squeezed linear predictor ``X @ coefficients``,
    converted via ``ArrayUtils.convert_to_array``.
    """
    def rate(design, coefficients):
        # exp() maps the linear predictor onto the positive rate scale.
        return ArrayUtils.convert_to_array(np.exp(np.squeeze(design @ coefficients)))

    return (rate(self._x0, self.lambda_0_coefficients),
            rate(self._x1, self.lambda_1_coefficients))
1,492,520,975,505,939,200
Create arrays for our predictions of the two Poisson distributions
metrics/__init__.py
_calculate_lambda
nathan-bennett/skellam
python
def _calculate_lambda(self): '\n ' _lambda0 = ArrayUtils.convert_to_array(np.exp(np.squeeze((self._x0 @ self.lambda_0_coefficients)))) _lambda1 = ArrayUtils.convert_to_array(np.exp(np.squeeze((self._x1 @ self.lambda_1_coefficients)))) return (_lambda0, _lambda1)
def _calculate_v(self): 'Create diagonal matrix consisting of our predictions of the Poisson distributions\n ' (_lambda0, _lambda1) = self._calculate_lambda() _v0 = np.diagflat(_lambda0) _v1 = np.diagflat(_lambda1) return (_v0, _v1)
-1,923,027,407,107,608,300
Create diagonal matrix consisting of our predictions of the Poisson distributions
metrics/__init__.py
_calculate_v
nathan-bennett/skellam
python
def _calculate_v(self): '\n ' (_lambda0, _lambda1) = self._calculate_lambda() _v0 = np.diagflat(_lambda0) _v1 = np.diagflat(_lambda1) return (_v0, _v1)
def _calculate_w(self): 'Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions\n with their observed values\n ' (_lambda0, _lambda1) = self._calculate_lambda() _w0 = np.diagflat(((self.l0 - _lambda0.reshape((- 1), 1)) ** 2)) _w1 = np.d...
-9,050,369,303,904,403,000
Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions with their observed values
metrics/__init__.py
_calculate_w
nathan-bennett/skellam
python
def _calculate_w(self): 'Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions\n with their observed values\n ' (_lambda0, _lambda1) = self._calculate_lambda() _w0 = np.diagflat(((self.l0 - _lambda0.reshape((- 1), 1)) ** 2)) _w1 = np.d...
def _calculate_robust_covariance(self): 'Calculate robust variance covariance matrices for our two sets of coefficients\n ' (_v0, _v1) = self._calculate_v() (_w0, _w1) = self._calculate_w() _robust_cov0 = ((np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0)) * np.dot(np.dot(self._x0.T, _w0),...
-5,582,450,034,621,296,000
Calculate robust variance covariance matrices for our two sets of coefficients
metrics/__init__.py
_calculate_robust_covariance
nathan-bennett/skellam
python
def _calculate_robust_covariance(self): '\n ' (_v0, _v1) = self._calculate_v() (_w0, _w1) = self._calculate_w() _robust_cov0 = ((np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0)) * np.dot(np.dot(self._x0.T, _w0), self._x0)) * np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))) _r...
def _calculate_robust_standard_errors(self): 'Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal\n values in the variance covariance matrices\n ' (_robust_cov0, _robust_cov1) = self._calculate_robust_covariance() _std_error0 = np.sqrt(np...
-3,722,754,812,175,301,600
Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal values in the variance covariance matrices
metrics/__init__.py
_calculate_robust_standard_errors
nathan-bennett/skellam
python
def _calculate_robust_standard_errors(self): 'Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal\n values in the variance covariance matrices\n ' (_robust_cov0, _robust_cov1) = self._calculate_robust_covariance() _std_error0 = np.sqrt(np...