added stringdate 2024-11-18 17:59:49 2024-11-19 03:44:43 | created int64 126B 2,086B | id stringlengths 40 40 | int_score int64 2 5 | metadata dict | score float64 2.31 5.41 | source stringclasses 1
value | text stringlengths 259 26.9k | num_lines int64 16 649 | avg_line_length float64 15 61 | max_line_length int64 31 179 | ast_depth int64 8 40 | length int64 101 3.8k | lang stringclasses 1
value | sast_semgrep_findings stringlengths 1.17k 878k | sast_semgrep_findings_count int64 1 629 | sast_semgrep_success bool 1
class | sast_semgrep_error stringclasses 1
value | cwe_ids listlengths 1 561 | rule_ids listlengths 1 561 | subcategories listlengths 1 561 | confidences listlengths 1 561 | severities listlengths 1 561 | line_starts listlengths 1 561 | line_ends listlengths 1 561 | column_starts listlengths 1 561 | column_ends listlengths 1 561 | owasp_categories listlengths 1 561 | messages listlengths 1 561 | cvss_scores listlengths 1 561 | likelihoods listlengths 1 561 | impacts listlengths 1 561 | filename stringlengths 4 105 | path stringlengths 5 372 | repo_name stringlengths 5 115 | license stringclasses 452
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2024-11-18T18:05:43.388874+00:00 | 1,567,036,262,000 | 912041d1cee909d29a9b46991268ac30470556ff | 3 | {
"blob_id": "912041d1cee909d29a9b46991268ac30470556ff",
"branch_name": "refs/heads/master",
"committer_date": 1567036262000,
"content_id": "c6d2536833fc35c3e025cd9d5cea56298f7fe360",
"detected_licenses": [
"BSD-3-Clause"
],
"directory_id": "ca5fc43049f94a794d90a561fd8126f02b603599",
"extension": "py",
"filename": "alias.py",
"fork_events_count": 0,
"gha_created_at": 1469110078000,
"gha_event_created_at": 1527068726000,
"gha_language": "Python",
"gha_license_id": "BSD-3-Clause",
"github_id": 63874745,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2632,
"license": "BSD-3-Clause",
"license_type": "permissive",
"path": "/i3py/core/features/alias.py",
"provenance": "stack-edu-0054.json.gz:568753",
"repo_name": "Exopy/i3py",
"revision_date": 1567036262000,
"revision_id": "6f004d3e2ee2b788fb4693606cc4092147655ce1",
"snapshot_id": "32d9ee343d21d275680a2d030b660a80960e99ac",
"src_encoding": "UTF-8",
"star_events_count": 1,
"url": "https://raw.githubusercontent.com/Exopy/i3py/6f004d3e2ee2b788fb4693606cc4092147655ce1/i3py/core/features/alias.py",
"visit_date": "2022-02-18T21:51:16.423188"
} | 2.5625 | stackv2 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2017 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Feature whose value is mapped to another Feature.
"""
from types import MethodType
from typing import Any, Dict, Callable
from ..abstracts import AbstractHasFeatures
from .feature import Feature, get_chain, set_chain
GET_DEF =\
"""def get(self, driver):
return {}
"""
SET_DEF =\
"""def set(self, driver, value):
{} = value
"""
class Alias(Feature):
"""Feature whose value is mapped to another Feature.
Parameters
----------
alias : str
Path to the feature to which the alias refers to. The path should be
dot separated and use leading dots to access to parent features.
settable: bool, optional
Boolean indicating if the alias can be used to set the value of the
aliased feature.
"""
def __init__(self, alias: str, settable: bool=False) -> None:
super(Alias, self).__init__(True, settable if settable else None)
accessor = 'driver.' + '.'.join([p if p else 'parent'
for p in alias.split('.')])
defs = GET_DEF.format(accessor)
if settable:
defs += '\n' + SET_DEF.format(accessor)
loc: Dict[str, Callable] = {}
exec(defs, globals(), loc)
self.get = MethodType(loc['get'], self) # type: ignore
if settable:
self.set = MethodType(loc['set'], self) # type: ignore
def post_set(self, driver: AbstractHasFeatures, value: Any, i_value: Any,
response: Any):
"""Re-implemented here as an Alias does not need to do anything
by default.
"""
pass
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
def _get(self, driver: AbstractHasFeatures):
"""Re-implemented so that Alias never use the cache.
"""
with driver.lock:
return get_chain(self, driver)
def _set(self, driver: AbstractHasFeatures, value: Any):
"""Re-implemented so that Alias never uses the cache.
"""
with driver.lock:
set_chain(self, driver, value)
| 90 | 28.24 | 79 | 16 | 535 | python | [{"finding_id": "semgrep_rules.python.lang.security.audit.exec-detected_715a30efc2342539_ee5be2eb", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.audit.exec-detected", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Detected the use of exec(). exec() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "remediation": "", "location": {"file_path": "unknown", "line_start": 60, "line_end": 60, "column_start": 9, "column_end": 35, "code_snippet": "requires login"}, "cwe_id": "CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A03:2021 - Injection", "references": [{"url": "https://owasp.org/Top10/A03_2021-Injection", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.audit.exec-detected", "path": "/tmp/tmpb8jm_z1l/715a30efc2342539.py", "start": {"line": 60, "col": 9, "offset": 1581}, "end": {"line": 60, "col": 35, "offset": 1607}, "extra": {"message": "Detected the use of exec(). exec() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "metadata": {"source-rule-url": "https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html", "cwe": ["CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"], "owasp": ["A03:2021 - Injection", "A05:2025 - Injection"], "asvs": {"control_id": "5.2.4 Dyanmic Code Execution Features", "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v52-sanitization-and-sandboxing-requirements", "section": "V5: Validation, Sanitization and Encoding Verification Requirements", "version": "4"}, "category": "security", "technology": ["python"], "references": ["https://owasp.org/Top10/A03_2021-Injection"], "subcategory": ["audit"], "likelihood": "LOW", "impact": "HIGH", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 1 | true | [
"CWE-95"
] | [
"rules.python.lang.security.audit.exec-detected"
] | [
"security"
] | [
"LOW"
] | [
"MEDIUM"
] | [
60
] | [
60
] | [
9
] | [
35
] | [
"A03:2021 - Injection"
] | [
"Detected the use of exec(). exec() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources."
] | [
5
] | [
"LOW"
] | [
"HIGH"
] | alias.py | /i3py/core/features/alias.py | Exopy/i3py | BSD-3-Clause | |
2024-11-18T18:05:43.895243+00:00 | 1,619,674,139,000 | c814a34864a4158acf4e1abbbb583376f3efad66 | 3 | {
"blob_id": "c814a34864a4158acf4e1abbbb583376f3efad66",
"branch_name": "refs/heads/master",
"committer_date": 1619674139000,
"content_id": "8b45c9393b96c8f7482a7754a3078e8eb2a7bbc4",
"detected_licenses": [
"MIT"
],
"directory_id": "4790aa158050f52b2f0fa990467774c5276b4bc8",
"extension": "py",
"filename": "utils_math.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6388,
"license": "MIT",
"license_type": "permissive",
"path": "/utils/utils_math.py",
"provenance": "stack-edu-0054.json.gz:568759",
"repo_name": "yff12345/bsc_lcs",
"revision_date": 1619674139000,
"revision_id": "4076fd40656efee3365f10d4e2fd7e7d88d524a4",
"snapshot_id": "6024a6dd0ed82e35dd2acd8def1b9d480c84d28e",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/yff12345/bsc_lcs/4076fd40656efee3365f10d4e2fd7e7d88d524a4/utils/utils_math.py",
"visit_date": "2023-04-12T13:47:02.626502"
} | 2.640625 | stackv2 | import numpy as np
import torch
import torch.nn.functional as F
def sample_gumbel(shape, eps=1e-10):
"""
NOTE: Stolen from https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py
Sample from Gumbel(0, 1)
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
U = torch.rand(shape).float()
return -torch.log(eps - torch.log(U + eps))
def gumbel_softmax_sample(logits, temp=1, eps=1e-10, dim=-1):
"""
NOTE: Stolen from https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
gumbel_noise = sample_gumbel(logits.size(), eps=eps)
if logits.is_cuda:
gumbel_noise = gumbel_noise.cuda()
y = logits + gumbel_noise
return F.softmax(y / temp, dim=dim)
def gumbel_softmax(logits, temp=1, hard=False, eps=1e-10, dim=-1):
"""
NOTE: Stolen from https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py
Added dimension selection feature.
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
y_soft = gumbel_softmax_sample(logits, temp=temp, eps=eps, dim=dim)
if hard:
shape = logits.size()
_, idx = y_soft.max(dim=dim, keepdim=True)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
y_hard = torch.zeros_like(y_soft)
if y_soft.is_cuda:
y_hard = y_hard.cuda()
y_hard = y_hard.zero_().scatter_(dim, idx, 1.0)
y = (y_hard - y_soft).detach() + y_soft
else:
y = y_soft
return y
def threshold_sampling(logits, threshold=0.5, hard=False):
"""
Omit Gumbel sampling for deterministic sampling.
"""
y_soft = torch.sigmoid(logits)
y_hard = y_soft.ge(threshold).to(y_soft.device, dtype=torch.float32)
y = (y_hard - y_soft).detach() + y_soft
return y
def threshold_sampling_v2(logits, threshold=0.5, hard=False):
"""
Omit Gumbel sampling for deterministic sampling.
V2 different: no sigmoid in sampling function (sigmoid is applied at logit function)
"""
# y_soft = torch.sigmoid(logits)
y_soft = logits
y_hard = y_soft.ge(threshold).to(y_soft.device, dtype=torch.float32)
y = (y_hard - y_soft).detach() + y_soft
return y
def binary_accuracy(output, labels):
preds = output > 0.5
correct = preds.type_as(labels).eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def encode_onehot(labels):
classes = set(labels)
classes_dict = {
c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)
}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def get_triu_indices(num_nodes):
"""Linear triu (upper triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
triu_indices = (ones.triu() - eye).nonzero().t()
triu_indices = triu_indices[0] * num_nodes + triu_indices[1]
return triu_indices
def get_tril_indices(num_nodes):
"""Linear tril (lower triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
tril_indices = (ones.tril() - eye).nonzero().t()
tril_indices = tril_indices[0] * num_nodes + tril_indices[1]
return tril_indices
def get_offdiag_indices(num_nodes):
"""Linear off-diagonal indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
offdiag_indices = (ones - eye).nonzero().t()
offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]
return offdiag_indices
def get_triu_offdiag_indices(num_nodes):
"""Linear triu (upper) indices w.r.t. vector of off-diagonal elements."""
triu_idx = torch.zeros(num_nodes * num_nodes)
triu_idx[get_triu_indices(num_nodes)] = 1.
triu_idx = triu_idx[get_offdiag_indices(num_nodes)]
return triu_idx.nonzero()
def get_tril_offdiag_indices(num_nodes):
"""Linear tril (lower) indices w.r.t. vector of off-diagonal elements."""
tril_idx = torch.zeros(num_nodes * num_nodes)
tril_idx[get_tril_indices(num_nodes)] = 1.
tril_idx = tril_idx[get_offdiag_indices(num_nodes)]
return tril_idx.nonzero()
def mat_to_offdiag(inputs, num_atoms, num_edge_types):
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
[num_atoms, num_atoms]).astype(np.int32)
num_edges = (num_atoms * num_atoms) - num_atoms
if not inputs.is_contiguous():
inputs = inputs.contiguous()
inputs = inputs.view(-1, num_edge_types, num_atoms * num_atoms)
inputs = torch.transpose(inputs, 2, 1)
off_diag_idx = torch.LongTensor(off_diag_idx)
if inputs.is_cuda:
off_diag_idx = off_diag_idx.cuda()
mat_offdiag = torch.index_select(inputs, 1, off_diag_idx).contiguous()
return mat_offdiag
def offdiag_to_mat(inputs, num_nodes):
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_nodes, num_nodes)) - np.eye(num_nodes)),
[num_nodes, num_nodes]).astype(np.int32)
batch_size = inputs.size(0)
edge_types = inputs.size(2)
output = torch.zeros((batch_size, num_nodes * num_nodes, edge_types))
if inputs.is_cuda:
output = output.cuda()
output[:, off_diag_idx, :] = inputs
output = output.view(batch_size, num_nodes, num_nodes, edge_types)
return output
def sample_graph(logits, args):
if args.deterministic_sampling:
edges = threshold_sampling(logits, threshold=args.threshold)
else:
edges = gumbel_softmax(logits, temp=args.temp, hard=args.hard)
return edges
def sample_graph_v2(logits, args):
if args.deterministic_sampling:
edges = threshold_sampling_v2(logits, threshold=args.threshold)
else:
edges = gumbel_softmax(logits, temp=args.temp, hard=args.hard)
return edges
| 201 | 30.78 | 118 | 16 | 1,799 | python | [{"finding_id": "semgrep_rules.python.lang.maintainability.is-function-without-parentheses_a2b042502ae2f5b4_d50af3ea", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.is-function-without-parentheses", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant logits.is_cuda() because logits.is_cuda is always true.", "remediation": "", "location": {"file_path": "unknown", "line_start": 31, "line_end": 31, "column_start": 8, "column_end": 22, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.is-function-without-parentheses", "path": "/tmp/tmpb8jm_z1l/a2b042502ae2f5b4.py", "start": {"line": 31, "col": 8, "offset": 941}, "end": {"line": 31, "col": 22, "offset": 955}, "extra": {"message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant logits.is_cuda() because logits.is_cuda is always true.", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.maintainability.is-function-without-parentheses_a2b042502ae2f5b4_8835219f", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.is-function-without-parentheses", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant y_soft.is_cuda() because y_soft.is_cuda is always true.", "remediation": "", "location": {"file_path": "unknown", "line_start": 55, "line_end": 55, "column_start": 12, "column_end": 26, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.is-function-without-parentheses", "path": "/tmp/tmpb8jm_z1l/a2b042502ae2f5b4.py", "start": {"line": 55, "col": 12, "offset": 1785}, "end": {"line": 55, "col": 26, "offset": 1799}, "extra": {"message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant y_soft.is_cuda() because y_soft.is_cuda is always true.", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.maintainability.is-function-without-parentheses_a2b042502ae2f5b4_22890543", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.is-function-without-parentheses", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant inputs.is_cuda() because inputs.is_cuda is always true.", "remediation": "", "location": {"file_path": "unknown", "line_start": 162, "line_end": 162, "column_start": 8, "column_end": 22, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.is-function-without-parentheses", "path": "/tmp/tmpb8jm_z1l/a2b042502ae2f5b4.py", "start": {"line": 162, "col": 8, "offset": 5222}, "end": {"line": 162, "col": 22, "offset": 5236}, "extra": {"message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant inputs.is_cuda() because inputs.is_cuda is always true.", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.maintainability.is-function-without-parentheses_a2b042502ae2f5b4_3e9f1477", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.is-function-without-parentheses", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant inputs.is_cuda() because inputs.is_cuda is always true.", "remediation": "", "location": {"file_path": "unknown", "line_start": 177, "line_end": 177, "column_start": 8, "column_end": 22, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.is-function-without-parentheses", "path": "/tmp/tmpb8jm_z1l/a2b042502ae2f5b4.py", "start": {"line": 177, "col": 8, "offset": 5728}, "end": {"line": 177, "col": 22, "offset": 5742}, "extra": {"message": "Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant inputs.is_cuda() because inputs.is_cuda is always true.", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 4 | true | [
"",
"",
"",
""
] | [
"rules.python.lang.maintainability.is-function-without-parentheses",
"rules.python.lang.maintainability.is-function-without-parentheses",
"rules.python.lang.maintainability.is-function-without-parentheses",
"rules.python.lang.maintainability.is-function-without-parentheses"
] | [
"maintainability",
"maintainability",
"maintainability",
"maintainability"
] | [
"MEDIUM",
"MEDIUM",
"MEDIUM",
"MEDIUM"
] | [
"MEDIUM",
"MEDIUM",
"MEDIUM",
"MEDIUM"
] | [
31,
55,
162,
177
] | [
31,
55,
162,
177
] | [
8,
12,
8,
8
] | [
22,
26,
22,
22
] | [
"",
"",
"",
""
] | [
"Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant logits.is_cuda() because logits.is_cuda is always true.",
"Is \"is_cuda\" a function or an attribute? If it is a function, you may have meant y_soft.is_cuda() because y_soft.is_cuda is always true.",
"Is \"is_cuda\" a function o... | [
5,
5,
5,
5
] | [
"",
"",
"",
""
] | [
"",
"",
"",
""
] | utils_math.py | /utils/utils_math.py | yff12345/bsc_lcs | MIT | |
2024-11-18T18:05:44.308384+00:00 | 1,578,981,575,000 | 8d0411d1a3851550c8d731400213ff48f1c3b0c7 | 2 | {
"blob_id": "8d0411d1a3851550c8d731400213ff48f1c3b0c7",
"branch_name": "refs/heads/master",
"committer_date": 1578981575000,
"content_id": "e86b6be15675ecfb59b4c74485893d83221a5f43",
"detected_licenses": [
"MIT"
],
"directory_id": "8142e588999503979e1cd9d1989cdd43dc9417d6",
"extension": "py",
"filename": "losses.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11390,
"license": "MIT",
"license_type": "permissive",
"path": "/evkit/utils/losses.py",
"provenance": "stack-edu-0054.json.gz:568764",
"repo_name": "lilujunai/side-tuning",
"revision_date": 1578981575000,
"revision_id": "dea345691fb7ee0230150fe56ddd644efdffa6ac",
"snapshot_id": "07a0e2015fcb7f3699f749fb233b95ceba449277",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/lilujunai/side-tuning/dea345691fb7ee0230150fe56ddd644efdffa6ac/evkit/utils/losses.py",
"visit_date": "2021-02-26T23:52:13.468928"
} | 2.390625 | stackv2 | from evkit.models.taskonomy_network import TaskonomyDecoder
from tlkit.utils import SINGLE_IMAGE_TASKS, TASKS_TO_CHANNELS, FEED_FORWARD_TASKS
import torch
import torch.nn.functional as F
def softmax_cross_entropy(inputs, target, weight=None, cache={}, size_average=None, ignore_index=-100,
reduce=None, reduction='mean'):
cache['predictions'] = inputs
cache['labels'] = target
if len(target.shape) == 2: # unsqueeze one-hot representation
target = torch.argmax(target, dim=1)
loss = F.cross_entropy(inputs, target, weight)
# when working with 2D data, cannot use spatial weight mask, it becomes categorical/class
return {'total': loss, 'xentropy': loss}
def heteroscedastic_normal(mean_and_scales, target, weight=None, cache={}, eps=1e-2):
mu, scales = mean_and_scales
loss = (mu - target)**2 / (scales**2 + eps) + torch.log(scales**2 + eps)
# return torch.sum(weight * loss) / torch.sum(weight) if weight is not None else loss.mean()
loss = torch.mean(weight * loss) / weight.mean() if weight is not None else loss.mean()
return {'total': loss, 'nll': loss}
def heteroscedastic_double_exponential(mean_and_scales, target, weight=None, cache={}, eps=5e-2):
mu, scales = mean_and_scales
loss = torch.abs(mu - target) / (scales + eps) + torch.log(2.0 * (scales + eps))
loss = torch.mean(weight * loss) / weight.mean() if weight is not None else loss.mean()
return {'total': loss, 'nll': loss}
def weighted_mse_loss(inputs, target, weight=None, cache={}):
losses = {}
cache['predictions'] = inputs
cache['labels'] = target
if weight is not None:
# sq = (inputs - target) ** 2
# weightsq = torch.sum(weight * sq)
loss = torch.mean(weight * (inputs - target) ** 2)/torch.mean(weight)
else:
loss = F.mse_loss(inputs, target)
return {'total': loss, 'mse': loss}
weighted_l2_loss = weighted_mse_loss
def weighted_l1_loss(inputs, target, weight=None, cache={}):
target = target.float()
if weight is not None:
loss = torch.mean(weight * torch.abs(inputs - target))/torch.mean(weight)
else:
loss = F.l1_loss(inputs, target)
return {'total': loss, 'l1': loss}
def perceptual_l1_loss(decoder_path, bake_decodings):
task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
checkpoint = torch.load(decoder_path)
decoder.load_state_dict(checkpoint['state_dict'])
decoder.cuda()
decoder.eval()
print(f'Loaded decoder from {decoder_path} for perceptual loss')
def runner(inputs, target, weight=None, cache={}):
# the last arguments are so we can 'cache' and pass the decodings outside
inputs_decoded = decoder(inputs)
targets_decoded = target if bake_decodings else decoder(target)
cache['predictions'] = inputs_decoded
cache['labels'] = targets_decoded
if weight is not None:
loss = torch.mean(weight * torch.abs(inputs_decoded - targets_decoded))/torch.mean(weight)
else:
loss = F.l1_loss(inputs_decoded, targets_decoded)
return {'total': loss, 'perceptual_l1': loss}
return runner
def perceptual_l2_loss(decoder_path, bake_decodings):
task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
checkpoint = torch.load(decoder_path)
decoder.load_state_dict(checkpoint['state_dict'])
decoder.cuda()
decoder.eval()
print(f'Loaded decoder from {decoder_path} for perceptual loss')
def runner(inputs, target, weight=None, cache={}):
# the last arguments are so we can 'cache' and pass the decodings outside
inputs_decoded = decoder(inputs)
targets_decoded = target if bake_decodings else decoder(target)
cache['predictions'] = inputs_decoded
cache['labels'] = targets_decoded
if weight is not None:
loss = torch.mean(weight * (inputs_decoded - targets_decoded) ** 2)/torch.mean(weight)
else:
loss = F.mse_loss(inputs_decoded, targets_decoded)
return {'total': loss, 'perceptual_mse': loss}
return runner
def dense_softmax_cross_entropy_loss(inputs, targets, cache={}): # these should be logits (batch_size, n_class)
batch_size, _ = targets.shape
losses = {}
losses['final'] = -1. * torch.sum(torch.softmax(targets.float(), dim=1) * F.log_softmax(inputs.float(), dim=1)) / batch_size
losses['standard'] = losses['final']
return losses
def dense_cross_entropy_loss_(inputs, targets): # these should be logits (batch_size, n_class)
batch_size, _ = targets.shape
return -1. * torch.sum(targets * F.log_softmax(inputs, dim=1)) / batch_size
# def dense_softmax_cross_entropy(inputs, targets, weight=None, cache={}):
# assert weight == None
# cache['predictions'] = inputs
# cache['labels'] = targets
# # print(targets.shape)
# batch_size, _ = targets.shape
# loss = -1. * torch.sum(torch.softmax(targets, dim=1) * F.log_softmax(inputs, dim=1)) / batch_size
# loss = F.mse_loss(inputs, targets.detach())
# return {'total': loss, 'xentropy': loss}
def dense_softmax_cross_entropy(inputs, targets, weight=None, cache={}):
assert weight is None
cache['predictions'] = inputs
cache['labels'] = targets
batch_size, _ = targets.shape
loss = -1. * torch.sum(torch.softmax(targets.detach(), dim=1) * F.log_softmax(inputs, dim=1)) / batch_size
# loss = F.mse_loss(inputs, targets.detach())
return {'total': loss, 'xentropy': loss}
def dense_cross_entropy(inputs, targets, weight=None, cache={}):
assert weight == None
cache['predictions'] = inputs
cache['labels'] = targets
batch_size, _ = targets.shape
loss = -1. * torch.sum(targets.detach() * F.log_softmax(inputs, dim=1)) / batch_size
# loss = F.mse_loss(inputs, targets.detach())
return {'total': loss, 'xentropy': loss}
def perceptual_cross_entropy_loss(decoder_path, bake_decodings):
task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
checkpoint = torch.load(decoder_path)
decoder.load_state_dict(checkpoint['state_dict'])
decoder.cuda()
decoder.eval()
print(f'Loaded decoder from {decoder_path} for perceptual loss')
def runner(inputs, target, weight=None, cache={}):
# the last arguments are so we can 'cache' and pass the decodings outside
inputs_decoded = decoder(inputs)
targets_decoded = target if bake_decodings else decoder(target)
cache['predictions'] = inputs_decoded
cache['labels'] = targets_decoded
return dense_softmax_cross_entropy_loss_(inputs_decoded, targets_decoded)
return runner
def identity_regularizer(loss_fn, model):
def runner(inputs, target, weight=None, cache={}):
losses = loss_fn(inputs, target, weight, cache)
return losses
return runner
def transfer_regularizer(loss_fn, model, reg_loss_fn='F.l1_loss', coef=1e-3):
def runner(inputs, target, weight=None, cache={}):
orig_losses = loss_fn(inputs, target, weight, cache)
#if isinstance(model, PolicyWithBase):
if type(model).__name__ == "PolicyWithBase":
# Imitation Learning - retreive encodings via the cache
assert 'base_encoding' in cache and 'transfered_encoding' in cache, f'cache is missing keys {cache.keys()}'
regularization_loss = 0
for base_encoding, transfered_encoding in zip(cache['base_encoding'], cache['transfered_encoding']):
regularization_loss += eval(reg_loss_fn)(model.base.perception_unit.sidetuner.net.transfer_network(base_encoding), transfered_encoding)
else:
# Vision Transfers - retreive encodings directly from model attributes
# (cannot do this for IL due to the FrameStacked being iterative)
assert isinstance(model.side_output, torch.Tensor), 'Cannot regularize side network if it is not used'
regularization_loss = eval(reg_loss_fn)(model.transfer_network(model.base_encoding), model.transfered_encoding)
orig_losses.update({
'total': orig_losses['total'] + coef * regularization_loss,
'weight_tying': regularization_loss,
})
return orig_losses
return runner
def perceptual_regularizer(loss_fn, model, coef=1e-3, decoder_path=None, use_transfer=True, reg_loss_fn='F.mse_loss'):
# compares model.base_encoding E(x) and model.transfered_encoding T(E(x) + S(x))
# use_transfer means we will compare exactly above
# use_transfer=False means we will compare model.base_encoding E(x) and model.merged_encoding E(x) + S(x)
# Recall, decoder requires unnormalized inputs!
assert decoder_path is not None, 'Pass in a decoder to which to transform our parameters and regularize on'
task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
checkpoint = torch.load(decoder_path)
decoder.load_state_dict(checkpoint['state_dict'])
decoder.cuda()
decoder.eval()
if task in FEED_FORWARD_TASKS:
reg_loss_fn = "dense_softmax_cross_entropy_loss_"
else:
reg_loss_fn = "F.l1_loss"
print(f'Loaded decoder from {decoder_path} for perceptual loss')
def runner(inputs, target, weight=None, cache={}):
orig_losses = loss_fn(inputs, target, weight, cache)
if type(model).__name__ == "PolicyWithBase":
# Imitation Learning - retreive encodings via the cache
assert 'base_encoding' in cache, f'cache is missing base {cache.keys()}'
if use_transfer:
assert 'transfered_encoding' in cache, f'cache is missing tied {cache.keys()}'
tied_encodings = cache['transfered_encoding']
else:
assert 'merged_encoding' in cache, f'cache is missing tied{cache.keys()}'
tied_encodings = cache['merged_encoding']
regularization_loss = 0
for base_encoding, tied_encoding in zip(cache['base_encoding'], tied_encodings):
regularization_loss += eval(reg_loss_fn)(decoder(base_encoding), decoder(tied_encoding))
else:
# Vision Transfers - retreive encodings directly from model attributes
# (cannot do this for IL due to the FrameStacked being iterative)
assert isinstance(model.side_output, torch.Tensor), 'Cannot regularize side network if it is not used'
if use_transfer:
tied_encoding = model.transfered_encoding
else:
tied_encoding = model.merged_encoding
losses['weight_tying'] = eval(reg_loss_fn)(decoder(model.base_encoding), decoder(tied_encoding))
regularization_loss = reg_loss_fn(decoder(model.base_encoding), decoder(tied_encoding))
orig_losses.update({
'total': orig_losses['total'] + coef * regularization_loss,
'weight_tying': regularization_loss,
})
return orig_losses
return runner
| 235 | 47.47 | 151 | 20 | 2,680 | python | [{"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_241b7cca", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 8, "line_end": 8, "column_start": 5, "column_end": 34, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 8, "col": 5, "offset": 345}, "end": {"line": 8, "col": 34, "offset": 374}, "extra": {"message": "Function softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_6770f5a1", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 9, "line_end": 9, "column_start": 5, "column_end": 29, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 9, "col": 5, "offset": 379}, "end": {"line": 9, "col": 29, "offset": 403}, "extra": {"message": "Function softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_a749fb1e", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function weighted_mse_loss mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 31, "line_end": 31, "column_start": 5, "column_end": 34, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 31, "col": 5, "offset": 1565}, "end": {"line": 31, "col": 34, "offset": 1594}, "extra": {"message": "Function weighted_mse_loss mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_c082ff16", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function weighted_mse_loss mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 32, "line_end": 32, "column_start": 5, "column_end": 29, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 32, "col": 5, "offset": 1599}, "end": {"line": 32, "col": 29, "offset": 1623}, "extra": {"message": "Function weighted_mse_loss mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_f5889148", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function dense_softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 118, "line_end": 118, "column_start": 5, "column_end": 34, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 118, "col": 5, "offset": 5423}, "end": {"line": 118, "col": 34, "offset": 5452}, "extra": {"message": "Function dense_softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_61add4d3", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function dense_softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 119, "line_end": 119, "column_start": 5, "column_end": 30, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 119, "col": 5, "offset": 5457}, "end": {"line": 119, "col": 30, "offset": 5482}, "extra": {"message": "Function dense_softmax_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_00faca8a", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function dense_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 128, "line_end": 128, "column_start": 5, "column_end": 34, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 128, "col": 5, "offset": 5822}, "end": {"line": 128, "col": 34, "offset": 5851}, "extra": {"message": "Function dense_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.default-mutable-dict_4f869c82d83d165c_7e76ab99", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Function dense_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "remediation": "", "location": {"file_path": "unknown", "line_start": 129, "line_end": 129, "column_start": 5, "column_end": 30, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.default-mutable-dict", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 129, "col": 5, "offset": 5856}, "end": {"line": 129, "col": 30, "offset": 5881}, "extra": {"message": "Function dense_cross_entropy mutates default dict cache. Python only instantiates default function arguments once and shares the instance across the function calls. If the default function argument is mutated, that will modify the instance used by all future function calls. This can cause unexpected results, or lead to security vulnerabilities whereby one function consumer can view or modify the data of another function consumer. Instead, use a default argument (like None) to indicate that no argument was provided and instantiate a new dictionary at that time. For example: `if cache is None: cache = {}`.", "metadata": {"category": "correctness", "technology": ["python"], "references": ["https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.security.audit.eval-detected_4f869c82d83d165c_c54f253b", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.audit.eval-detected", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "remediation": "", "location": {"file_path": "unknown", "line_start": 169, "line_end": 169, "column_start": 40, "column_end": 57, "code_snippet": "requires login"}, "cwe_id": "CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A03:2021 - Injection", "references": [{"url": "https://owasp.org/Top10/A03_2021-Injection", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.audit.eval-detected", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 169, "col": 40, "offset": 7840}, "end": {"line": 169, "col": 57, "offset": 7857}, "extra": {"message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "metadata": {"source-rule-url": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b307-eval", "cwe": ["CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"], "owasp": ["A03:2021 - Injection", "A05:2025 - Injection"], "asvs": {"control_id": "5.2.4 Dyanmic Code Execution Features", "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v52-sanitization-and-sandboxing-requirements", "section": "V5: Validation, Sanitization and Encoding Verification Requirements", "version": "4"}, "category": "security", "technology": ["python"], "references": ["https://owasp.org/Top10/A03_2021-Injection"], "subcategory": ["audit"], "likelihood": "LOW", "impact": "HIGH", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.security.audit.eval-detected_4f869c82d83d165c_80dc9d3e", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.audit.eval-detected", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "remediation": "", "location": {"file_path": "unknown", "line_start": 174, "line_end": 174, "column_start": 35, "column_end": 52, "code_snippet": "requires login"}, "cwe_id": "CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A03:2021 - Injection", "references": [{"url": "https://owasp.org/Top10/A03_2021-Injection", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.audit.eval-detected", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 174, "col": 35, "offset": 8277}, "end": {"line": 174, "col": 52, "offset": 8294}, "extra": {"message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "metadata": {"source-rule-url": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b307-eval", "cwe": ["CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"], "owasp": ["A03:2021 - Injection", "A05:2025 - Injection"], "asvs": {"control_id": "5.2.4 Dyanmic Code Execution Features", "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v52-sanitization-and-sandboxing-requirements", "section": "V5: Validation, Sanitization and Encoding Verification Requirements", "version": "4"}, "category": "security", "technology": ["python"], "references": ["https://owasp.org/Top10/A03_2021-Injection"], "subcategory": ["audit"], "likelihood": "LOW", "impact": "HIGH", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.security.audit.eval-detected_4f869c82d83d165c_98c2aa39", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.audit.eval-detected", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "remediation": "", "location": {"file_path": "unknown", "line_start": 217, "line_end": 217, "column_start": 40, "column_end": 57, "code_snippet": "requires login"}, "cwe_id": "CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A03:2021 - Injection", "references": [{"url": "https://owasp.org/Top10/A03_2021-Injection", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.audit.eval-detected", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 217, "col": 40, "offset": 10458}, "end": {"line": 217, "col": 57, "offset": 10475}, "extra": {"message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "metadata": {"source-rule-url": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b307-eval", "cwe": ["CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"], "owasp": ["A03:2021 - Injection", "A05:2025 - Injection"], "asvs": {"control_id": "5.2.4 Dyanmic Code Execution Features", "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v52-sanitization-and-sandboxing-requirements", "section": "V5: Validation, Sanitization and Encoding Verification Requirements", "version": "4"}, "category": "security", "technology": ["python"], "references": ["https://owasp.org/Top10/A03_2021-Injection"], "subcategory": ["audit"], "likelihood": "LOW", "impact": "HIGH", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.security.audit.eval-detected_4f869c82d83d165c_aef43c00", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.audit.eval-detected", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "remediation": "", "location": {"file_path": "unknown", "line_start": 226, "line_end": 226, "column_start": 38, "column_end": 55, "code_snippet": "requires login"}, "cwe_id": "CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A03:2021 - Injection", "references": [{"url": "https://owasp.org/Top10/A03_2021-Injection", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.audit.eval-detected", "path": "/tmp/tmpb8jm_z1l/4f869c82d83d165c.py", "start": {"line": 226, "col": 38, "offset": 11010}, "end": {"line": 226, "col": 55, "offset": 11027}, "extra": {"message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "metadata": {"source-rule-url": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b307-eval", "cwe": ["CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"], "owasp": ["A03:2021 - Injection", "A05:2025 - Injection"], "asvs": {"control_id": "5.2.4 Dyanmic Code Execution Features", "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v52-sanitization-and-sandboxing-requirements", "section": "V5: Validation, Sanitization and Encoding Verification Requirements", "version": "4"}, "category": "security", "technology": ["python"], "references": ["https://owasp.org/Top10/A03_2021-Injection"], "subcategory": ["audit"], "likelihood": "LOW", "impact": "HIGH", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 12 | true | [
"CWE-95",
"CWE-95",
"CWE-95",
"CWE-95"
] | [
"rules.python.lang.security.audit.eval-detected",
"rules.python.lang.security.audit.eval-detected",
"rules.python.lang.security.audit.eval-detected",
"rules.python.lang.security.audit.eval-detected"
] | [
"security",
"security",
"security",
"security"
] | [
"LOW",
"LOW",
"LOW",
"LOW"
] | [
"MEDIUM",
"MEDIUM",
"MEDIUM",
"MEDIUM"
] | [
169,
174,
217,
226
] | [
169,
174,
217,
226
] | [
40,
35,
40,
38
] | [
57,
52,
57,
55
] | [
"A03:2021 - Injection",
"A03:2021 - Injection",
"A03:2021 - Injection",
"A03:2021 - Injection"
] | [
"Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.",
"Detected the use of eval(). eval() can be dangerous if used... | [
5,
5,
5,
5
] | [
"LOW",
"LOW",
"LOW",
"LOW"
] | [
"HIGH",
"HIGH",
"HIGH",
"HIGH"
] | losses.py | /evkit/utils/losses.py | lilujunai/side-tuning | MIT | |
2024-11-18T18:05:44.364393+00:00 | 1,611,199,867,000 | ee5b19626616ec7568a5cb774bc5a76529f3c51e | 3 | {
"blob_id": "ee5b19626616ec7568a5cb774bc5a76529f3c51e",
"branch_name": "refs/heads/main",
"committer_date": 1611199867000,
"content_id": "18f2c7b9a15197f4a4144413fbc6b994a8d3c99f",
"detected_licenses": [
"MIT"
],
"directory_id": "6803e1834d76c7a9c1fd2484bbfb438615c341a2",
"extension": "py",
"filename": "d22b.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 331503553,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2146,
"license": "MIT",
"license_type": "permissive",
"path": "/d22b.py",
"provenance": "stack-edu-0054.json.gz:568765",
"repo_name": "jogloran/advent-of-code-2020",
"revision_date": 1611199867000,
"revision_id": "9804f1eb8d94c991d9aa3348f01f4bf65c195849",
"snapshot_id": "50b2eb208f10ac9c832dd35cd3b07d8b27d09ad2",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/jogloran/advent-of-code-2020/9804f1eb8d94c991d9aa3348f01f4bf65c195849/d22b.py",
"visit_date": "2023-02-22T00:52:13.546412"
} | 2.921875 | stackv2 | from more_itertools import split_at
grps = split_at(map(str.rstrip, open('d22.txt')), pred=lambda e: e == '')
grps = list(grps)
grp1 = list(map(int, grps[0][1:]))
grp2 = list(map(int, grps[1][1:]))
FIRST_DECK_WON = 0
SECOND_DECK_WON = 1
old_print=print
print=lambda *args: None
def match(grp1, grp2, depth=0):
memo = set()
print('Match')
print('-' * 30)
round = 1
while grp1 and grp2:
print(f'Round {round} (Game {depth + 1})'); round += 1
print(f"Player 1's deck: {','.join(map(str,grp1))}")
print(f"Player 2's deck: {','.join(map(str,grp2))}\n")
if (tuple(grp1), tuple(grp2)) in memo:
print('Game repeat detected')
return FIRST_DECK_WON
if len(grp1) > grp1[0] and len(grp2) > grp2[0]:
memo.add((tuple(grp1), tuple(grp2)))
which_deck_won = match(grp1[:][1:1+grp1[0]], grp2[:][1:1+grp2[0]], depth+1)
print(f"Returning from sub-game {depth+1}")
print("<" * 30)
if which_deck_won == FIRST_DECK_WON:
print(f'Player 1 won sub-game {depth+1}')
# if player 1 wins, then the order of cards added to player 1's deck
# is P1's winning card, _then_ P2's losing card
grp1.append(grp1[0])
grp1.append(grp2[0])
else:
print(f'Player 2 won sub-game {depth+1}')
grp2.append(grp2[0])
grp2.append(grp1[0])
elif grp1[0] < grp2[0]:
# p2 wins
memo.add((tuple(grp1), tuple(grp2)))
grp2.append(grp2[0])
grp2.append(grp1[0])
else:
# p1 wins
memo.add((tuple(grp1), tuple(grp2)))
grp1.append(grp1[0])
grp1.append(grp2[0])
del grp1[0]
del grp2[0]
winner = SECOND_DECK_WON if not grp1 else FIRST_DECK_WON
return winner
winner = match(grp1, grp2)
winner = grp2 if winner == SECOND_DECK_WON else grp1
pts = sum((len(winner) - pos) * val for pos, val in enumerate(winner))
old_print(pts)
# return (SECOND_DECK_WON if not grp1 else FIRST_DECK_WON), pts | 61 | 34.2 | 87 | 17 | 668 | python | [{"finding_id": "semgrep_rules.python.lang.best-practice.unspecified-open-encoding_55824a47a9bd1ce1_67992cf1", "tool_name": "semgrep", "rule_id": "rules.python.lang.best-practice.unspecified-open-encoding", "finding_type": "best-practice", "severity": "medium", "confidence": "medium", "message": "Missing 'encoding' parameter. 'open()' uses device locale encodings by default, corrupting files with special characters. Specify the encoding to ensure cross-platform support when opening files in text mode (e.g. encoding=\"utf-8\").", "remediation": "", "location": {"file_path": "unknown", "line_start": 2, "line_end": 2, "column_start": 33, "column_end": 48, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://www.python.org/dev/peps/pep-0597/", "title": null}, {"url": "https://docs.python.org/3/library/functions.html#open", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.best-practice.unspecified-open-encoding", "path": "/tmp/tmpb8jm_z1l/55824a47a9bd1ce1.py", "start": {"line": 2, "col": 33, "offset": 68}, "end": {"line": 2, "col": 48, "offset": 83}, "extra": {"message": "Missing 'encoding' parameter. 'open()' uses device locale encodings by default, corrupting files with special characters. Specify the encoding to ensure cross-platform support when opening files in text mode (e.g. encoding=\"utf-8\").", "metadata": {"category": "best-practice", "technology": ["python"], "references": ["https://www.python.org/dev/peps/pep-0597/", "https://docs.python.org/3/library/functions.html#open"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.maintainability.return-not-in-function_55824a47a9bd1ce1_909ab579", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.return-not-in-function", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "`return` only makes sense inside a function", "remediation": "", "location": {"file_path": "unknown", "line_start": 2, "line_end": 2, "column_start": 66, "column_end": 73, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.return-not-in-function", "path": "/tmp/tmpb8jm_z1l/55824a47a9bd1ce1.py", "start": {"line": 2, "col": 66, "offset": 101}, "end": {"line": 2, "col": 73, "offset": 108}, "extra": {"message": "`return` only makes sense inside a function", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.maintainability.return-not-in-function_55824a47a9bd1ce1_faddd80a", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.return-not-in-function", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "`return` only makes sense inside a function", "remediation": "", "location": {"file_path": "unknown", "line_start": 11, "line_end": 11, "column_start": 21, "column_end": 25, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.return-not-in-function", "path": "/tmp/tmpb8jm_z1l/55824a47a9bd1ce1.py", "start": {"line": 11, "col": 21, "offset": 275}, "end": {"line": 11, "col": 25, "offset": 279}, "extra": {"message": "`return` only makes sense inside a function", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 3 | true | [
"",
""
] | [
"rules.python.lang.maintainability.return-not-in-function",
"rules.python.lang.maintainability.return-not-in-function"
] | [
"maintainability",
"maintainability"
] | [
"MEDIUM",
"MEDIUM"
] | [
"MEDIUM",
"MEDIUM"
] | [
2,
11
] | [
2,
11
] | [
66,
21
] | [
73,
25
] | [
"",
""
] | [
"`return` only makes sense inside a function",
"`return` only makes sense inside a function"
] | [
5,
5
] | [
"",
""
] | [
"",
""
] | d22b.py | /d22b.py | jogloran/advent-of-code-2020 | MIT | |
2024-11-18T18:05:45.157547+00:00 | 1,430,934,893,000 | 9936379ea8ae076b9b0c4ce5b322d8a12497e38a | 2 | {
"blob_id": "9936379ea8ae076b9b0c4ce5b322d8a12497e38a",
"branch_name": "refs/heads/master",
"committer_date": 1431212320000,
"content_id": "92a5167070c5a64bd70518f188bd927997a14043",
"detected_licenses": [
"MIT"
],
"directory_id": "7492f373430262e8ba95c4cc52517ed23107dc67",
"extension": "py",
"filename": "dns_formatter.py",
"fork_events_count": 0,
"gha_created_at": 1426193181000,
"gha_event_created_at": 1426193181000,
"gha_language": null,
"gha_license_id": null,
"github_id": 32101544,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4191,
"license": "MIT",
"license_type": "permissive",
"path": "/dns_formatter.py",
"provenance": "stack-edu-0054.json.gz:568776",
"repo_name": "sargon/icvpn-scripts",
"revision_date": 1430934893000,
"revision_id": "89989365ebbfcd6bbdf7325a25e14465723bc327",
"snapshot_id": "6356ef80baabb7511e3132af1c9a91acaf29e427",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/sargon/icvpn-scripts/89989365ebbfcd6bbdf7325a25e14465723bc327/dns_formatter.py",
"visit_date": "2021-01-17T22:54:39.893859"
} | 2.359375 | stackv2 | from formatter import Formatter
from textwrap import dedent
from socket import AF_INET, AF_INET6, inet_pton, error as socket_error
def try_inet_pton(af, ip):
try:
inet_pton(af, ip)
return True
except socket_error:
return False
class _DNSFormatter(Formatter):
filters = {
"v4": lambda value: try_inet_pton(AF_INET, value),
"v6": lambda value: try_inet_pton(AF_INET6, value),
}
def populate_argument_parser(self, parser):
parser.add_argument(
"--filter",
dest="filter",
help="""Only include certain servers.
Possible choices: %s
""" %
", ".join(self.filters.keys()),
choices=list(self.filters.keys()))
def _map_communities(self, arguments, communities):
filters = [filters[options.filter]] if arguments.filter else []
filtered = dict()
for community, data in communities:
try:
domains = data['domains']
nameservers = data['nameservers']
except (TypeError, KeyError):
continue
servers = filter(lambda d: all(f(d) for f in filters), nameservers)
servers = list(servers)
servers = list(filter(lambda d: all(f(d) for f in filters), nameservers))
if len(domains) == 0 or len(servers) == 0:
filtered[community] = None
else:
filtered[community] = dict({'domains': domains, 'servers': servers})
return filtered.items()
def generate_config(self, arguments, communities):
communities = self._map_communities(arguments, communities)
for community, data in communities:
self.add_comment(community)
if data is None:
self.add_comment("No valid domains found")
else:
self._format_config(data['domains'], data['servers'])
class DnsmasqFormatter(_DNSFormatter):
def _format_config(self, domains, servers):
for domain in domains:
for server in servers:
self.config.append("server=/%s/%s" % (domain, server))
class BindFormatter(_DNSFormatter):
def _format_config(self, domains, servers):
for domain in domains:
self.config.append(dedent("""
zone "%s" {
type static-stub;
server-addresses { %s; };
};
""" % (domain, "; ".join(servers))).lstrip())
class BindForwardFormatter(_DNSFormatter):
def _format_config(self, domains, servers):
for domain in domains:
self.config.append(dedent("""
zone "%s" {
type forward;
forwarders { %s; };
forward only;
};
""" % (domain, "; ".join(servers))).lstrip())
class UnboundForwardFormatter(_DNSFormatter):
def generate_config(self, arguments, communities):
communities = self._map_communities(arguments, communities)
buffer = []
self.add_comment(
"""
This file is automatically generated.
""")
self.config.append('server:')
self.config.append('\tlocal-zone: "10.in-addr.arpa" nodefault')
for community, data in communities:
if data is None:
self.add_comment("No valid domains found")
continue
self.config.append('\n\t# %s' % community)
for domain in data['domains']:
if domain.endswith('.arpa'):
self.config.append('\tlocal-zone: "%s" nodefault' % domain)
else:
self.config.append('\tdomain-insecure: "%s"' % domain)
buffer.append('\n#\n# %s\n#\n' % community)
for domain in data['domains']:
buffer.append('forward-zone:')
buffer.append('\tname: "%s"' % domain)
for server in data['servers']:
buffer.append('\tforward-addr: %s' % server)
self.config = self.config + buffer
| 139 | 29.15 | 85 | 20 | 851 | python | [{"finding_id": "semgrep_rules.python.lang.maintainability.return-not-in-function_7d37b03fc19e01cb_971f0af9", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.return-not-in-function", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "`return` only makes sense inside a function", "remediation": "", "location": {"file_path": "unknown", "line_start": 17, "line_end": 17, "column_start": 29, "column_end": 58, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.return-not-in-function", "path": "/tmp/tmpb8jm_z1l/7d37b03fc19e01cb.py", "start": {"line": 17, "col": 29, "offset": 340}, "end": {"line": 17, "col": 58, "offset": 369}, "extra": {"message": "`return` only makes sense inside a function", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.maintainability.return-not-in-function_7d37b03fc19e01cb_81fc3254", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.return-not-in-function", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "`return` only makes sense inside a function", "remediation": "", "location": {"file_path": "unknown", "line_start": 18, "line_end": 18, "column_start": 29, "column_end": 59, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.return-not-in-function", "path": "/tmp/tmpb8jm_z1l/7d37b03fc19e01cb.py", "start": {"line": 18, "col": 29, "offset": 399}, "end": {"line": 18, "col": 59, "offset": 429}, "extra": {"message": "`return` only makes sense inside a function", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 2 | true | [
"",
""
] | [
"rules.python.lang.maintainability.return-not-in-function",
"rules.python.lang.maintainability.return-not-in-function"
] | [
"maintainability",
"maintainability"
] | [
"MEDIUM",
"MEDIUM"
] | [
"MEDIUM",
"MEDIUM"
] | [
17,
18
] | [
17,
18
] | [
29,
29
] | [
58,
59
] | [
"",
""
] | [
"`return` only makes sense inside a function",
"`return` only makes sense inside a function"
] | [
5,
5
] | [
"",
""
] | [
"",
""
] | dns_formatter.py | /dns_formatter.py | sargon/icvpn-scripts | MIT | |
2024-11-18T18:05:50.995077+00:00 | 1,681,380,941,000 | e318ef477e6416a2771f439d3ccca329e22e093b | 3 | {
"blob_id": "e318ef477e6416a2771f439d3ccca329e22e093b",
"branch_name": "refs/heads/master",
"committer_date": 1681380941000,
"content_id": "ac6bb68abe5cfe57022afe0a29bb34005b6b4d36",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "af84dbfbdca0ee1a354924881b6578c37a66efcf",
"extension": "py",
"filename": "NCF.py",
"fork_events_count": 0,
"gha_created_at": 1498319300000,
"gha_event_created_at": 1688678201000,
"gha_language": "Jupyter Notebook",
"gha_license_id": "Apache-2.0",
"github_id": 95307122,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10756,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/ML/DL/ncf/NCF.py",
"provenance": "stack-edu-0054.json.gz:568820",
"repo_name": "Johnwei386/Warehouse",
"revision_date": 1681380941000,
"revision_id": "77da078a176930c0107431b7a0ff7b01d6634ba7",
"snapshot_id": "96db3b3b7c258b41688395942f766c2f4299aa56",
"src_encoding": "UTF-8",
"star_events_count": 3,
"url": "https://raw.githubusercontent.com/Johnwei386/Warehouse/77da078a176930c0107431b7a0ff7b01d6634ba7/ML/DL/ncf/NCF.py",
"visit_date": "2023-07-19T22:12:33.331111"
} | 2.828125 | stackv2 | # _*_ coding:utf8 _*_
import numpy as np
import pandas as pd
import tensorflow as tf
import sys
#import ncf.metrics
import metrics
class NCF(object):
def __init__(self, embed_size, user_size, item_size, lr,
optim, initializer, loss_func, activation_func,
regularizer_rate, iterator, topk, dropout, is_training):
"""
Important Arguments.
embed_size: The final embedding size for users and items.
optim: The optimization method chosen in this model.
initializer: The initialization method.
loss_func: Loss function, we choose the cross entropy.
regularizer_rate: L2 is chosen, this represents the L2 rate.
iterator: Input dataset.
topk: For evaluation, computing the topk items.
"""
self.embed_size = embed_size # 16
self.user_size = user_size # 1508
self.item_size = item_size # 2071
self.lr = lr
self.initializer = initializer
self.loss_func = loss_func
self.activation_func = activation_func
self.regularizer_rate = regularizer_rate
self.optim = optim
self.topk = topk # 10
self.dropout = dropout
self.is_training = is_training
self.iterator = iterator
def get_data(self):
sample = self.iterator.get_next() # 得到Dataset中的数据
self.user = sample['user']
self.item = sample['item']
# 转换tensor为一个新类型
self.label = tf.cast(sample['label'],tf.float32)
def inference(self):
# 设置参数初始化方式、损失函数、参数更新方式(优化器)
""" Initialize important settings """
self.regularizer = tf.contrib.layers.l2_regularizer(self.regularizer_rate)
if self.initializer == 'Normal':
self.initializer = tf.truncated_normal_initializer(stddev=0.01)
elif self.initializer == 'Xavier_Normal':
self.initializer = tf.contrib.layers.xavier_initializer()
else:
self.initializer = tf.glorot_uniform_initializer()
if self.activation_func == 'ReLU':
self.activation_func = tf.nn.relu
elif self.activation_func == 'Leaky_ReLU':
self.activation_func = tf.nn.leaky_relu
elif self.activation_func == 'ELU':
self.activation_func = tf.nn.elu
if self.loss_func == 'cross_entropy':
self.loss_func = tf.nn.sigmoid_cross_entropy_with_logits
if self.optim == 'SGD':
self.optim = tf.train.GradientDescentOptimizer(self.lr,name='SGD')
elif self.optim == 'RMSProp':
self.optim = tf.train.RMSPropOptimizer(self.lr,
decay=0.9,
momentum=0.0,
name='RMSProp')
elif self.optim == 'Adam':
self.optim = tf.train.AdamOptimizer(self.lr, name='Adam')
def create_model(self):
with tf.name_scope('input'):
# [0,1,...,0]指示某个用户的one-hot编码矩阵,大小为 Nx1508
# N为样本总数,训练集就是训练集总数,测试集就是测试集总数,1508是用户数
self.user_onehot = tf.one_hot(self.user,self.user_size,name='user_onehot')
# Nx2071,指示那个item被选中,2071为item的数量
self.item_onehot = tf.one_hot(self.item,self.item_size,name='item_onehot')
with tf.name_scope('embed'):
# inputs: 输入数据,这里是大小为 Nx1508 的Tensor张量数据
# units: 隐藏层神经元个数, 预置为16
# 激活函数为Relu,用Xavier方法初始化参数,使用L2范数作为正则化参数的惩罚项
# [Nx1508] x [1508x16] = Nx16
self.user_embed_GMF = tf.layers.dense(inputs = self.user_onehot,
units = self.embed_size,
activation = self.activation_func,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='user_embed_GMF')
# [Nx2071] x [2071x16]= Nx16
self.item_embed_GMF = tf.layers.dense(inputs=self.item_onehot,
units=self.embed_size,
activation=self.activation_func,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='item_embed_GMF')
# [Nx1508] x [1508x16] = Nx16
self.user_embed_MLP = tf.layers.dense(inputs=self.user_onehot,
units=self.embed_size,
activation=self.activation_func,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='user_embed_MLP')
# [Nx2071] x [2071x16]= Nx16
self.item_embed_MLP = tf.layers.dense(inputs=self.item_onehot,
units=self.embed_size,
activation=self.activation_func,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='item_embed_MLP')
with tf.name_scope("GMF"):
# [Nx16] x [Nx16] = [Nx16] 逐元素相加,输出一个等shape的矩阵
self.GMF = tf.multiply(self.user_embed_GMF, self.item_embed_GMF,name='GMF')
# 多层感知器网络
with tf.name_scope("MLP"):
# 按列拼接两个Tensor张量,[Nx16]与[Nx16]按列拼接等于[Nx32]
self.interaction = tf.concat([self.user_embed_MLP, self.item_embed_MLP],
axis=-1, name='interaction')
print(self.interaction.shape)
# [Nx32] x [32x32] = [Nx32]
self.layer1_MLP = tf.layers.dense(inputs=self.interaction,
units=self.embed_size * 2,
activation=self.activation_func,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='layer1_MLP')
# 使用dropout方法优化神经元的激活
self.layer1_MLP = tf.layers.dropout(self.layer1_MLP, rate=self.dropout)
print(self.layer1_MLP.shape)
# [Nx32] x [32x16] = [Nx16]
self.layer2_MLP = tf.layers.dense(inputs=self.layer1_MLP,
units=self.embed_size,
activation=self.activation_func,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='layer2_MLP')
self.layer2_MLP = tf.layers.dropout(self.layer2_MLP, rate=self.dropout)
print(self.layer2_MLP.shape)
# [Nx16] x [16x8] = [Nx8]
self.layer3_MLP = tf.layers.dense(inputs=self.layer2_MLP,
units=self.embed_size // 2,
activation=self.activation_func,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='layer3_MLP')
self.layer3_MLP = tf.layers.dropout(self.layer3_MLP, rate=self.dropout)
print(self.layer3_MLP.shape)
#得到预测值
with tf.name_scope('concatenation'):
# [Nx16] 按列拼接 [Nx8] = [Nx24]
self.concatenation = tf.concat([self.GMF,self.layer3_MLP],
axis=-1,name='concatenation')
# [Nx24] x [24x1] = [Nx1]
self.logits = tf.layers.dense(inputs= self.concatenation,
units = 1,
activation=None,
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='predict')
print(self.logits.shape)
# 转化[Nx1]矩阵为1D数组,为(N,)
self.logits_dense = tf.reshape(self.logits,[-1])
print(self.logits_dense.shape)
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(self.loss_func(
labels=self.label, logits=self.logits_dense, name='loss'))
with tf.name_scope("optimzation"):
self.optimzer = self.optim.minimize(self.loss)
def eval(self):
with tf.name_scope("evaluation"):
self.item_replica = self.item
_, self.indice = tf.nn.top_k(tf.sigmoid(self.logits_dense), self.topk)
def summary(self):
""" Create summaries to write on tensorboard. """
self.writer = tf.summary.FileWriter('./graphs/NCF', tf.get_default_graph())
with tf.name_scope("summaries"):
tf.summary.scalar('loss', self.loss)
tf.summary.histogram('histogram loss', self.loss)
self.summary_op = tf.summary.merge_all()
def build(self):
self.get_data()
self.inference()
self.create_model()
self.eval()
self.summary()
self.saver = tf.train.Saver(tf.global_variables())
def step(self, session, step):
""" Train the model step by step. """
if self.is_training:
loss, optim, summaries = session.run(
[self.loss, self.optimzer, self.summary_op])
self.writer.add_summary(summaries, global_step=step)
else:
indice, item = session.run([self.indice, self.item_replica])
prediction = np.take(item, indice)
return prediction, item | 227 | 44.54 | 87 | 16 | 2,121 | python | [{"finding_id": "semgrep_rules.python.lang.maintainability.is-function-without-parentheses_8de0869020318ad0_586c3271", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.is-function-without-parentheses", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "Is \"is_training\" a function or an attribute? If it is a function, you may have meant self.is_training() because self.is_training is always true.", "remediation": "", "location": {"file_path": "unknown", "line_start": 37, "line_end": 37, "column_start": 9, "column_end": 25, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.is-function-without-parentheses", "path": "/tmp/tmpb8jm_z1l/8de0869020318ad0.py", "start": {"line": 37, "col": 9, "offset": 1216}, "end": {"line": 37, "col": 25, "offset": 1232}, "extra": {"message": "Is \"is_training\" a function or an attribute? If it is a function, you may have meant self.is_training() because self.is_training is always true.", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.maintainability.is-function-without-parentheses_8de0869020318ad0_dff0c273", "tool_name": "semgrep", "rule_id": "rules.python.lang.maintainability.is-function-without-parentheses", "finding_type": "maintainability", "severity": "medium", "confidence": "medium", "message": "Is \"is_training\" a function or an attribute? If it is a function, you may have meant self.is_training() because self.is_training is always true.", "remediation": "", "location": {"file_path": "unknown", "line_start": 220, "line_end": 220, "column_start": 12, "column_end": 28, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.maintainability.is-function-without-parentheses", "path": "/tmp/tmpb8jm_z1l/8de0869020318ad0.py", "start": {"line": 220, "col": 12, "offset": 10393}, "end": {"line": 220, "col": 28, "offset": 10409}, "extra": {"message": "Is \"is_training\" a function or an attribute? If it is a function, you may have meant self.is_training() because self.is_training is always true.", "metadata": {"category": "maintainability", "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 2 | true | [
"",
""
] | [
"rules.python.lang.maintainability.is-function-without-parentheses",
"rules.python.lang.maintainability.is-function-without-parentheses"
] | [
"maintainability",
"maintainability"
] | [
"MEDIUM",
"MEDIUM"
] | [
"MEDIUM",
"MEDIUM"
] | [
37,
220
] | [
37,
220
] | [
9,
12
] | [
25,
28
] | [
"",
""
] | [
"Is \"is_training\" a function or an attribute? If it is a function, you may have meant self.is_training() because self.is_training is always true.",
"Is \"is_training\" a function or an attribute? If it is a function, you may have meant self.is_training() because self.is_training is always true."
] | [
5,
5
] | [
"",
""
] | [
"",
""
] | NCF.py | /ML/DL/ncf/NCF.py | Johnwei386/Warehouse | Apache-2.0 | |
2024-11-18T18:05:52.300596+00:00 | 1,599,152,646,000 | 9185695cb36d6614095277825baed9bb945a49c2 | 2 | {
"blob_id": "9185695cb36d6614095277825baed9bb945a49c2",
"branch_name": "refs/heads/master",
"committer_date": 1599152646000,
"content_id": "048a36508be52d7c1697a055f4fb3d12039a6fca",
"detected_licenses": [
"MIT"
],
"directory_id": "3bc4d9a4f7744126cf5af8b56c75ec188476f6aa",
"extension": "py",
"filename": "assess.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 290616074,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9477,
"license": "MIT",
"license_type": "permissive",
"path": "/assess.py",
"provenance": "stack-edu-0054.json.gz:568839",
"repo_name": "kravitsjacob/phase_2_assessment",
"revision_date": 1599152646000,
"revision_id": "b0a15b8b4e9e98de61a029479fc658a0f3d53b3d",
"snapshot_id": "8e3b06e84d55b3c0e9a557e4900099e4b8b6d69d",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/kravitsjacob/phase_2_assessment/b0a15b8b4e9e98de61a029479fc658a0f3d53b3d/assess.py",
"visit_date": "2022-12-13T03:58:06.071793"
} | 2.390625 | stackv2 |
# Import Packages
import pandas as pd
import os
import sklearn as sk
import numpy as np
import pickle
from imblearn.over_sampling import ADASYN
from sklearn.metrics import roc_curve, auc
# Global Vars
pathto_data = '/app_io'
pathto_spacefeats = os.path.join(pathto_data, 'spatial_features_model', 'output')
pathto_damdata = os.path.join(pathto_data, 'phase_1_optimization', 'input', 'MA_U.csv')
pathto_deployidx = os.path.join(pathto_data, 'phase_1_optimization', 'input', 'deploy_idx.pkl')
pathto_phase1_results = os.path.join(pathto_data, 'phase_1_results_convert', 'output', 'results.csv')
pathto_solution_classifications = os.path.join(pathto_data, 'phase_2_assessment', 'output', 'solution_classifications')
pathto_assessment_objectives = os.path.join(pathto_data, 'phase_2_assessment', 'output', 'assessment_objectives')
parameter_names = ['N_length', 'N_width', 'n_estimators', 'min_samples_split', 'min_samples_leaf',
'min_weight_fraction_leaf', 'max_depth', 'max_features', 'max_leaf_nodes']
objective_names = ['P2_accuracy', 'P2_FPR', 'P2_TPR', 'P1_AUROCC']
feature_names = ['Dam Height (ft)', 'Dam Length (ft)', 'Reservoir Size (acre-ft)', 'Maximum Downstream Slope (%)',
'Downstream Houses', 'Downstream Population', 'Building Exposure ($1000)',
'Building Footprint (1000 sq. ft.)', 'Content Exposure ($1000)']
predicted_name = 'Hazard'
positive_lab = 'NH'
def parameter_converter(params):
"""
Convert parameter to valid types
:param params: tuple
current parameters of default types
:return: dict
All the corresponding parameters in required types
"""
# Parse Ints
for i, val in enumerate(params):
if val.is_integer():
params[i] = int(val)
# Convert to Dictionary
param_dict = dict(zip(parameter_names, params))
return param_dict
def get_features(param_dict):
"""
Retrive the corresponding spatial and non-spatial feature values
:param param_dict: dict
All the corresponding simulation parameters
:return: DataFrame
Spatial and non-spatial dam hazard feature values
"""
# Import Spatial Features
df_name = 'N_length_' + str(param_dict['N_length']) + '_N_width_' + str(param_dict['N_width'])
space_feats = pd.read_hdf(os.path.join(pathto_spacefeats, 'spatial_feats.h5'), df_name)
# Import Non-Spatial Features
data = pd.read_csv(pathto_damdata)
# Merge Features
data = space_feats.join(data)
data.index = data['RECORDID']
# Rename Columns
data = data.rename(index=str, columns={'HAZARD': predicted_name, 'DAM_HEIGHT': feature_names[0],
'DAM_LENGTH': feature_names[1], 'NORMAL_STORAGE': feature_names[2],
'Slope_max': feature_names[3], 'hous_sum': feature_names[4],
'pop_sum': feature_names[5], 'buil_sum': feature_names[6],
'foot_sum': feature_names[7], 'cont_sum': feature_names[8]})
# Extract Features
data = data[feature_names+[predicted_name]]
# Export
return data
def preprocessor(df):
"""
Processing the feature values before classification
:param df: DataFrame
Feature values
:return: DataFrame
Processed feature values
"""
# Combine Categories
df = df.replace(to_replace=['L', 'S', 'H'], value=['NH', 'NH', 'H'])
# Replace nans with median
df = df.fillna(df.median())
# Specify Objective
y = df[predicted_name]
# Shape Data
X = np.array(df[feature_names])
y = np.array(y)
return X, y
def train_model(ml_params, data):
"""
Train the random forest to the current set of hyperparameters (no cross-validation)
:param ml_params: dict
Current set of hyperparameters
:param data: DataFrame
The current set of dams with features and true hazard classifications
:return: RandomForestClassifier
Trained random forest
"""
# Initialized Vars
random_state = 1008
# Process Data
X, y = preprocessor(data)
# Resample the training data to deal with class imbalance
method = ADASYN(random_state=random_state)
X_res, y_res = method.fit_sample(X, y)
# Create Model
clf = sk.ensemble.RandomForestClassifier(n_jobs=-1, random_state=random_state,
n_estimators=ml_params['n_estimators'],
min_samples_split=ml_params['min_samples_split'],
min_samples_leaf=ml_params['min_samples_leaf'],
min_weight_fraction_leaf=ml_params['min_weight_fraction_leaf'],
max_depth=ml_params['max_depth'],
max_features=ml_params['max_features'],
max_leaf_nodes=ml_params['max_leaf_nodes'])
# Fit model to train data
clf.fit(X_res, y_res)
# Export
return clf
def predict_values(model, data):
"""
Predict values based on a trained random forest
:param model: RandomForestClassifier
Trained random forest
:param data: DataFrame
The current set of dams with features and true hazard classifications
:return: DataFrame
The current set of dams with features, true hazard classifications, and predicted hazard
classifications
"""
# Process Data
X, y = preprocessor(data)
# Predicted Values
y_pred = model.predict(X)
# Append Predicted Value
data['True Hazard Class'] = y
data['Predicted Hazard Class'] = y_pred
# Area Under ROC Curve
y_score = model.predict_proba(X)[:, 1]
false_positive, true_positive, _ = roc_curve(y, y_score, pos_label=positive_lab)
AUROCC = auc(false_positive, true_positive)
data['AUROCC'] = AUROCC
return data
def CM(row):
"""
Confusion matrix function to classify true positive, false positive, false negative, or true negative
classifications
:param row: Series
Predicted and true classification of the current dam being evaluated
:return: str
Classification type
"""
if row['True Hazard Class'] == 'H' and row['Predicted Hazard Class'] == 'H':
return 'TN'
elif row['True Hazard Class'] == 'NH' and row['Predicted Hazard Class'] == 'NH':
return 'TP'
elif row['True Hazard Class'] == 'H' and row['Predicted Hazard Class'] == 'NH':
return 'FP'
elif row['True Hazard Class'] == 'NH' and row['Predicted Hazard Class'] == 'H':
return 'FN'
def get_obj(df):
"""
Calculate objective values
:param df: dataframe
Phase 2 classifications of current solution
:return:
Phase 2 objective values
"""
# Extract Errors
TP = df['error'].value_counts()['TP']
TN = df['error'].value_counts()['TN']
FP = df['error'].value_counts()['FP']
FN = df['error'].value_counts()['FN']
# Calculate Objectives
accuracy = (TP+TN)/(TP+TN+FP+FN)
FPR = FP/(FP+TN)
TPR = TP/(TP+FN)
AUROCC = df['AUROCC'][0]
return pd.Series([FN, FP, TN, TP, accuracy, FPR, TPR, AUROCC], index=['P2_FN', 'P2_FP', 'P2_TN', 'P2_TP']+objective_names)
def simulation(vars, name):
"""
Evaluate a dam hazard potential 'simulation' with a given set of spatial parameters and random forest
hyperparameters
:param vars: tuple
set of spatial and nonspatial parameters
:param name: str
Name of current solution
:return: Series
Phase 2 objective values
"""
# Convert Parameters
param_dict = parameter_converter(vars)
# Get Features
data = get_features(param_dict)
# Get Deployment Indexes
with open(pathto_deployidx, 'rb') as f:
deploy_idx = pickle.load(f)
# Train Model on All But Deployment Features
model = train_model(param_dict, data.drop(deploy_idx))
# Predict Deployment Features
df = predict_values(model, data.loc[deploy_idx])
# Compute Confusion Matrix
df['error'] = df.apply(CM, axis=1)
# Export Classifications
df.to_csv(os.path.join(pathto_solution_classifications, 'solution_'+str(int(name)) + '.csv'), index=False)
# Compute Objectives
objs = get_obj(df)
print(objs)
return objs
def main():
# Import Reference Set
df = pd.read_table(pathto_phase1_results, sep=',').infer_objects()
# Use All Solutions
df['solution_num'] = list(df.index)
# Run Simulation
objs_df = df.apply(lambda row: simulation(row[parameter_names].tolist(), row['solution_num']), axis=1)
rep_df = pd.concat([df, objs_df], axis=1)
# Export Representative Solution
rep_df.to_csv(os.path.join(pathto_assessment_objectives, 'assessment_results.csv'), index=False, header=True, sep=',')
return 0
if __name__ == '__main__':
main()
| 238 | 37.82 | 126 | 15 | 2,175 | python | [{"finding_id": "semgrep_rules.python.lang.security.deserialization.avoid-pickle_6951a5a14fd5279c_3e755086", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.deserialization.avoid-pickle", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Avoid using `pickle`, which is known to lead to code execution vulnerabilities. When unpickling, the serialized data could be manipulated to run arbitrary code. Instead, consider serializing the relevant data as JSON or a similar text-based serialization format.", "remediation": "", "location": {"file_path": "unknown", "line_start": 209, "line_end": 209, "column_start": 22, "column_end": 36, "code_snippet": "requires login"}, "cwe_id": "CWE-502: Deserialization of Untrusted Data", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A08:2017 - Insecure Deserialization", "references": [{"url": "https://docs.python.org/3/library/pickle.html", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.deserialization.avoid-pickle", "path": "/tmp/tmpb8jm_z1l/6951a5a14fd5279c.py", "start": {"line": 209, "col": 22, "offset": 8176}, "end": {"line": 209, "col": 36, "offset": 8190}, "extra": {"message": "Avoid using `pickle`, which is known to lead to code execution vulnerabilities. When unpickling, the serialized data could be manipulated to run arbitrary code. Instead, consider serializing the relevant data as JSON or a similar text-based serialization format.", "metadata": {"owasp": ["A08:2017 - Insecure Deserialization", "A08:2021 - Software and Data Integrity Failures", "A08:2025 - Software or Data Integrity Failures"], "cwe": ["CWE-502: Deserialization of Untrusted Data"], "references": ["https://docs.python.org/3/library/pickle.html"], "category": "security", "technology": ["python"], "cwe2022-top25": true, "cwe2021-top25": true, "subcategory": ["audit"], "likelihood": "LOW", "impact": "MEDIUM", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 1 | true | [
"CWE-502"
] | [
"rules.python.lang.security.deserialization.avoid-pickle"
] | [
"security"
] | [
"LOW"
] | [
"MEDIUM"
] | [
209
] | [
209
] | [
22
] | [
36
] | [
"A08:2017 - Insecure Deserialization"
] | [
"Avoid using `pickle`, which is known to lead to code execution vulnerabilities. When unpickling, the serialized data could be manipulated to run arbitrary code. Instead, consider serializing the relevant data as JSON or a similar text-based serialization format."
] | [
5
] | [
"LOW"
] | [
"MEDIUM"
] | assess.py | /assess.py | kravitsjacob/phase_2_assessment | MIT | |
2024-11-18T18:05:52.919914+00:00 | 1,625,584,497,000 | 7d79c06f8af3e0bcfa88e9703b177b44978569de | 2 | {
"blob_id": "7d79c06f8af3e0bcfa88e9703b177b44978569de",
"branch_name": "refs/heads/master",
"committer_date": 1625584497000,
"content_id": "162fc4cf197c00de108895d7889162b40d2c8cc9",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "279c70e0c1ee0625bf0940d6644def5c6f294c43",
"extension": "py",
"filename": "sources.py",
"fork_events_count": 0,
"gha_created_at": 1627422472000,
"gha_event_created_at": 1627422472000,
"gha_language": null,
"gha_license_id": "Apache-2.0",
"github_id": 390135802,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3603,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/registry/sources.py",
"provenance": "stack-edu-0054.json.gz:568847",
"repo_name": "yongleyuan/open-science-pool-registry",
"revision_date": 1625584497000,
"revision_id": "6006e5bff640119ed9cd4bdb1afc8dccc3890dd0",
"snapshot_id": "8ca81a4c1ed5b4047b1a822948ca8d0b718fa5ac",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/yongleyuan/open-science-pool-registry/6006e5bff640119ed9cd4bdb1afc8dccc3890dd0/registry/sources.py",
"visit_date": "2023-06-28T02:23:05.147816"
} | 2.34375 | stackv2 | import re
try: # py3
from configparser import ConfigParser
except ImportError: # py2
from ConfigParser import ConfigParser
import xml.etree.ElementTree as ET
import http.client
import urllib.error
import urllib.request
from flask import current_app, request
from .exceptions import ConfigurationError
TOPOLOGY_RG = "https://topology.opensciencegrid.org/rgsummary/xml"
def get_user_info():
try:
return current_app.config["USER_INFO_FAKE"]
except:
pass
result = {
"idp": request.environ.get("OIDC_CLAIM_idp_name", None),
"id": request.environ.get("OIDC_CLAIM_osgid", None),
"name": request.environ.get("OIDC_CLAIM_name", None),
"email": request.environ.get("OIDC_CLAIM_email", None)
}
current_app.logger.debug("Authenticated user info is {}".format(str(result)))
return result
def is_signed_up(user_info):
return user_info.get("id")
def get_sources(user_info):
"""
Query topology to get a list of valid CEs and their managers
"""
osgid = user_info.get("id")
if not osgid:
return []
# URL for all Production CE resources
# topology_url = TOPOLOGY_RG + '?gridtype=on&gridtype_1=on&service_on&service_1=on'
# URL for all Execution Endpoint resources
topology_url = TOPOLOGY_RG + '?service=on&service_157=on'
try:
response = urllib.request.urlopen(topology_url)
topology_xml = response.read()
except (urllib.error.URLError, http.client.HTTPException):
raise TopologyError('Error retrieving OSG Topology registrations')
try:
topology_et = ET.fromstring(topology_xml)
except ET.ParseError:
if not topology_xml:
msg = 'OSG Topology query returned empty response'
else:
msg = 'OSG Topology query returned malformed XML'
raise TopologyError(msg)
os_pool_resources = []
resources = topology_et.findall('./ResourceGroup/Resources/Resource')
if not resources:
raise TopologyError('Failed to find any OSG Topology resources')
for resource in resources:
try:
fqdn = resource.find('./FQDN').text.strip()
except AttributeError:
# skip malformed resource missing an FQDN
continue
active = False
try:
active = resource.find('./Active').text.strip().lower() == "true"
except AttributeError:
continue
if not active:
continue
try:
services = [service.find("./Name").text.strip()
for service in resource.findall("./Services/Service")]
except AttributeError:
continue
if ('Execution Endpoint' not in services) and ('Submit Node' not in services):
continue
try:
admin_contacts = [contact_list.find('./Contacts')
for contact_list in resource.findall('./ContactLists/ContactList')
if contact_list.findtext('./ContactType', '').strip() == 'Administrative Contact']
except AttributeError:
# skip malformed resource missing contacts
continue
for contact_list in admin_contacts:
for contact in contact_list.findall("./Contact"):
if contact.findtext('./CILogonID', '').strip() == osgid:
os_pool_resources.append(fqdn)
return os_pool_resources
SOURCE_CHECK = re.compile(r"^[a-zA-Z][-.0-9a-zA-Z]*$")
def is_valid_source_name(source_name):
return bool(SOURCE_CHECK.match(source_name))
| 114 | 30.61 | 112 | 19 | 777 | python | [{"finding_id": "semgrep_rules.python.lang.security.use-defused-xml_2d92585431a64ceb_1c0d98ab", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.use-defused-xml", "finding_type": "security", "severity": "high", "confidence": "low", "message": "The Python documentation recommends using `defusedxml` instead of `xml` because the native Python `xml` library is vulnerable to XML External Entity (XXE) attacks. These attacks can leak confidential data and \"XML bombs\" can cause denial of service.", "remediation": "", "location": {"file_path": "unknown", "line_start": 8, "line_end": 8, "column_start": 1, "column_end": 35, "code_snippet": "requires login"}, "cwe_id": "CWE-611: Improper Restriction of XML External Entity Reference", "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": "A04:2017 - XML External Entities (XXE)", "references": [{"url": "https://docs.python.org/3/library/xml.html", "title": null}, {"url": "https://github.com/tiran/defusedxml", "title": null}, {"url": "https://owasp.org/www-community/vulnerabilities/XML_External_Entity_(XXE)_Processing", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.use-defused-xml", "path": "/tmp/tmpb8jm_z1l/2d92585431a64ceb.py", "start": {"line": 8, "col": 1, "offset": 135}, "end": {"line": 8, "col": 35, "offset": 169}, "extra": {"message": "The Python documentation recommends using `defusedxml` instead of `xml` because the native Python `xml` library is vulnerable to XML External Entity (XXE) attacks. These attacks can leak confidential data and \"XML bombs\" can cause denial of service.", "metadata": {"owasp": ["A04:2017 - XML External Entities (XXE)", "A05:2021 - Security Misconfiguration", "A02:2025 - Security Misconfiguration"], "cwe": ["CWE-611: Improper Restriction of XML External Entity Reference"], "references": ["https://docs.python.org/3/library/xml.html", "https://github.com/tiran/defusedxml", "https://owasp.org/www-community/vulnerabilities/XML_External_Entity_(XXE)_Processing"], "category": "security", "technology": ["python"], "cwe2022-top25": true, "cwe2021-top25": true, "subcategory": ["audit"], "likelihood": "LOW", "impact": "MEDIUM", "confidence": "LOW"}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 1 | true | [
"CWE-611"
] | [
"rules.python.lang.security.use-defused-xml"
] | [
"security"
] | [
"LOW"
] | [
"HIGH"
] | [
8
] | [
8
] | [
1
] | [
35
] | [
"A04:2017 - XML External Entities (XXE)"
] | [
"The Python documentation recommends using `defusedxml` instead of `xml` because the native Python `xml` library is vulnerable to XML External Entity (XXE) attacks. These attacks can leak confidential data and \"XML bombs\" can cause denial of service."
] | [
7.5
] | [
"LOW"
] | [
"MEDIUM"
] | sources.py | /registry/sources.py | yongleyuan/open-science-pool-registry | Apache-2.0 | |
2024-11-18T18:05:53.886832+00:00 | 1,514,819,429,000 | 360ee762aee9a172e998864ffe800eb47944b45b | 2 | {
"blob_id": "360ee762aee9a172e998864ffe800eb47944b45b",
"branch_name": "refs/heads/master",
"committer_date": 1514819429000,
"content_id": "de0cc0d573d2a9d26887c60b416eb7d3793d2e74",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "087ba04bd4fae34a3f90b002b6fcb736728a4467",
"extension": "py",
"filename": "collections.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 93305291,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4545,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/freestyle/collections.py",
"provenance": "stack-edu-0054.json.gz:568859",
"repo_name": "thautwarm/Stardust",
"revision_date": 1514819429000,
"revision_id": "3fa3927792958c02e51e4e5a6a5c74b6b2ecf37d",
"snapshot_id": "bbb9593a1419f7e2f23af230436dd2603c24cba0",
"src_encoding": "UTF-8",
"star_events_count": 2,
"url": "https://raw.githubusercontent.com/thautwarm/Stardust/3fa3927792958c02e51e4e5a6a5c74b6b2ecf37d/freestyle/collections.py",
"visit_date": "2021-01-23T16:49:47.024514"
} | 2.421875 | stackv2 | from typing import List, Dict, Any
from collections import defaultdict, deque
from itertools import tee
import pandas as pd
import numpy as np
import numba as nb
from copy import deepcopy
from functools import reduce
class op:
@staticmethod
def mul(a, b): return a * b
def sub(a, b): return a - b
def add(a, b): return a + b
def div(a, b): return a / b
def mod(a, b): return a % b
def anno(a, b): return a@b
def e_method(method, a, b): return eval(f"a.{method}(b)")
def e_func(func, a, b): return eval(f"{func}(a,b)")
class block:
pass
class globals_manager:
def __new__(self, global_vars=None):
try:
return self.globals_
except AttributeError:
self.globals_ = global_vars
def lisp(*targs, **kwargs):
argNums = len(targs)
if not argNums:
return None
elif argNums is 1:
value, = targs
return value
else:
f, *ttargs = targs
ttargs = map(lambda x: lisp(x), ttargs)
kw = dict(map(lambda x: (x, lisp(kwargs[x])), kwargs))
return f(*ttargs, **kw)
class richIterator:
def __init__(self, *args, **kwargs):
super(richIterator, self).__init__(*args, **kwargs)
self.recovery_vars = {}
def filter(self, f):
return richGenerator((each for each in self if f(each)))
def recovery(self):
globals_vars = globals_manager(None)
if self.recovery_vars:
recovery_vars = self.recovery_vars
for key in recovery_vars:
globals_vars[key] = recovery_vars[key]
def __matmul__(self, f):
return f(self)
def groupBy(self, f, containerType=list):
if containerType is list:
res: Dict[Any, eval("self.__class__")] = defaultdict(
eval("self.__class__"))
for each in self:
res[f(each)].append(each)
elif containerType is set:
res: Dict = dict()
for each in self:
key = f(each)
if key not in res:
res[key] = each
else:
return TypeError(f"method .groupBy for containerType '{containerType}'\
is not defined yet,\
you can define it by yourself.")
return richDict(res)
def let(self, **kwargs):
globals_vars = globals_manager(None)
if 'this' not in kwargs:
kwargs['this'] = self
for key in kwargs:
if key in globals_vars:
value = globals_vars[key]
self.recovery_vars[key] = value if value != "this" else self
value = kwargs[key]
globals_vars[key] = value if value != "this" else self
return self
def then(self, *args, **kwargs):
ret = lisp(*args, **kwargs)
self.recovery()
return ret
def map(self, f, *args, **kwargs):
args = (self,) + args
return richIterator.thenMap(f, *args, **kwargs)
def mapIndexed(self, f: "function<Int,T>", *args, **kwargs):
args = (range(len(self)), self) + args
return richIterator.thenMap(f, *args, *kwargs)
def connectedWith(self,cases:tuple):
def test(item):
for case_judge, case_action in cases:
if case_action(item):
return case_action(item)
return None
return richGenerator(map(test, self))
def tolist(self):
return [each for each in self]
def totuple(self):
return tuple(each for each in self)
def toset(self):
return set(self)
def todict(self):
return dict(zip(self))
def zip(self, iterator):
return zip(self, iterator)
def togen(self):
return richGenerator(self)
@staticmethod
def thenMap(f, *args, **kwargs):
if kwargs:
kwargsKeys = kwargs.keys()
kwargsValues = zip(* kwargs.values())
args = zip(*args)
if kwargs:
return richGenerator(f(*arg, **dict(zip(kwargsKeys, kwargsValue))) for arg, kwargsValue in zip(args, kwargsValues))
else:
return richGenerator(f(*arg) for arg in args)
class generator:
def __init__(self, iterable):
self.obj = iterable
def __iter__(self):
for each in self.obj:
yield each
def togen(self):
return self.obj
class richGenerator(richIterator, generator):
pass
class richDict(richIterator, dict):
pass
| 176 | 24.82 | 127 | 19 | 1,075 | python | [{"finding_id": "semgrep_rules.python.lang.security.audit.eval-detected_ab989d7f8edafb13_5e4e872b", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.audit.eval-detected", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "remediation": "", "location": {"file_path": "unknown", "line_start": 30, "line_end": 30, "column_start": 40, "column_end": 62, "code_snippet": "requires login"}, "cwe_id": "CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A03:2021 - Injection", "references": [{"url": "https://owasp.org/Top10/A03_2021-Injection", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.audit.eval-detected", "path": "/tmp/tmpb8jm_z1l/ab989d7f8edafb13.py", "start": {"line": 30, "col": 40, "offset": 488}, "end": {"line": 30, "col": 62, "offset": 510}, "extra": {"message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "metadata": {"source-rule-url": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b307-eval", "cwe": ["CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"], "owasp": ["A03:2021 - Injection", "A05:2025 - Injection"], "asvs": {"control_id": "5.2.4 Dyanmic Code Execution Features", "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v52-sanitization-and-sandboxing-requirements", "section": "V5: Validation, Sanitization and Encoding Verification Requirements", "version": "4"}, "category": "security", "technology": ["python"], "references": ["https://owasp.org/Top10/A03_2021-Injection"], "subcategory": ["audit"], "likelihood": "LOW", "impact": "HIGH", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.security.audit.eval-detected_ab989d7f8edafb13_1ab10e57", "tool_name": "semgrep", "rule_id": "rules.python.lang.security.audit.eval-detected", "finding_type": "security", "severity": "medium", "confidence": "low", "message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "remediation": "", "location": {"file_path": "unknown", "line_start": 33, "line_end": 33, "column_start": 36, "column_end": 56, "code_snippet": "requires login"}, "cwe_id": "CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')", "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": "A03:2021 - Injection", "references": [{"url": "https://owasp.org/Top10/A03_2021-Injection", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.security.audit.eval-detected", "path": "/tmp/tmpb8jm_z1l/ab989d7f8edafb13.py", "start": {"line": 33, "col": 36, "offset": 548}, "end": {"line": 33, "col": 56, "offset": 568}, "extra": {"message": "Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.", "metadata": {"source-rule-url": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b307-eval", "cwe": ["CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"], "owasp": ["A03:2021 - Injection", "A05:2025 - Injection"], "asvs": {"control_id": "5.2.4 Dyanmic Code Execution Features", "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v52-sanitization-and-sandboxing-requirements", "section": "V5: Validation, Sanitization and Encoding Verification Requirements", "version": "4"}, "category": "security", "technology": ["python"], "references": ["https://owasp.org/Top10/A03_2021-Injection"], "subcategory": ["audit"], "likelihood": "LOW", "impact": "HIGH", "confidence": "LOW"}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.baseclass-attribute-override_ab989d7f8edafb13_d09cadcf", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.baseclass-attribute-override", "finding_type": "correctness", "severity": "medium", "confidence": "medium", "message": "Class richGenerator inherits from both `richIterator` and `generator` which both have a method named `$F`; one of these methods will be overwritten.", "remediation": "", "location": {"file_path": "unknown", "line_start": 172, "line_end": 172, "column_start": 7, "column_end": 20, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 5.0, "cvss_vector": null, "owasp_category": null, "references": [{"url": "https://docs.python.org/3/tutorial/classes.html#multiple-inheritance", "title": null}], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.baseclass-attribute-override", "path": "/tmp/tmpb8jm_z1l/ab989d7f8edafb13.py", "start": {"line": 172, "col": 7, "offset": 4450}, "end": {"line": 172, "col": 20, "offset": 4463}, "extra": {"message": "Class richGenerator inherits from both `richIterator` and `generator` which both have a method named `$F`; one of these methods will be overwritten.", "metadata": {"category": "correctness", "references": ["https://docs.python.org/3/tutorial/classes.html#multiple-inheritance"], "technology": ["python"]}, "severity": "WARNING", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 3 | true | [
"CWE-95",
"CWE-95"
] | [
"rules.python.lang.security.audit.eval-detected",
"rules.python.lang.security.audit.eval-detected"
] | [
"security",
"security"
] | [
"LOW",
"LOW"
] | [
"MEDIUM",
"MEDIUM"
] | [
30,
33
] | [
30,
33
] | [
40,
36
] | [
62,
56
] | [
"A03:2021 - Injection",
"A03:2021 - Injection"
] | [
"Detected the use of eval(). eval() can be dangerous if used to evaluate dynamic content. If this content can be input from outside the program, this may be a code injection vulnerability. Ensure evaluated content is not definable by external sources.",
"Detected the use of eval(). eval() can be dangerous if used... | [
5,
5
] | [
"LOW",
"LOW"
] | [
"HIGH",
"HIGH"
] | collections.py | /freestyle/collections.py | thautwarm/Stardust | Apache-2.0 | |
2024-11-18T18:05:55.246166+00:00 | 1,657,390,380,000 | dfdbab317ffa3b29e94e2c2e170bb41d630eec72 | 2 | {
"blob_id": "dfdbab317ffa3b29e94e2c2e170bb41d630eec72",
"branch_name": "refs/heads/main",
"committer_date": 1657390380000,
"content_id": "c109cd2fd026f8414ae5d53012232891f107e2a6",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "74a2da6daa4fb7acc42e7eb2f1aa2a3fcd983a53",
"extension": "py",
"filename": "fetchers.py",
"fork_events_count": 39,
"gha_created_at": 1354564877000,
"gha_event_created_at": 1694006362000,
"gha_language": "Python",
"gha_license_id": "Apache-2.0",
"github_id": 6988540,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15881,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/openid/fetchers.py",
"provenance": "stack-edu-0054.json.gz:568873",
"repo_name": "necaris/python3-openid",
"revision_date": 1657390380000,
"revision_id": "92235252f0a18b702bc86f17c0c5f5a9d68967a7",
"snapshot_id": "2a95387d266a31bebc81b41c5aeb5fb4ae2d4297",
"src_encoding": "UTF-8",
"star_events_count": 41,
"url": "https://raw.githubusercontent.com/necaris/python3-openid/92235252f0a18b702bc86f17c0c5f5a9d68967a7/openid/fetchers.py",
"visit_date": "2023-08-24T15:19:18.180102"
} | 2.328125 | stackv2 | # -*- test-case-name: openid.test.test_fetchers -*-
"""
This module contains the HTTP fetcher interface and several implementations.
"""
__all__ = [
'fetch', 'getDefaultFetcher', 'setDefaultFetcher', 'HTTPResponse',
'HTTPFetcher', 'createHTTPFetcher', 'HTTPFetchingError', 'HTTPError'
]
import urllib.request
import urllib.error
import urllib.parse
import http.client
import time
import io
import sys
import contextlib
import openid
import openid.urinorm
# Try to import httplib2 for caching support
# http://bitworking.org/projects/httplib2/
try:
import httplib2
except ImportError:
# httplib2 not available
httplib2 = None
# try to import pycurl, which will let us use CurlHTTPFetcher
try:
import pycurl
except ImportError:
pycurl = None
USER_AGENT = "python-openid/%s (%s)" % (openid.__version__, sys.platform)
MAX_RESPONSE_KB = 1024
def fetch(url, body=None, headers=None):
"""Invoke the fetch method on the default fetcher. Most users
should need only this method.
@raises Exception: any exceptions that may be raised by the default fetcher
"""
fetcher = getDefaultFetcher()
return fetcher.fetch(url, body, headers)
def createHTTPFetcher():
"""Create a default HTTP fetcher instance
prefers Curl to urllib2."""
if pycurl is None:
fetcher = Urllib2Fetcher()
else:
fetcher = CurlHTTPFetcher()
return fetcher
# Contains the currently set HTTP fetcher. If it is set to None, the
# library will call createHTTPFetcher() to set it. Do not access this
# variable outside of this module.
_default_fetcher = None
def getDefaultFetcher():
"""Return the default fetcher instance
if no fetcher has been set, it will create a default fetcher.
@return: the default fetcher
@rtype: HTTPFetcher
"""
global _default_fetcher
if _default_fetcher is None:
setDefaultFetcher(createHTTPFetcher())
return _default_fetcher
def setDefaultFetcher(fetcher, wrap_exceptions=True):
"""Set the default fetcher
@param fetcher: The fetcher to use as the default HTTP fetcher
@type fetcher: HTTPFetcher
@param wrap_exceptions: Whether to wrap exceptions thrown by the
fetcher wil HTTPFetchingError so that they may be caught
easier. By default, exceptions will be wrapped. In general,
unwrapped fetchers are useful for debugging of fetching errors
or if your fetcher raises well-known exceptions that you would
like to catch.
@type wrap_exceptions: bool
"""
global _default_fetcher
if fetcher is None or not wrap_exceptions:
_default_fetcher = fetcher
else:
_default_fetcher = ExceptionWrappingFetcher(fetcher)
def usingCurl():
"""Whether the currently set HTTP fetcher is a Curl HTTP fetcher."""
fetcher = getDefaultFetcher()
if isinstance(fetcher, ExceptionWrappingFetcher):
fetcher = fetcher.fetcher
return isinstance(fetcher, CurlHTTPFetcher)
class HTTPResponse(object):
"""XXX document attributes"""
headers = None
status = None
body = None
final_url = None
def __init__(self, final_url=None, status=None, headers=None, body=None):
self.final_url = final_url
self.status = status
self.headers = headers
self.body = body
def __repr__(self):
return "<%s status %s for %s>" % (self.__class__.__name__, self.status,
self.final_url)
class HTTPFetcher(object):
"""
This class is the interface for openid HTTP fetchers. This
interface is only important if you need to write a new fetcher for
some reason.
"""
def fetch(self, url, body=None, headers=None):
"""
This performs an HTTP POST or GET, following redirects along
the way. If a body is specified, then the request will be a
POST. Otherwise, it will be a GET.
@param headers: HTTP headers to include with the request
@type headers: {str:str}
@return: An object representing the server's HTTP response. If
there are network or protocol errors, an exception will be
raised. HTTP error responses, like 404 or 500, do not
cause exceptions.
@rtype: L{HTTPResponse}
@raise Exception: Different implementations will raise
different errors based on the underlying HTTP library.
"""
raise NotImplementedError
def _allowedURL(url):
parsed = urllib.parse.urlparse(url)
# scheme is the first item in the tuple
return parsed[0] in ('http', 'https')
class HTTPFetchingError(Exception):
"""Exception that is wrapped around all exceptions that are raised
by the underlying fetcher when using the ExceptionWrappingFetcher
@ivar why: The exception that caused this exception
"""
def __init__(self, why=None):
Exception.__init__(self, why)
self.why = why
class ExceptionWrappingFetcher(HTTPFetcher):
"""Fetcher that wraps another fetcher, causing all exceptions
@cvar uncaught_exceptions: Exceptions that should be exposed to the
user if they are raised by the fetch call
"""
uncaught_exceptions = (SystemExit, KeyboardInterrupt, MemoryError)
def __init__(self, fetcher):
self.fetcher = fetcher
def fetch(self, *args, **kwargs):
try:
return self.fetcher.fetch(*args, **kwargs)
except self.uncaught_exceptions:
raise
except:
exc_cls, exc_inst = sys.exc_info()[:2]
if exc_inst is None:
# string exceptions
exc_inst = exc_cls
raise HTTPFetchingError(why=exc_inst)
class Urllib2Fetcher(HTTPFetcher):
"""An C{L{HTTPFetcher}} that uses urllib2.
"""
# Parameterized for the benefit of testing frameworks, see
# http://trac.openidenabled.com/trac/ticket/85
urlopen = staticmethod(urllib.request.urlopen)
def fetch(self, url, body=None, headers=None):
if not _allowedURL(url):
raise ValueError('Bad URL scheme: %r' % (url, ))
if headers is None:
headers = {}
headers.setdefault('User-Agent', "%s Python-urllib/%s" %
(USER_AGENT, urllib.request.__version__))
if isinstance(body, str):
body = bytes(body, encoding="utf-8")
req = urllib.request.Request(url, data=body, headers=headers)
url_resource = None
try:
url_resource = self.urlopen(req)
with contextlib.closing(url_resource):
return self._makeResponse(url_resource)
except urllib.error.HTTPError as why:
with contextlib.closing(why):
resp = self._makeResponse(why)
return resp
except (urllib.error.URLError, http.client.BadStatusLine) as why:
raise
except Exception as why:
raise AssertionError(why)
def _makeResponse(self, urllib2_response):
'''
Construct an HTTPResponse from the the urllib response. Attempt to
decode the response body from bytes to str if the necessary information
is available.
'''
resp = HTTPResponse()
resp.body = urllib2_response.read(MAX_RESPONSE_KB * 1024)
resp.final_url = urllib2_response.geturl()
resp.headers = self._lowerCaseKeys(
dict(list(urllib2_response.info().items())))
if hasattr(urllib2_response, 'code'):
resp.status = urllib2_response.code
else:
resp.status = 200
_, extra_dict = self._parseHeaderValue(
resp.headers.get("content-type", ""))
# Try to decode the response body to a string, if there's a
# charset known; fall back to ISO-8859-1 otherwise, since that's
# what's suggested in HTTP/1.1
charset = extra_dict.get('charset', 'latin1')
try:
resp.body = resp.body.decode(charset)
except Exception:
pass
return resp
def _lowerCaseKeys(self, headers_dict):
new_dict = {}
for k, v in headers_dict.items():
new_dict[k.lower()] = v
return new_dict
def _parseHeaderValue(self, header_value):
"""
Parse out a complex header value (such as Content-Type, with a value
like "text/html; charset=utf-8") into a main value and a dictionary of
extra information (in this case, 'text/html' and {'charset': 'utf8'}).
"""
values = header_value.split(';', 1)
if len(values) == 1:
# There's no extra info -- return the main value and an empty dict
return values[0], {}
main_value, extra_values = values[0], values[1].split(';')
extra_dict = {}
for value_string in extra_values:
try:
key, value = value_string.split('=', 1)
extra_dict[key.strip()] = value.strip()
except ValueError:
# Can't unpack it -- must be malformed. Ignore
pass
return main_value, extra_dict
class HTTPError(HTTPFetchingError):
"""
This exception is raised by the C{L{CurlHTTPFetcher}} when it
encounters an exceptional situation fetching a URL.
"""
pass
# XXX: define what we mean by paranoid, and make sure it is.
class CurlHTTPFetcher(HTTPFetcher):
"""
An C{L{HTTPFetcher}} that uses pycurl for fetching.
See U{http://pycurl.sourceforge.net/}.
"""
ALLOWED_TIME = 20 # seconds
def __init__(self):
HTTPFetcher.__init__(self)
if pycurl is None:
raise RuntimeError('Cannot find pycurl library')
def _parseHeaders(self, header_file):
header_file.seek(0)
# Remove all non "name: value" header lines from the input
lines = [line.decode().strip() for line in header_file if b':' in line]
headers = {}
for line in lines:
try:
name, value = line.split(':', 1)
except ValueError:
raise HTTPError("Malformed HTTP header line in response: %r" %
(line, ))
value = value.strip()
# HTTP headers are case-insensitive
name = name.lower()
headers[name] = value
return headers
def _checkURL(self, url):
# XXX: document that this can be overridden to match desired policy
# XXX: make sure url is well-formed and routeable
return _allowedURL(url)
def fetch(self, url, body=None, headers=None):
stop = int(time.time()) + self.ALLOWED_TIME
off = self.ALLOWED_TIME
if headers is None:
headers = {}
headers.setdefault('User-Agent',
"%s %s" % (USER_AGENT, pycurl.version, ))
header_list = []
if headers is not None:
for header_name, header_value in headers.items():
header = '%s: %s' % (header_name, header_value)
header_list.append(header.encode())
c = pycurl.Curl()
try:
c.setopt(pycurl.NOSIGNAL, 1)
if header_list:
c.setopt(pycurl.HTTPHEADER, header_list)
# Presence of a body indicates that we should do a POST
if body is not None:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, body)
while off > 0:
if not self._checkURL(url):
raise HTTPError("Fetching URL not allowed: %r" % (url, ))
data = io.BytesIO()
def write_data(chunk):
if data.tell() > (1024 * MAX_RESPONSE_KB):
return 0
else:
return data.write(chunk)
response_header_data = io.BytesIO()
c.setopt(pycurl.WRITEFUNCTION, write_data)
c.setopt(pycurl.HEADERFUNCTION, response_header_data.write)
c.setopt(pycurl.TIMEOUT, off)
c.setopt(pycurl.URL, openid.urinorm.urinorm(url))
c.perform()
response_headers = self._parseHeaders(response_header_data)
code = c.getinfo(pycurl.RESPONSE_CODE)
if code in [301, 302, 303, 307]:
url = response_headers.get('location')
if url is None:
raise HTTPError(
'Redirect (%s) returned without a location' % code)
# Redirects are always GETs
c.setopt(pycurl.POST, 0)
# There is no way to reset POSTFIELDS to empty and
# reuse the connection, but we only use it once.
else:
resp = HTTPResponse()
resp.headers = response_headers
resp.status = code
resp.final_url = url
resp.body = data.getvalue().decode()
return resp
off = stop - int(time.time())
raise HTTPError("Timed out fetching: %r" % (url, ))
finally:
c.close()
class HTTPLib2Fetcher(HTTPFetcher):
"""A fetcher that uses C{httplib2} for performing HTTP
requests. This implementation supports HTTP caching.
@see: http://bitworking.org/projects/httplib2/
"""
def __init__(self, cache=None):
"""@param cache: An object suitable for use as an C{httplib2}
cache. If a string is passed, it is assumed to be a
directory name.
"""
if httplib2 is None:
raise RuntimeError('Cannot find httplib2 library. '
'See http://bitworking.org/projects/httplib2/')
super(HTTPLib2Fetcher, self).__init__()
# An instance of the httplib2 object that performs HTTP requests
self.httplib2 = httplib2.Http(cache)
# We want httplib2 to raise exceptions for errors, just like
# the other fetchers.
self.httplib2.force_exception_to_status_code = False
def fetch(self, url, body=None, headers=None):
"""Perform an HTTP request
@raises Exception: Any exception that can be raised by httplib2
@see: C{L{HTTPFetcher.fetch}}
"""
if body:
method = 'POST'
else:
method = 'GET'
if headers is None:
headers = {}
# httplib2 doesn't check to make sure that the URL's scheme is
# 'http' so we do it here.
if not (url.startswith('http://') or url.startswith('https://')):
raise ValueError('URL is not a HTTP URL: %r' % (url, ))
httplib2_response, content = self.httplib2.request(
url, method, body=body, headers=headers)
# Translate the httplib2 response to our HTTP response abstraction
# When a 400 is returned, there is no "content-location"
# header set. This seems like a bug to me. I can't think of a
# case where we really care about the final URL when it is an
# error response, but being careful about it can't hurt.
try:
final_url = httplib2_response['content-location']
except KeyError:
# We're assuming that no redirects occurred
assert not httplib2_response.previous
# And this should never happen for a successful response
assert httplib2_response.status != 200
final_url = url
return HTTPResponse(
body=content.decode(), # TODO Don't assume ASCII
final_url=final_url,
headers=dict(list(httplib2_response.items())),
status=httplib2_response.status, )
| 493 | 31.21 | 79 | 19 | 3,533 | python | [{"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.identical-is-comparison_bdb9f963e9fa869f_baf2c473", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.identical-is-comparison", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Found identical comparison using is. Ensure this is what you intended.", "remediation": "", "location": {"file_path": "unknown", "line_start": 56, "line_end": 56, "column_start": 8, "column_end": 22, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.identical-is-comparison", "path": "/tmp/tmpb8jm_z1l/bdb9f963e9fa869f.py", "start": {"line": 56, "col": 8, "offset": 1296}, "end": {"line": 56, "col": 22, "offset": 1310}, "extra": {"message": "Found identical comparison using is. Ensure this is what you intended.", "metadata": {"category": "correctness", "technology": ["python"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.identical-is-comparison_bdb9f963e9fa869f_93976b13", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.identical-is-comparison", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Found identical comparison using is. Ensure this is what you intended.", "remediation": "", "location": {"file_path": "unknown", "line_start": 319, "line_end": 319, "column_start": 12, "column_end": 26, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.identical-is-comparison", "path": "/tmp/tmpb8jm_z1l/bdb9f963e9fa869f.py", "start": {"line": 319, "col": 12, "offset": 9682}, "end": {"line": 319, "col": 26, "offset": 9696}, "extra": {"message": "Found identical comparison using is. Ensure this is what you intended.", "metadata": {"category": "correctness", "technology": ["python"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}, {"finding_id": "semgrep_rules.python.lang.correctness.common-mistakes.identical-is-comparison_bdb9f963e9fa869f_479206bb", "tool_name": "semgrep", "rule_id": "rules.python.lang.correctness.common-mistakes.identical-is-comparison", "finding_type": "correctness", "severity": "high", "confidence": "medium", "message": "Found identical comparison using is. Ensure this is what you intended.", "remediation": "", "location": {"file_path": "unknown", "line_start": 437, "line_end": 437, "column_start": 12, "column_end": 28, "code_snippet": "requires login"}, "cwe_id": null, "cwe_name": null, "cvss_score": 7.5, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": "requires login", "tags": [], "raw_output": {"check_id": "rules.python.lang.correctness.common-mistakes.identical-is-comparison", "path": "/tmp/tmpb8jm_z1l/bdb9f963e9fa869f.py", "start": {"line": 437, "col": 12, "offset": 13744}, "end": {"line": 437, "col": 28, "offset": 13760}, "extra": {"message": "Found identical comparison using is. Ensure this is what you intended.", "metadata": {"category": "correctness", "technology": ["python"]}, "severity": "ERROR", "fingerprint": "requires login", "lines": "requires login", "validation_state": "NO_VALIDATOR", "engine_kind": "OSS"}}}] | 3 | true | [
"",
"",
""
] | [
"rules.python.lang.correctness.common-mistakes.identical-is-comparison",
"rules.python.lang.correctness.common-mistakes.identical-is-comparison",
"rules.python.lang.correctness.common-mistakes.identical-is-comparison"
] | [
"correctness",
"correctness",
"correctness"
] | [
"MEDIUM",
"MEDIUM",
"MEDIUM"
] | [
"HIGH",
"HIGH",
"HIGH"
] | [
56,
319,
437
] | [
56,
319,
437
] | [
8,
12,
12
] | [
22,
26,
28
] | [
"",
"",
""
] | [
"Found identical comparison using is. Ensure this is what you intended.",
"Found identical comparison using is. Ensure this is what you intended.",
"Found identical comparison using is. Ensure this is what you intended."
] | [
7.5,
7.5,
7.5
] | [
"",
"",
""
] | [
"",
"",
""
] | fetchers.py | /openid/fetchers.py | necaris/python3-openid | Apache-2.0 |
End of preview. Expand in Data Studio
README.md exists but content is empty.
- Downloads last month
- -