| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 4-1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4-209 |
| max_stars_repo_name | string | lengths 5-121 |
| max_stars_repo_head_hexsha | string | lengths 40-40 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 4-209 |
| max_issues_repo_name | string | lengths 5-121 |
| max_issues_repo_head_hexsha | string | lengths 40-40 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 4-209 |
| max_forks_repo_name | string | lengths 5-121 |
| max_forks_repo_head_hexsha | string | lengths 40-40 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 4-1.02M |
| avg_line_length | float64 | 1.07-66.1k |
| max_line_length | int64 | 4-266k |
| alphanum_fraction | float64 | 0.01-1 |

Each record below is shown as one metadata row (hexsha through the fork event dates), followed by the file `content`, followed by a row with the three derived statistics (`avg_line_length`, `max_line_length`, `alphanum_fraction`).
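The last three columns are statistics derived from `content`. As a rough illustration of what they measure (a sketch only; the exact counting rules used to build the dataset are not documented here):

```python
def content_stats(content: str) -> dict:
    """Approximate the derived per-file statistics from a file's text."""
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alphanumeric = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alphanumeric / len(content) if content else 0.0,
    }

# Example on a tiny two-line file:
print(content_stats("import sys\nprint(sys.version)\n"))
```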
| e070366174e8f2cc8f06d7ef450b79dee8cdc990 | 217 | py | Python | Examples/AppKit/TableModel/setup.py | Khan/pyobjc-framework-Cocoa | f8b015ea2a72d8d78be6084fb12925c4785b8f1f | ["MIT"] | 132 | 2015-01-01T10:02:42.000Z | 2022-03-09T12:51:01.000Z | mac/pyobjc-framework-Cocoa/Examples/AppKit/TableModel/setup.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | ["BSD-2-Clause"] | 6 | 2015-01-06T08:23:19.000Z | 2019-03-14T12:22:06.000Z | mac/pyobjc-framework-Cocoa/Examples/AppKit/TableModel/setup.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | ["BSD-2-Clause"] | 27 | 2015-02-23T11:51:43.000Z | 2022-03-07T02:34:18.000Z |
"""
Script for building the example.
Usage:
python setup.py py2app
"""
from distutils.core import setup
import py2app
setup(
name='TableModel',
app=["TableModel.py"],
data_files=["English.lproj"],
)
| 14.466667 | 33 | 0.677419 |

| 9007d4b83d3accd47d787ad1b51b103a0bcf6ca6 | 2,628 | py | Python | locations/spiders/rei.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | ["MIT"] | null | null | null | locations/spiders/rei.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | ["MIT"] | null | null | null | locations/spiders/rei.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAY_MAPPING = {
"Mon": "Mo",
"Tue": "Tu",
"Wed": "We",
"Thu": "Th",
"Fri": "Fr",
"Sat": "Sa",
"Sun": "Su",
}
class ReiSpider(scrapy.Spider):
name = "rei"
allowed_domains = ["www.rei.com"]
start_urls = ("https://www.rei.com/stores/map",)
    # Fix formatting for ["Mon - Fri 10:00-18:00","Sat 12:00-18:00"]
def format_days(self, range):
pattern = r"^(.{3})( - (.{3}) | )(\d.*)"
        start_day, separator, end_day, time_range = re.search(
pattern, range.strip()
).groups()
result = DAY_MAPPING[start_day]
if end_day:
result += "-" + DAY_MAPPING[end_day]
result += " " + time_range
return result
def fix_opening_hours(self, opening_hours):
return ";".join(map(self.format_days, opening_hours))
def parse_store(self, response):
json_string = response.xpath(
'//script[@id="store-schema"]/text()'
).extract_first()
store_dict = json.loads(json_string)
# The above dict has more clearly laid-out info, but doesn't include storeId or country, which is found here:
store_id_js_text = str(
response.xpath('//script[@id="modelData"]/text()').extract_first()
)
store_data_model = json.loads(store_id_js_text)["pageData"]["storeDataModel"]
properties = {
"lat": store_dict["geo"].get("latitude"),
"lon": store_dict["geo"].get("longitude"),
"addr_full": store_dict["address"].get("streetAddress"),
"city": store_dict["address"].get("addressLocality"),
"state": store_dict["address"].get("addressRegion"),
"postcode": store_dict["address"].get("postalCode"),
"country": store_data_model.get("countryCode"),
"opening_hours": self.fix_opening_hours(store_dict["openingHours"]),
"phone": store_dict.get("telephone"),
"website": store_dict.get("url"),
"ref": store_data_model.get("storeNumber"),
}
yield GeojsonPointItem(**properties)
def parse(self, response):
urls = set(
response.xpath(
'//a[contains(@href,"stores") and contains(@href,".html")]/@href'
).extract()
)
for path in urls:
if path == "/stores/bikeshop.html":
continue
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_store,
)
| 32.85 | 117 | 0.561263 |
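The spider's `format_days` helper above converts raw opening-hours strings such as `"Mon - Fri 10:00-18:00"` into abbreviated day ranges before `fix_opening_hours` joins them. A standalone sketch of the same transformation (the sample strings are illustrative, not taken from the site):

```python
import re

DAY_MAPPING = {"Mon": "Mo", "Tue": "Tu", "Wed": "We", "Thu": "Th",
               "Fri": "Fr", "Sat": "Sa", "Sun": "Su"}

def format_days(day_range):
    """Convert e.g. 'Mon - Fri 10:00-18:00' into 'Mo-Fr 10:00-18:00'."""
    pattern = r"^(.{3})( - (.{3}) | )(\d.*)"
    start_day, _sep, end_day, time_range = re.search(pattern, day_range.strip()).groups()
    result = DAY_MAPPING[start_day]
    if end_day:
        result += "-" + DAY_MAPPING[end_day]
    return result + " " + time_range

print("; ".join(map(format_days, ["Mon - Fri 10:00-18:00", "Sat 12:00-18:00"])))
# -> Mo-Fr 10:00-18:00; Sa 12:00-18:00
```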
| 93c8304379086921cf5a5e8b660775b2a5b83a35 | 741 | py | Python | producthunt/products/models.py | muhammad-mamdouh/Django_Projects | 1f31e12aefb36b33474256db40a2c551882f445e | ["MIT"] | null | null | null | producthunt/products/models.py | muhammad-mamdouh/Django_Projects | 1f31e12aefb36b33474256db40a2c551882f445e | ["MIT"] | 40 | 2020-06-05T22:10:58.000Z | 2022-03-11T23:56:09.000Z | producthunt/products/models.py | muhammad-mamdouh/Django_Projects | 1f31e12aefb36b33474256db40a2c551882f445e | ["MIT"] | 1 | 2021-03-31T10:30:03.000Z | 2021-03-31T10:30:03.000Z |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class Product(models.Model):
title = models.CharField(max_length=100)
date_published = models.DateTimeField(default=timezone.now)
total_votes = models.IntegerField(default=1)
image = models.ImageField(upload_to='images/')
icon = models.ImageField(upload_to='icons/')
body = models.TextField()
url = models.TextField()
hunter = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return f'{self.title}'
def date_published_formatted(self):
return f'{self.date_published.strftime("%b %-d, %Y")}'
def summary(self):
return f'{self.body[:150]} ...'
| 30.875 | 63 | 0.699055 |

| bb03ee3dce8342946e180b66a15252703ec86e51 | 11,356 | py | Python | linter.py | MicroConsult/CMSIS_5 | 48b8de320d841446f400f94ed6fc7578a3d1525f | ["Apache-2.0"] | 2 | 2019-06-28T01:52:10.000Z | 2021-08-09T05:28:04.000Z | linter.py | MicroConsult/CMSIS_5 | 48b8de320d841446f400f94ed6fc7578a3d1525f | ["Apache-2.0"] | null | null | null | linter.py | MicroConsult/CMSIS_5 | 48b8de320d841446f400f94ed6fc7578a3d1525f | ["Apache-2.0"] | 4 | 2020-08-04T13:48:44.000Z | 2022-02-25T08:29:40.000Z |
# -*- coding: utf-8 -*-
import logging
import lxml
import os
import os.path
import re
import requests
from AdvancedHTMLParser import AdvancedHTMLParser
from glob import iglob
from urllib.parse import urlparse
from cmsis.PackLint import PackLinter, VersionParser
from cmsis.Pack import Pack, Api, SemanticVersion
def create():
return CmsisPackLinter()
class CmsisPackVersionParser(VersionParser):
def __init__(self, logger = None):
super().__init__(logger)
self._packs = {}
def _file_version_(self, file):
v = self._regex_(file, ".*@version\s+([vV])?([0-9]+[.][0-9]+([.][0-9]+)?).*", 2)
if not v:
v = self._regex_(file, ".*\$Revision:\s+([vV])?([0-9]+[.][0-9]+([.][0-9]+)?).*", 2)
return v
def _cmtable_(self, file, skip = 0):
table = ""
dump = False
with open(file, 'r') as f:
for l in f:
if not dump and l.strip() == "<table class=\"cmtable\" summary=\"Revision History\">":
if skip > 0:
skip -= 1
else:
dump = True
if dump:
table += l.replace("<br>", "\\n").replace("\\<", "<").replace("\\>", ">")
if l.strip() == "</table>":
break
if table:
table = lxml.etree.fromstring(table)
return table
return None
def _revhistory_(self, file, skip = 0):
table = self._cmtable_(file, skip)
if table is not None:
m = re.match("[Vv]?(\d+.\d+(.\d+)?)", table[1][0].text)
if m:
return SemanticVersion(m.group(1))
else:
self._logger.info("Revision History table not found in "+file)
return None
def readme_md(self, file):
"""Get the latest release version from README.md"""
return self._regex_(file, ".*repository contains the CMSIS Version ([0-9]+[.][0-9]+([.][0-9]+)?).*")
def _dxy(self, file):
"""Get the PROJECT_NUMBER from a Doxygen configuration file."""
return self._regex_(file, "PROJECT_NUMBER\s*=\s*\"(Version\s+)?(\d+.\d+(.\d+)?)\"", 2)
def _pdsc(self, file, component = None):
pack = None
if not file in self._packs:
pack = Pack(file, None)
self._packs[file] = pack
else:
pack = self._packs[file]
if component:
history = pack.history()
for r in sorted(history.keys(), reverse=True):
m = re.search(re.escape(component)+"(:)?\s+[Vv]?(\d+.\d+(.\d+)?)", history[r], re.MULTILINE)
if m:
return SemanticVersion(m.group(2))
else:
return pack.version()
def _h(self, file):
return self._file_version_(file)
def _c(self, file):
return self._file_version_(file)
def _s(self, file):
return self._file_version_(file)
def overview_txt(self, file, skip = 0):
return self._revhistory_(file, skip)
def introduction_txt(self, file, component = None):
table = self._cmtable_(file)
if table is None:
return None
if component:
m = re.search(re.escape(component)+"\s+[Vv]?(\d+.\d+(.\d+)?)", table[1][1].text, re.MULTILINE)
if m:
return SemanticVersion(m.group(1))
else:
return SemanticVersion(table[1][0].text)
def dap_txt(self, file, skip = 0):
return self._revhistory_(file, skip)
def general_txt(self, file, skip = 0):
return self._revhistory_(file, skip)
def history_txt(self, file, skip = 0):
return self._revhistory_(file, skip)
def _all_(self, file):
"""Get the version or revision tag from an arbitrary file."""
version = self._regex_(file, ".*@version\s+([vV])?([0-9]+[.][0-9]+([.][0-9]+)?).*", 2)
if not version:
version = self._regex_(file, ".*\$Revision:\s+([vV])?([0-9]+[.][0-9]+([.][0-9]+)?).*", 2)
return version
class CmsisPackLinter(PackLinter):
def __init__(self, pdsc = "ARM.CMSIS.pdsc"):
super().__init__(pdsc)
self._versionParser = CmsisPackVersionParser(self._logger)
def pack_version(self):
return self._pack.version()
def cmsis_corem_component(self):
rte = { 'components' : set(), 'Dcore' : "Cortex-M3", 'Dvendor' : "", 'Dname' : "", 'Dtz' : "", 'Tcompiler' : "", 'Toptions' : "" }
comp = sorted(self._pack.component_by_name(rte, "CMSIS.CORE"), reverse=True)[0]
return SemanticVersion(comp.version())
def cmsis_corea_component(self):
rte = { 'components' : set(), 'Dcore' : "Cortex-A9", 'Dvendor' : "", 'Dname' : "", 'Dtz' : "", 'Tcompiler' : "", 'Toptions' : "" }
comp = sorted(self._pack.component_by_name(rte, "CMSIS.CORE"), reverse=True)[0]
return SemanticVersion(comp.version())
def cmsis_rtos2_api(self):
rte = { 'components' : set(), 'Dcore' : "", 'Dvendor' : "", 'Dname' : "", 'Dtz' : "", 'Tcompiler' : "", 'Toptions' : "" }
comp = sorted(self._pack.component_by_name(rte, "CMSIS.RTOS2"), reverse=True)[0]
return SemanticVersion(comp.version())
def cmsis_rtx5_component(self):
cs = self._pack.components_by_name("CMSIS.RTOS2.Keil RTX5*")
cvs = { (SemanticVersion(c.version()), SemanticVersion(c.apiversion())) for c in cs }
if len(cvs) == 1:
return cvs.pop()
elif len(cvs) > 1:
self.warning("Not all RTX5 components have same version information: %s", str([ (c.name(), c.version(), c.apiversion()) for c in cs ]))
return None, None
def check_general(self):
"""CMSIS version"""
v = self.pack_version()
self.verify_version("README.md", v)
self.verify_version("CMSIS/DoxyGen/General/general.dxy", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v)
def check_corem(self):
"""CMSIS-Core(M) version"""
v = self.cmsis_corem_component()
self.verify_version("CMSIS/DoxyGen/Core/core.dxy", v)
self.verify_version("CMSIS/DoxyGen/Core/src/Overview.txt", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v, component="CMSIS-Core (Cortex-M)")
self.verify_version(self._pack.location(), v, component="CMSIS-Core(M)")
def check_corea(self):
"""CMSIS-Core(A) version"""
v = self.cmsis_corea_component()
self.verify_version("CMSIS/DoxyGen/Core_A/core_A.dxy", v)
self.verify_version("CMSIS/DoxyGen/Core_A/src/Overview.txt", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v, component="CMSIS-Core (Cortex-A)")
self.verify_version(self._pack.location(), v, component="CMSIS-Core(A)")
def check_dap(self):
"""CMSIS-DAP version"""
v = self._versionParser.get_version("CMSIS/DoxyGen/DAP/dap.dxy")
self.verify_version("CMSIS/DoxyGen/DAP/src/dap.txt", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v, component="CMSIS-DAP")
self.verify_version(self._pack.location(), v, component="CMSIS-DAP")
def check_driver(self):
"""CMSIS-Driver version"""
v = self._versionParser.get_version("CMSIS/DoxyGen/Driver/Driver.dxy")
self.verify_version("CMSIS/DoxyGen/Driver/src/General.txt", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v, component="CMSIS-Driver")
self.verify_version(self._pack.location(), v, component="CMSIS-Driver")
def check_dsp(self):
"""CMSIS-DSP version"""
v = self._versionParser.get_version("CMSIS/DoxyGen/DSP/dsp.dxy")
self.verify_version("CMSIS/DoxyGen/DSP/src/history.txt", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v, component="CMSIS-DSP")
self.verify_version(self._pack.location(), v, component="CMSIS-DSP")
def check_nn(self):
"""CMSIS-NN version"""
v = self._versionParser.get_version("CMSIS/DoxyGen/NN/nn.dxy")
self.verify_version("CMSIS/DoxyGen/NN/src/history.txt", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v, component="CMSIS-NN")
self.verify_version(self._pack.location(), v, component="CMSIS-NN")
def check_pack(self):
"""CMSIS-Pack version"""
v = self._versionParser.get_version("CMSIS/DoxyGen/Pack/Pack.dxy")
self.verify_version("CMSIS/DoxyGen/Pack/src/General.txt", v)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", v, component="CMSIS-Pack")
self.verify_version(self._pack.location(), v, component="CMSIS-Pack")
def check_rtos2(self):
"""CMSIS-RTOS2 version"""
api = self.cmsis_rtos2_api()
v, a = self.cmsis_rtx5_component()
self.verify_version("CMSIS/DoxyGen/RTOS2/rtos.dxy", api)
self.verify_version("CMSIS/DoxyGen/RTOS2/src/history.txt", api, skip=0)
self.verify_version("CMSIS/DoxyGen/General/src/introduction.txt", api, component="CMSIS-RTOS")
# self.verify_version(self._pack.location(), v, component="CMSIS-RTOS2")
if a and not api.match(a):
self.warning("RTX5 API version (%s) does not match RTOS2 API version (%s)!", a, api)
self.verify_version("CMSIS/DoxyGen/RTOS2/src/history.txt", v, skip=1)
def check_files(self):
"""Files referenced by pack description"""
# Check schema of pack description
self.verify_schema(self._pack.location(), "CMSIS/Utilities/PACK.xsd")
# Check schema of SVD files
svdfiles = { d.svdfile() for d in self._pack.devices() if d.svdfile() }
for svd in svdfiles:
if os.path.exists(svd):
self.verify_schema(svd, "CMSIS/Utilities/CMSIS-SVD.xsd")
else:
self.warning("SVD File does not exist: %s!", svd)
# Check component file version
for c in self._pack.components():
cv = c.version()
for f in c.files():
hv = f.version()
if c is Api:
if f.isHeader():
if not hv:
self.verify_version(f.location(), cv)
if hv:
self.verify_version(f.location(), SemanticVersion(hv))
def check_doc(self, pattern="./CMSIS/Documentation/**/*.html"):
"""Documentation"""
self.debug("Using pattern '%s'", pattern)
for html in iglob(pattern, recursive=True):
self.info("%s: Checking links ...", html)
parser = AdvancedHTMLParser()
parser.parseFile(html)
links = parser.getElementsByTagName("a")
for l in links:
href = l.getAttribute("href")
if href:
href = urlparse(href)
if href.scheme in ["http", "https", "ftp", "ftps" ]:
try:
self.info("%s: Checking link to %s...", html, href.geturl())
r = requests.head(href.geturl(), headers={'user-agent' : "packlint/1.0"}, timeout=10)
r.raise_for_status()
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
exc_info = None
if self.loglevel() == logging.DEBUG:
exc_info = e
self.warning("%s: Broken web-link to %s!", html, href.geturl(), exc_info=exc_info)
except requests.exceptions.Timeout as e:
exc_info = None
if self.loglevel() == logging.DEBUG:
exc_info = e
self.warning("%s: Timeout following web-link to %s.", html, href.geturl(), exc_info=exc_info)
elif href.scheme == "javascript":
pass
elif not os.path.isabs(href.path):
target = os.path.normpath(os.path.join(os.path.dirname(html), href.path))
if not os.path.exists(target):
self.warning("%s: Broken relative-link to %s!", html, href.path)
else:
self.warning("%s: Broken relative-link to %s!", html, href.path)
def check_schema(self):
"""XML Schema"""
pass
| 39.158621 | 141 | 0.629623 |

| cbe2ce8810ad0077ee9c25ded7807c9fe3c9b4e9 | 2,051 | py | Python | project/boundingbox.py | arifBurakDemiray/computer-graphics | acf4781f92e325b12d986974c448b0e3520af431 | ["MIT"] | null | null | null | project/boundingbox.py | arifBurakDemiray/computer-graphics | acf4781f92e325b12d986974c448b0e3520af431 | ["MIT"] | null | null | null | project/boundingbox.py | arifBurakDemiray/computer-graphics | acf4781f92e325b12d986974c448b0e3520af431 | ["MIT"] | null | null | null |
# CENG 487 Assignment6 by
# Arif Burak Demiray
# December 2021
from .vector import *
from .matrix import *
class BoundingBox:
def __init__(self):
self.min = Point3f(1000, 1000, 1000)
self.max = Point3f(-1000, -1000, -1000)
def volume(self):
diagonal = self.max - self.min
return sqrlen(diagonal)
def center(self):
return 0.5 * (self.min + self.max)
def expand(self, point):
if (point.x < self.min.x):
self.min.x = point.x
if (point.y < self.min.y):
self.min.y = point.y
if (point.z < self.min.z):
self.min.z = point.z
if (point.x > self.max.x):
self.max.x = point.x
if (point.y > self.max.y):
self.max.y = point.y
if (point.z > self.max.z):
self.max.z = point.z
def union(self, other):
if (other.min.x < self.min.x):
self.min.x = other.min.x
if (other.min.y < self.min.y):
self.min.y = other.min.y
if (other.min.z < self.min.z):
self.min.z = other.min.z
if (other.max.x > self.max.x):
self.max.x = other.max.x
if (other.max.y > self.max.y):
self.max.y = other.max.y
if (other.max.z > self.max.z):
self.max.z = other.max.z
def contains(self, point):
return self.min.x < point.x and point.x < self.max.x and \
self.min.y < point.y and point.y < self.max.y and \
self.min.z < point.z and point.z < self.max.z
def encloses(self, other):
return other.min.x >= self.min.x and other.min.y >= self.min.y and other.min.z >= self.min.z and \
other.max.x <= self.max.x and other.max.y <= self.max.y and other.max.z <= self.max.z
def overlaps(self, other):
return not (self.max.x <= other.min.x or other.max.x <= self.min.x or
self.max.y <= other.min.y or other.max.y <= self.min.y or
self.max.z <= other.min.z or other.max.z <= self.min.z)
| 33.080645 | 106 | 0.532911 |

| fad47491cdc6bc31ef234b2d40b259ca925f6318 | 15,288 | py | Python | src/noise.py | richtertill/noisy_machine_learning | 731966b1f4d1d392a9fe6dee692e254488f4c569 | ["MIT"] | 2 | 2022-01-10T03:54:00.000Z | 2022-03-11T23:19:31.000Z | src/noise.py | richtertill/noisy_machine_learning | 731966b1f4d1d392a9fe6dee692e254488f4c569 | ["MIT"] | null | null | null | src/noise.py | richtertill/noisy_machine_learning | 731966b1f4d1d392a9fe6dee692e254488f4c569 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import os
import sys
import torch
from torch.distributions.poisson import Poisson
import numpy as np
from scipy import stats
def add_data_noise(data_noise, tensor, prob, mean=0, std=1):
"""
adds noise to the image
:param std:
:param mean:
:param prob: for salt_pepper loss, default 5% of pixels are set to black or white
:param data_noise: str, type of noise to be added
'gauss' Gaussian-distributed additive noise
'salt_pepper' Replaces random pixels with 0 or 1
        'poisson' Poisson-distributed noise generated from the data
'speckle' Multiplicative noise
:param tensor: torch tensor, image
:return: torch tensor, image with added noise
"""
# we need torch tensor, dataloader might return numpy array
if not torch.is_tensor(tensor):
tensor = torch.from_numpy(tensor)
if data_noise == 'no':
return tensor
if data_noise == 'gauss':
"""
1. L2-norm of the signal/image using functions like
numpy.linalg.norm —> Signal Power P_s (Scalar)
2. L2-norm of the noise using the same function above
—> Noise Power P_n (Scalar)
3. Compute the SNR = P_s / P_n —> Scalar
4. Noisy signal = Signal + \alpha * SNR * Noise.
—> signal or image, and \alpha controls how much noise you would like to add.
For example, when you set \alpha > 1 then you amplify the noise (dominant),
however, when you set \alpha < 1 (the signal will be dominant).
Suggested start with 0.1 (10%).
"""
gaussian = torch.randn(size=tensor.shape)
if tensor.dim() == 2: # shape batch x dim x dim
# get norm for each image
signal_norm = np.linalg.norm(tensor, axis=(-1, -2))
noise_norm = np.linalg.norm(gaussian, axis=(-1, -2))
elif tensor.dim() == 3 and tensor.shape[-1] == 3: # cifar10 case
signal_norm = np.linalg.norm(tensor, axis=(0, 1))
noise_norm = np.linalg.norm(gaussian, axis=(0, 1))
elif tensor.dim() == 3 and tensor.shape[0] == 3: # cifar10 new transforms
signal_norm = np.linalg.norm(tensor, axis=(1,2))
noise_norm = np.linalg.norm(gaussian, axis=(1,2))
signal_norm = np.mean(signal_norm, axis=-1)
noise_norm = np.mean(noise_norm, axis=-1)
elif tensor.dim() == 4: # shape batch x channels x dim x dim
signal_norm = np.linalg.norm(tensor, axis=(-1, -2))
signal_norm = np.mean(signal_norm, axis=-1)
noise_norm = np.linalg.norm(gaussian, axis=(-1, -2))
noise_norm = np.mean(noise_norm, axis=-1)
snr = (signal_norm / noise_norm) # -> shape batch
if type(snr) != torch.tensor:
snr = torch.tensor(snr)
out = tensor + prob * snr * gaussian
return tensor + prob * snr * gaussian
if data_noise == 'salt_pepper':
rnd = torch.FloatTensor(tensor.size()).uniform_(0, 1)
noisy = tensor
noisy[rnd < prob / 2] = 0.
noisy[rnd > 1 - prob / 2] = 1.
return noisy
if data_noise == 'poisson':
np_tensor = tensor.numpy()
val = len(np.unique(np_tensor))
val = 2 ** np.ceil(np.log2(val))
np_noisy = np.random.poisson(np_tensor * val) / float(val)
return torch.from_numpy(np_noisy)
if data_noise == 'speckle':
if tensor.dim() == 4:
gauss = torch.randn(tensor.shape[0], tensor.shape[1], tensor.shape[2], tensor.shape[3])
return torch.Tensor(tensor + tensor * gauss)
elif tensor.dim() == 3:
gauss = torch.randn(tensor.shape[0], tensor.shape[1], tensor.shape[2])
return torch.Tensor(tensor + tensor * gauss)
elif tensor.dim() == 2:
gauss = torch.randn(tensor.shape[0], tensor.shape[1])
return torch.Tensor(tensor + tensor * gauss)
if data_noise == 'open_set':
'''
Open set noise as in Yu et al (2019)
Replace prob % of images with images of another dataset
'''
print('Open Set noise from Yu et al (2019) is not implemented yet')
exit()
return tensor
def add_label_noise(label_noise, tensor, labels, prob, classes, indices):
"""
adds noise to the labels, currently only for MNIST
:param labels: array, all possible labels
:param label_noise: str, indicates type of noise to be added
:param tensor: torch tensor, true labels from the dataset
:param prob: percentage of labels to be corrupted
:param classes: index of classes that should be corrupted
:return: label, torch tensor of labels with noise
"""
if label_noise == 'no':
return tensor
if label_noise == 'deterministic':
'''
Switch labels of first prob%
'''
if type(tensor) != torch.Tensor:
tensor = torch.Tensor(tensor)
ind_a_corrupted = indices[0]
ind_b_corrupted = indices[1]
tensor[ind_a_corrupted] = classes[1]
tensor[ind_b_corrupted] = classes[0]
return tensor
if label_noise == 'vanilla':
'''
Switch labels of prob % of label a and b
'''
if type(tensor) != torch.Tensor:
tensor = torch.Tensor(tensor)
tensor = tensor.long()
class_a = classes[0] # class that should be corrupted with class b, int
class_b = classes[1] # class that should be corrupted with class a, int
if tensor.dim() == 2:
ind_a = tensor[:, class_a].nonzero()
ind_b = tensor[:, class_b].nonzero()
count_a = ind_a.shape[0]
count_b = ind_b.shape[0]
min_total = min(count_a, count_b) # get the min of both to have enough true labels left, int
n_corrupted = int(prob * min_total) # number of labels to be corrupted, int
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted) # get n% of ind_a
indices_b = np.random.choice(ind_b.squeeze(1), size=n_corrupted) # get n% of ind_b
tensor[indices_a, class_a] = 0
tensor[indices_b, class_b] = 0
tensor[indices_a, class_b] = 1
tensor[indices_b, class_a] = 1
else:
ind_a = (tensor == class_a).nonzero() # ind of occurences for a, tensor with [row1, col1], ..., tensor Nx1
ind_b = (tensor == class_b).nonzero() # ind of occurences for b, tensor with [row1, col1], ..., tensor Nx1
count_a = (tensor == class_a).sum().item() # total number of class a, int
count_b = (tensor == class_b).sum().item() # total number of class b, int
min_total = min(count_a, count_b) # get the min of both to have enough true labels left, int
n_corrupted = int(prob * min_total) # number of labels to be corrupted, int
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted) # get n% of ind_a
indices_b = np.random.choice(ind_b.squeeze(1), size=n_corrupted) # get n% of ind_b
tensor[indices_a] = class_b
tensor[indices_b] = class_a
return tensor
"""
Possible label noise, doesn't yield much information gain
if label_noise == 'symmetric_incl':
'''
Switch labels of prob % of all classes, including switching to original number
'''
tensor = tensor.long()
# get n indices that should be corrupted
indices = torch.randperm(tensor.shape[0])[:int(prob * tensor.size(0))]
# workaround to be generalized with all types of labels
length = labels.shape[0]
ind = torch.arange(0, length)
# new_label = np.random.choice(ind, size=int(prob * tensor.size(0)), replace=False)
new_label = torch.randperm(ind.shape[0])[:int(prob * tensor.size(0))].type(torch.FloatTensor)
tensor[indices] = new_label
return tensor
"""
if label_noise == 'symmetric':
'''
Switch labels of prob % of class a, excluding switching to original number
'''
if type(tensor) != torch.Tensor:
tensor = torch.Tensor(tensor)
class_a = classes[0] # class that should be corrupted with class b, int
tensor = tensor.long()
if tensor.dim() == 2: # multi label case
ind_a = (tensor[:,
class_a] == 1).nonzero() # ind of occurences for a, tensor with [row1, col1], ..., tensor Nx1
count_a = ind_a.shape[0]
n_corrupted = int(prob * count_a) # number of labels to be corrupted,
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted)
new_label = np.random.choice(np.setdiff1d(range(0, tensor.shape[-1]), int(class_a)),
size=n_corrupted).astype(np.int)
for i in range(new_label.shape[0]):
tensor[indices_a[i], new_label[i]] = 1
tensor[indices_a[i], class_a] = 0
else:
ind_a = (tensor == class_a).nonzero() # ind of occurences for a, tensor with [row1, col1], ..., tensor Nx1
count_a = (tensor == class_a).sum().item() # total number of class a, int
n_corrupted = int(prob * count_a) # number of labels to be corrupted,
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted) # get n% of ind a
length = labels.shape[0]
ind = torch.arange(0, length)
new_label = np.empty(indices_a.shape[0])
for i in range(indices_a.shape[0]):
new_label[i] = np.random.choice(np.setdiff1d(range(0, length), int(class_a))).astype(np.int)
tensor[indices_a] = torch.from_numpy(new_label).type(torch.LongTensor)
return tensor
if label_noise == 'symmetrc_multi': # symmetric for multi-label case, eg CheXPert
if type(tensor) != torch.Tensor:
tensor = torch.Tensor(tensor)
class_a = classes[0] # class that should be corrupted with class b, int
tensor = tensor.long()
if tensor.dim() < 2:
print('This label noise is only designed for multi-label scenario, you have single label')
else:
ind_a = (tensor[:,
class_a] == 1).nonzero() # ind of occurences for a, tensor with [row1, col1], ..., tensor Nx1
count_a = ind_a.shape[0]
n_corrupted = int(prob * count_a) # number of labels to be corrupted,
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted)
new_label = np.random.choice(np.setdiff1d(range(0, tensor.shape[-1]), int(class_a)),
size=n_corrupted).astype(np.int)
for i in range(new_label.shape[0]):
tensor[indices_a[i], new_label[i]] = 1
return tensor
if label_noise == 'assymetric_single':
'''
Switch labels of prob % or class i to the neighbouring class
From Want et al 2018a
Change class i to class i+1
'''
if type(tensor) != torch.Tensor:
tensor = torch.Tensor(tensor)
class_a = classes[0] # class that should be corrupted with class b, int
tensor = tensor.long()
if class_a == len(labels) - 1:
new_class = 0
else:
new_class = class_a + 1
if tensor.dim() == 2: # multi label case
ind_a = (tensor[:,
class_a] == 1).nonzero() # ind of occurences for a, tensor with [row1, col1], ..., tensor Nx1
count_a = ind_a.shape[0]
n_corrupted = int(prob * count_a) # number of labels to be corrupted,
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted)
indices = torch.randperm(ind_a.shape[0])[:int(prob * ind_a.size(0))]
tensor_check = tensor
for i in range(indices.shape[0]):
tensor[indices[i], class_a] = 0
tensor[indices[i], class_a + 1] = 1
else:
ind_a = (tensor == class_a).nonzero() # ind of occurences for a, tensor with [row1, col1], ..., tensor Nx1
# get n indices that should be corrupted
n_corrupted = int(prob * ind_a.shape[0]) # how many labels should be corrupted
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted) # get n% of ind_a
indices = torch.randperm(ind_a.shape[0])[:int(prob * ind_a.size(0))]
tensor[indices] = new_class
tensor.type(torch.FloatTensor)
count_a = (tensor == class_a).sum().item() # total number of class a, int
n_corrupted = int(prob * count_a) # number of labels to be corrupted,
indices_a = np.random.choice(ind_a.squeeze(1), size=n_corrupted) # get n% of ind a
length = labels.shape[0]
ind = torch.arange(0, length)
new_label = np.empty(indices_a.shape[0])
for i in range(indices_a.shape[0]):
new_label[i] = np.random.choice(np.setdiff1d(range(0, length), int(class_a))).astype(np.int)
tensor[indices_a] = torch.from_numpy(new_label).type(torch.LongTensor)
return tensor
if label_noise == 'assymetric':
'''
Switch labels of prob % of all classes to specific incorrect class
From Wang et al 2018a
Change class i to class i+1
'''
if type(tensor) != torch.Tensor:
tensor = torch.Tensor(tensor)
tensor = tensor.long()
indices = torch.randperm(tensor.shape[0])[:int(prob * tensor.size(0))] # indices to be permuted
if tensor.dim() == 2: # multi label case
for image in range(indices.shape[0]):
labels = (tensor[image, :] == 1).nonzero() # get true labels, ie where the label is 1 for the image
# 1st set all old labels to 0
for j in range(labels.shape[0]):
tensor[image, labels[j]] = 0
# 2nd set all new labels to 1
for j in range(labels.shape[0]):
if labels[j] == tensor.shape[-1]:
tensor[image, 0] = 1
else:
tensor[image, labels[j]] = 1
else:
length = labels.shape[0]
new_label = np.empty(indices.shape)
for i in range(len(indices)):
if tensor[i] == length - 1: # old label = 9 -> new label = 0
new_label[i] = 0
else:
new_label[i] = tensor[i] + 1
tensor[indices] = torch.from_numpy(new_label).type(torch.LongTensor)
tensor.type(torch.FloatTensor)
return tensor
if label_noise == 'semantic':
'''
Switch labels from false negative samples to the erroneously classified label
'''
return tensor
if label_noise == 'adversarial':
'''
Add noise on labels such that the decision boundary is yet not crossed
'''
print('Adversarial Label Noise is a future research direction!')
return tensor
| 43.68 | 119 | 0.579082 |
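The `add_data_noise` docstring in the file above describes four corruption modes, with the Gaussian mode scaling the noise by the per-image signal-to-noise ratio (noisy = signal + alpha * SNR * noise). A brief usage sketch, assuming the file is importable as a module named `noise` (the import path, image size, and probabilities are assumptions):

```python
import torch
from noise import add_data_noise  # assumed import path for the src/noise.py file above

image = torch.rand(28, 28)  # a single grayscale image with values in [0, 1]

# SNR-scaled Gaussian noise: prob acts as the alpha factor (~10% of the SNR here).
noisy_gauss = add_data_noise("gauss", image, prob=0.1)

# Salt-and-pepper noise: roughly 5% of pixels forced to 0 or 1.
# Clone first, because this mode modifies the input tensor in place.
noisy_sp = add_data_noise("salt_pepper", image.clone(), prob=0.05)

print(noisy_gauss.shape, noisy_sp.shape)  # torch.Size([28, 28]) twice
```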
| f2fd64d98990d4790e0d8888e3863f7c40f919ae | 839 | py | Python | setup.py | davegallant/ickinesis | eca715a8f5c3364dd9249adb59a9ec8bddbb7a5c | ["Apache-2.0"] | 1 | 2019-10-03T13:43:14.000Z | 2019-10-03T13:43:14.000Z | setup.py | davegallant/kinesis | eca715a8f5c3364dd9249adb59a9ec8bddbb7a5c | ["Apache-2.0"] | null | null | null | setup.py | davegallant/kinesis | eca715a8f5c3364dd9249adb59a9ec8bddbb7a5c | ["Apache-2.0"] | null | null | null |
import io # for python2
from os import path
from setuptools import setup, find_packages
from kinesis.__version__ import VERSION
WORKING_DIR = path.abspath(path.dirname(__file__))
# Get long description from README.md
with io.open(path.join(WORKING_DIR, "README.md"), encoding="utf-8") as f:
LONG_DESCRIPTION = f.read()
setup(
author="Dave Gallant",
description="a kinesis consumer / producer",
entry_points={"console_scripts": ["kinesis=kinesis.cli:main"]},
install_requires=["boto3>=1.5.36", "pygments>=2.2.0"],
keywords=["aws", "kinesis", "pygments"],
license="Apache License, Version 2.0",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
name="kinesis",
packages=find_packages(),
url="https://github.com/davegallant/kinesis",
version=VERSION,
)
| 31.074074 | 73 | 0.716329 |

| f67df278a7381ed9533362eb686fec9fdbfc9dc9 | 892 | py | Python | reference/generated/numpy-fft-fftn-1.py | bonn0062/devdocs | 221a9b4a14cc2ba22fb656960d1b55e4ed46e7c7 | ["BSD-3-Clause"] | 1 | 2021-07-30T04:40:34.000Z | 2021-07-30T04:40:34.000Z | reference/generated/numpy-fft-fftn-1.py | baby636/devdocs | 406a0989ab7a31f11e0b0da3e50503c0ad6193cd | ["BSD-3-Clause"] | null | null | null | reference/generated/numpy-fft-fftn-1.py | baby636/devdocs | 406a0989ab7a31f11e0b0da3e50503c0ad6193cd | ["BSD-3-Clause"] | null | null | null |
a = np.mgrid[:3, :3, :3][0]
np.fft.fftn(a, axes=(1, 2))
# array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary
# [ 0.+0.j, 0.+0.j, 0.+0.j],
# [ 0.+0.j, 0.+0.j, 0.+0.j]],
# [[ 9.+0.j, 0.+0.j, 0.+0.j],
# [ 0.+0.j, 0.+0.j, 0.+0.j],
# [ 0.+0.j, 0.+0.j, 0.+0.j]],
# [[18.+0.j, 0.+0.j, 0.+0.j],
# [ 0.+0.j, 0.+0.j, 0.+0.j],
# [ 0.+0.j, 0.+0.j, 0.+0.j]]])
np.fft.fftn(a, (2, 2), axes=(0, 1))
# array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary
# [ 0.+0.j, 0.+0.j, 0.+0.j]],
# [[-2.+0.j, -2.+0.j, -2.+0.j],
# [ 0.+0.j, 0.+0.j, 0.+0.j]]])
import matplotlib.pyplot as plt
[X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
2 * np.pi * np.arange(200) / 34)
S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
FS = np.fft.fftn(S)
plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
# <matplotlib.image.AxesImage object at 0x...>
plt.show()
| 34.307692 | 60 | 0.433857 |

| cc044459a7111425203a1b4cfb43eacfdd3cb805 | 84 | py | Python | meiduo_mall/meiduo_mall/apps/ouath/constants.py | JianChengBai/Django | a81f71ef431df5e2b0cdb43af11366feb9bfd346 | ["MIT"] | null | null | null | meiduo_mall/meiduo_mall/apps/ouath/constants.py | JianChengBai/Django | a81f71ef431df5e2b0cdb43af11366feb9bfd346 | ["MIT"] | null | null | null | meiduo_mall/meiduo_mall/apps/ouath/constants.py | JianChengBai/Django | a81f71ef431df5e2b0cdb43af11366feb9bfd346 | ["MIT"] | null | null | null |
SAVE_QQ_USER_TOKEN_EXPIRES = 300
VERIFY_EMAIL_TOKEN_EXPIRES = 2 * 24 * 60 * 60
| 10.5 | 45 | 0.738095 |

| d77d8ab327963250509b90395ce2b6d79aea36b7 | 1,848 | py | Python | qtim_tools/qtim_dce/tofts_worker.py | QTIM-Lab/qtim_tools | 92bd15ec7a81c5eda70d11a015f74538f3c41e22 | ["Apache-2.0"] | 12 | 2017-03-29T18:17:24.000Z | 2020-03-19T05:28:56.000Z | qtim_tools/qtim_dce/tofts_worker.py | QTIM-Lab/qtim_tools | 92bd15ec7a81c5eda70d11a015f74538f3c41e22 | ["Apache-2.0"] | 7 | 2017-03-08T21:06:01.000Z | 2017-06-21T19:01:58.000Z | qtim_tools/qtim_dce/tofts_worker.py | QTIM-Lab/qtim_tools | 92bd15ec7a81c5eda70d11a015f74538f3c41e22 | ["Apache-2.0"] | 5 | 2017-03-02T09:08:21.000Z | 2019-10-26T05:37:39.000Z |
import tofts_parameter_calculator
# from ..qtim_utilities import nifti_util
def test(filepath=[]):
tofts_parameter_calculator.test_method_3d(filepath)
def run_test():
# You must install the packages nibabel and pydicom before running this program.
filepath='C:/Users/abeers/Documents/GitHub/Public_QTIM/qtim_tools/qtim_tools/test_data/test_data_dce/tofts_v6.nii.gz'
label_file=[]
label_suffix=[]
label_value=1
label_mode='separate'
T1_map_file=[]
T1_map_suffix=[]
AIF_value_data=[]
AIF_value_suffix=[]
convert_AIF_values=False
default_population_AIF=False
AIF_label_file=[]
AIF_mode='label_average'
AIF_label_suffix='-AIF-label'
AIF_label_value=1
T1_tissue=1000
T1_blood=1440
relaxivity=.0045
TR=5
TE=2.1
scan_time_seconds=(11*60)
hematocrit=0.45
injection_start_time_seconds=60
flip_angle_degrees=30
initial_fitting_function_parameters=[.3,.1]
outputs=['ktrans','ve','auc']
outfile_prefix='tofts_v6_test_'
processes=2
mask_threshold=20
mask_value=-1
gaussian_blur=0
gaussian_blur_axis=-1
param_file=[]
tofts_parameter_calculator.calc_DCE_properties_single(filepath, T1_tissue, T1_blood, relaxivity, TR, TE, scan_time_seconds, hematocrit, injection_start_time_seconds, flip_angle_degrees, label_file, label_suffix, label_value, mask_value, mask_threshold, T1_map_file, T1_map_suffix, AIF_label_file, AIF_value_data, AIF_value_suffix, convert_AIF_values, AIF_mode, AIF_label_suffix, AIF_label_value, label_mode, param_file, default_population_AIF, initial_fitting_function_parameters, outputs, outfile_prefix, processes, gaussian_blur, gaussian_blur_axis)
if __name__ == '__main__':
    print('Entered program..')
run_test()
# test('C:/Users/azb22/Documents/Junk/dce_mc_st_corrected.nii')
# test()
# tofts_parameter_calculator.test_method_3d()
# tofts_parameter_calculator.test_method_2d()
| 28.875 | 553 | 0.808442 |

| 9347e06faba3b4f815e09631de18f83930a215f0 | 1,979 | py | Python | 2021/d8.py | shubham-goel/advent_of_code | a0df7d692f25489f980caacda9fe17e7ccc56503 | ["MIT"] | 1 | 2021-12-10T21:08:56.000Z | 2021-12-10T21:08:56.000Z | 2021/d8.py | shubham-goel/advent_of_code | a0df7d692f25489f980caacda9fe17e7ccc56503 | ["MIT"] | null | null | null | 2021/d8.py | shubham-goel/advent_of_code | a0df7d692f25489f980caacda9fe17e7ccc56503 | ["MIT"] | null | null | null |
import numpy as np
file = '2021/inputs/d8.txt'
# Read the file
with open(file) as f:
lines = [line.strip() for line in f if line.strip()]
lines1 = [line.split(' | ') for line in lines]
inputs = [line[0].split(' ') for line in lines1]
outputs = [line[1].split(' ') for line in lines1]
for i,o in zip(inputs, outputs):
print(' '.join(i), '|', ' '.join(o))
outputs_sizes = np.array([[len(x) for x in line] for line in outputs])
print(outputs_sizes)
num_1 = np.sum(outputs_sizes == 2)
num_4 = np.sum(outputs_sizes == 4)
num_7 = np.sum(outputs_sizes == 3)
num_8 = np.sum(outputs_sizes == 7)
print(num_1, num_4, num_7, num_8)
print(num_1 + num_4 + num_7 + num_8)
## Part 2
output_numbers = []
for inp, out in zip(inputs, outputs):
input_sets = [frozenset(i) for i in inp]
input_sizes = np.array([len(i) for i in input_sets])
sets = [None] * 10
sets[1] = input_sets[np.where(input_sizes == 2)[0][0]]
sets[4] = input_sets[np.where(input_sizes == 4)[0][0]]
sets[7] = input_sets[np.where(input_sizes == 3)[0][0]]
sets[8] = input_sets[np.where(input_sizes == 7)[0][0]]
set_235_idx = np.where(input_sizes == 5)[0]
set_069_idx = np.where(input_sizes == 6)[0]
for i in set_235_idx:
if sets[1].issubset(input_sets[i]):
sets[3] = input_sets[i]
elif (sets[4]-sets[1]).issubset(input_sets[i]):
sets[5] = input_sets[i]
else:
sets[2] = input_sets[i]
sets[9] = sets[8] - (sets[2]-sets[3])
for i in set_069_idx:
if len(sets[1].union(input_sets[i]))==7:
sets[6] = input_sets[i]
elif len(sets[5].union(input_sets[i]))==7:
sets[0] = input_sets[i]
else:
assert input_sets[i]==sets[9]
mapping = {sets[i]:i for i in range(10)}
out_digits = [mapping[frozenset(o)] for o in out]
output = int("".join(map(str, out_digits)))
output_numbers.append(output)
print(output_numbers)
print(sum(output_numbers))
| 31.412698 | 70 | 0.608893 |

| 7ed17379598c9762f2776ccd4684fa78b9904249 | 770 | py | Python | setup.py | ericjaychi/project-tigrex | 6ed033c834e7bdc449ac832788619a184f4a702e | ["MIT"] | 16 | 2019-08-01T16:46:47.000Z | 2021-08-19T23:35:33.000Z | setup.py | ericjaychi/project-tigrex | 6ed033c834e7bdc449ac832788619a184f4a702e | ["MIT"] | 15 | 2019-08-05T21:12:51.000Z | 2020-07-25T17:15:03.000Z | setup.py | ericjaychi/project-tigrex | 6ed033c834e7bdc449ac832788619a184f4a702e | ["MIT"] | 2 | 2019-08-12T08:27:06.000Z | 2020-07-24T00:57:43.000Z |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
requirements = ["requests<=2.21.0", "fire<=0.1.3"]
setuptools.setup(
name="tigrex",
version="1.3.0",
author="Eric Chi",
author_email="ericjaychi@gmail.com",
description="A Magic the Gathering CLI Tool",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=requirements,
url="https://github.com/ericjaychi/tigrex",
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={'console_scripts': ['tigrex = tigrex.__main__:main']}
)
| 29.615385 | 71 | 0.667532 |

| d128de27c700567b15226e82a4acc5d5259ba26b | 9,330 | py | Python | storages/views.py | torjean/webvirtcloud | 1f1fb708710e639116624c5c5acd0fbe0c41a20d | ["Apache-2.0"] | null | null | null | storages/views.py | torjean/webvirtcloud | 1f1fb708710e639116624c5c5acd0fbe0c41a20d | ["Apache-2.0"] | 4 | 2020-02-12T03:16:43.000Z | 2021-06-10T22:08:23.000Z | storages/views.py | torjean/webvirtcloud | 1f1fb708710e639116624c5c5acd0fbe0c41a20d | ["Apache-2.0"] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from computes.models import Compute
from storages.forms import AddStgPool, AddImage, CloneImage
from vrtManager.storage import wvmStorage, wvmStorages
from libvirt import libvirtError
from django.contrib import messages
import json
@login_required
def storages(request, compute_id):
"""
:param request:
:return:
"""
if not request.user.is_superuser:
return HttpResponseRedirect(reverse('index'))
error_messages = []
compute = get_object_or_404(Compute, pk=compute_id)
try:
conn = wvmStorages(compute.hostname,
compute.login,
compute.password,
compute.type)
storages = conn.get_storages_info()
secrets = conn.get_secrets()
if request.method == 'POST':
if 'create' in request.POST:
form = AddStgPool(request.POST)
if form.is_valid():
data = form.cleaned_data
if data['name'] in storages:
msg = _("Pool name already use")
error_messages.append(msg)
if data['stg_type'] == 'rbd':
if not data['secret']:
msg = _("You need create secret for pool")
error_messages.append(msg)
if not data['ceph_pool'] and not data['ceph_host'] and not data['ceph_user']:
msg = _("You need input all fields for creating ceph pool")
error_messages.append(msg)
if not error_messages:
if data['stg_type'] == 'rbd':
conn.create_storage_ceph(data['stg_type'], data['name'],
data['ceph_pool'], data['ceph_host'],
data['ceph_user'], data['secret'])
elif data['stg_type'] == 'netfs':
conn.create_storage_netfs(data['stg_type'], data['name'],
data['netfs_host'], data['source'],
data['source_format'], data['target'])
else:
conn.create_storage(data['stg_type'], data['name'], data['source'], data['target'])
return HttpResponseRedirect(reverse('storage', args=[compute_id, data['name']]))
else:
for msg_err in form.errors.values():
error_messages.append(msg_err.as_text())
conn.close()
except libvirtError as lib_err:
error_messages.append(lib_err)
return render(request, 'storages.html', locals())
@login_required
def storage(request, compute_id, pool):
"""
:param request:
:return:
"""
if not request.user.is_superuser:
return HttpResponseRedirect(reverse('index'))
def handle_uploaded_file(path, f_name):
target = path + '/' + str(f_name)
destination = open(target, 'wb+')
for chunk in f_name.chunks():
destination.write(chunk)
destination.close()
error_messages = []
compute = get_object_or_404(Compute, pk=compute_id)
meta_prealloc = False
try:
conn = wvmStorage(compute.hostname,
compute.login,
compute.password,
compute.type,
pool)
storages = conn.get_storages()
state = conn.is_active()
size, free = conn.get_size()
used = (size - free)
if state:
percent = (used * 100) / size
else:
percent = 0
status = conn.get_status()
path = conn.get_target_path()
type = conn.get_type()
autostart = conn.get_autostart()
if state:
conn.refresh()
volumes = conn.update_volumes()
else:
volumes = None
except libvirtError as lib_err:
error_messages.append(lib_err)
if request.method == 'POST':
if 'start' in request.POST:
try:
conn.start()
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'stop' in request.POST:
try:
conn.stop()
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'delete' in request.POST:
try:
conn.delete()
return HttpResponseRedirect(reverse('storages', args=[compute_id]))
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'set_autostart' in request.POST:
try:
conn.set_autostart(1)
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'unset_autostart' in request.POST:
try:
conn.set_autostart(0)
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'add_volume' in request.POST:
form = AddImage(request.POST)
if form.is_valid():
data = form.cleaned_data
if data['meta_prealloc'] and data['format'] == 'qcow2':
meta_prealloc = True
try:
name = conn.create_volume(data['name'], data['size'], data['format'], meta_prealloc)
messages.success(request, _("Image file {} is created successfully".format(name)))
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err)
else:
for msg_err in form.errors.values():
error_messages.append(msg_err.as_text())
if 'del_volume' in request.POST:
volname = request.POST.get('volname', '')
try:
vol = conn.get_volume(volname)
vol.delete(0)
messages.success(request, _('Volume: {} is deleted.'.format(volname)))
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err.message)
if 'iso_upload' in request.POST:
if str(request.FILES['file']) in conn.update_volumes():
error_msg = _("ISO image already exist")
error_messages.append(error_msg)
else:
handle_uploaded_file(path, request.FILES['file'])
messages.success(request, _('ISO: {} is uploaded.'.format(request.FILES['file'])))
return HttpResponseRedirect(request.get_full_path())
if 'cln_volume' in request.POST:
form = CloneImage(request.POST)
if form.is_valid():
data = form.cleaned_data
img_name = data['name']
meta_prealloc = 0
if img_name in conn.update_volumes():
msg = _("Name of volume already in use")
error_messages.append(msg)
if not error_messages:
if data['convert']:
format = data['format']
if data['meta_prealloc'] and data['format'] == 'qcow2':
meta_prealloc = True
else:
format = None
try:
name = conn.clone_volume(data['image'], data['name'], format, meta_prealloc)
messages.success(request, _("{} image cloned as {} successfully".format(data['image'], name)))
return HttpResponseRedirect(request.get_full_path())
except libvirtError as lib_err:
error_messages.append(lib_err)
else:
for msg_err in form.errors.values():
error_messages.append(msg_err.as_text())
conn.close()
return render(request, 'storage.html', locals())
@login_required
def get_volumes(request, compute_id, pool):
data = {}
compute = get_object_or_404(Compute, pk=compute_id)
try:
conn = wvmStorage(compute.hostname,
compute.login,
compute.password,
compute.type,
pool)
conn.refresh()
except libvirtError:
pass
data['vols'] = sorted(conn.get_volumes())
return HttpResponse(json.dumps(data))
| 40.921053 | 118 | 0.53462 |

| 63c5daabc9e300b468e69ef22d0e34736c6c76c3 | 421 | py | Python | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/xla/experimental/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | ["MIT"] | 2 | 2020-09-30T00:11:09.000Z | 2021-10-04T13:00:38.000Z | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/xla/experimental/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | ["MIT"] | null | null | null | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/xla/experimental/__init__.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | ["MIT"] | 1 | 2021-01-28T01:57:41.000Z | 2021-01-28T01:57:41.000Z |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.xla.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.compiler.xla.jit import experimental_jit_scope as jit_scope
from tensorflow.python.compiler.xla.xla import compile
del _print_function
| 30.071429 | 82 | 0.821853 |

| e9219ca6263d24a607dd87bfa9f6eb2ec56d7d35 | 575 | py | Python | onmt/__init__.py | philhchen/OpenNMT-evidential-softmax | 87709ce1cf7bda783aed4a64c096fa23282e7aa9 | ["MIT"] | null | null | null | onmt/__init__.py | philhchen/OpenNMT-evidential-softmax | 87709ce1cf7bda783aed4a64c096fa23282e7aa9 | ["MIT"] | null | null | null | onmt/__init__.py | philhchen/OpenNMT-evidential-softmax | 87709ce1cf7bda783aed4a64c096fa23282e7aa9 | ["MIT"] | null | null | null |
""" Main entry point of the ONMT library """
from __future__ import division, print_function
import onmt.inputters
import onmt.encoders
import onmt.decoders
import onmt.models
import onmt.utils
import onmt.modules
from onmt.trainer import Trainer
import sys
import onmt.utils.optimizers
onmt.utils.optimizers.Optim = onmt.utils.optimizers.Optimizer
sys.modules["onmt.Optim"] = onmt.utils.optimizers
# For Flake
__all__ = [
onmt.inputters,
onmt.encoders,
onmt.decoders,
onmt.models,
onmt.utils,
onmt.modules,
"Trainer",
]
__version__ = "0.6.0"
| 19.827586 | 61 | 0.747826 |

| 20c85228f52d8e6181d21a79fa7d0dcc6db7acc3 | 9,470 | py | Python | tests/settings.py | Yuessiah/Othello-Minimax | e7fb043189ef108b639697ae74fd92c0695caac1 | ["MIT"] | null | null | null | tests/settings.py | Yuessiah/Othello-Minimax | e7fb043189ef108b639697ae74fd92c0695caac1 | ["MIT"] | null | null | null | tests/settings.py | Yuessiah/Othello-Minimax | e7fb043189ef108b639697ae74fd92c0695caac1 | ["MIT"] | 1 | 2021-05-05T01:20:40.000Z | 2021-05-05T01:20:40.000Z |
from unittest import TestCase
from game.board import Board
from game.settings import *
__author__ = 'bengt'
class TestSettings(TestCase):
def test_outside_top_left(self):
self.assertEqual(outside_board(0, NORTH), True)
self.assertEqual(outside_board(0, NORTHEAST), True)
self.assertEqual(outside_board(0, NORTHWEST), True)
self.assertEqual(outside_board(0, WEST), True)
self.assertEqual(outside_board(0, SOUTHWEST), True)
self.assertEqual(outside_board(0, EAST), False)
self.assertEqual(outside_board(0, SOUTHEAST), False)
self.assertEqual(outside_board(0, SOUTH), False)
def test_outside_top(self):
self.assertEqual(outside_board(1, NORTH), True)
self.assertEqual(outside_board(1, NORTHEAST), True)
self.assertEqual(outside_board(1, NORTHWEST), True)
self.assertEqual(outside_board(1, WEST), False)
self.assertEqual(outside_board(1, SOUTHWEST), False)
self.assertEqual(outside_board(1, EAST), False)
self.assertEqual(outside_board(1, SOUTHEAST), False)
self.assertEqual(outside_board(1, SOUTH), False)
self.assertEqual(outside_board(4, NORTH), True)
self.assertEqual(outside_board(4, NORTHEAST), True)
self.assertEqual(outside_board(4, NORTHWEST), True)
self.assertEqual(outside_board(4, WEST), False)
self.assertEqual(outside_board(4, SOUTHWEST), False)
self.assertEqual(outside_board(4, EAST), False)
self.assertEqual(outside_board(4, SOUTHEAST), False)
self.assertEqual(outside_board(4, SOUTH), False)
self.assertEqual(outside_board(6, NORTH), True)
self.assertEqual(outside_board(6, NORTHEAST), True)
self.assertEqual(outside_board(6, NORTHWEST), True)
self.assertEqual(outside_board(6, WEST), False)
self.assertEqual(outside_board(6, SOUTHWEST), False)
self.assertEqual(outside_board(6, EAST), False)
self.assertEqual(outside_board(6, SOUTHEAST), False)
self.assertEqual(outside_board(6, SOUTH), False)
def test_outside_top_right(self):
self.assertEqual(outside_board(7, NORTH), True)
self.assertEqual(outside_board(7, NORTHEAST), True)
self.assertEqual(outside_board(7, NORTHWEST), True)
self.assertEqual(outside_board(7, WEST), False)
self.assertEqual(outside_board(7, SOUTHWEST), False)
self.assertEqual(outside_board(7, EAST), True)
self.assertEqual(outside_board(7, SOUTHEAST), True)
self.assertEqual(outside_board(7, SOUTH), False)
def test_outside_right(self):
self.assertEqual(outside_board(15, NORTH), False)
self.assertEqual(outside_board(15, NORTHEAST), True)
self.assertEqual(outside_board(15, NORTHWEST), False)
self.assertEqual(outside_board(15, WEST), False)
self.assertEqual(outside_board(15, SOUTHWEST), False)
self.assertEqual(outside_board(15, EAST), True)
self.assertEqual(outside_board(15, SOUTHEAST), True)
self.assertEqual(outside_board(15, SOUTH), False)
self.assertEqual(outside_board(23, NORTH), False)
self.assertEqual(outside_board(23, NORTHEAST), True)
self.assertEqual(outside_board(23, NORTHWEST), False)
self.assertEqual(outside_board(23, WEST), False)
self.assertEqual(outside_board(23, SOUTHWEST), False)
self.assertEqual(outside_board(23, EAST), True)
self.assertEqual(outside_board(23, SOUTHEAST), True)
self.assertEqual(outside_board(23, SOUTH), False)
self.assertEqual(outside_board(39, NORTH), False)
self.assertEqual(outside_board(39, NORTHEAST), True)
self.assertEqual(outside_board(39, NORTHWEST), False)
self.assertEqual(outside_board(39, WEST), False)
self.assertEqual(outside_board(39, SOUTHWEST), False)
self.assertEqual(outside_board(39, EAST), True)
self.assertEqual(outside_board(39, SOUTHEAST), True)
self.assertEqual(outside_board(39, SOUTH), False)
def test_outside_bottom_right(self):
self.assertEqual(outside_board(63, NORTH), False)
self.assertEqual(outside_board(63, NORTHEAST), True)
self.assertEqual(outside_board(63, NORTHWEST), False)
self.assertEqual(outside_board(63, WEST), False)
self.assertEqual(outside_board(63, SOUTHWEST), True)
self.assertEqual(outside_board(63, EAST), True)
self.assertEqual(outside_board(63, SOUTHEAST), True)
self.assertEqual(outside_board(63, SOUTH), True)
def test_outside_bottom(self):
self.assertEqual(outside_board(57, NORTH), False)
self.assertEqual(outside_board(57, NORTHEAST), False)
self.assertEqual(outside_board(57, NORTHWEST), False)
self.assertEqual(outside_board(57, WEST), False)
self.assertEqual(outside_board(57, SOUTHWEST), True)
self.assertEqual(outside_board(57, EAST), False)
self.assertEqual(outside_board(57, SOUTHEAST), True)
self.assertEqual(outside_board(57, SOUTH), True)
self.assertEqual(outside_board(58, NORTH), False)
self.assertEqual(outside_board(58, NORTHEAST), False)
self.assertEqual(outside_board(58, NORTHWEST), False)
self.assertEqual(outside_board(58, WEST), False)
self.assertEqual(outside_board(58, SOUTHWEST), True)
self.assertEqual(outside_board(58, EAST), False)
self.assertEqual(outside_board(58, SOUTHEAST), True)
self.assertEqual(outside_board(58, SOUTH), True)
self.assertEqual(outside_board(62, NORTH), False)
self.assertEqual(outside_board(62, NORTHEAST), False)
self.assertEqual(outside_board(62, NORTHWEST), False)
self.assertEqual(outside_board(62, WEST), False)
self.assertEqual(outside_board(62, SOUTHWEST), True)
self.assertEqual(outside_board(62, EAST), False)
self.assertEqual(outside_board(62, SOUTHEAST), True)
self.assertEqual(outside_board(62, SOUTH), True)
def test_outside_bottom_left(self):
self.assertEqual(outside_board(56, NORTH), False)
self.assertEqual(outside_board(56, NORTHEAST), False)
self.assertEqual(outside_board(56, NORTHWEST), True)
self.assertEqual(outside_board(56, WEST), True)
self.assertEqual(outside_board(56, SOUTHWEST), True)
self.assertEqual(outside_board(56, EAST), False)
self.assertEqual(outside_board(56, SOUTHEAST), True)
self.assertEqual(outside_board(56, SOUTH), True)
def test_outside_left(self):
self.assertEqual(outside_board(48, NORTH), False)
self.assertEqual(outside_board(48, NORTHEAST), False)
self.assertEqual(outside_board(48, NORTHWEST), True)
self.assertEqual(outside_board(48, WEST), True)
self.assertEqual(outside_board(48, SOUTHWEST), True)
self.assertEqual(outside_board(48, EAST), False)
self.assertEqual(outside_board(48, SOUTHEAST), False)
self.assertEqual(outside_board(48, SOUTH), False)
self.assertEqual(outside_board(40, NORTH), False)
self.assertEqual(outside_board(40, NORTHEAST), False)
self.assertEqual(outside_board(40, NORTHWEST), True)
self.assertEqual(outside_board(40, WEST), True)
self.assertEqual(outside_board(40, SOUTHWEST), True)
self.assertEqual(outside_board(40, EAST), False)
self.assertEqual(outside_board(40, SOUTHEAST), False)
self.assertEqual(outside_board(40, SOUTH), False)
self.assertEqual(outside_board(24, NORTH), False)
self.assertEqual(outside_board(24, NORTHEAST), False)
self.assertEqual(outside_board(24, NORTHWEST), True)
self.assertEqual(outside_board(24, WEST), True)
self.assertEqual(outside_board(24, SOUTHWEST), True)
self.assertEqual(outside_board(24, EAST), False)
self.assertEqual(outside_board(24, SOUTHEAST), False)
self.assertEqual(outside_board(24, SOUTH), False)
def test_outside_middle(self):
self.assertEqual(outside_board(26, NORTH), False)
self.assertEqual(outside_board(26, NORTHEAST), False)
self.assertEqual(outside_board(26, NORTHWEST), False)
self.assertEqual(outside_board(26, WEST), False)
self.assertEqual(outside_board(26, SOUTHWEST), False)
self.assertEqual(outside_board(26, EAST), False)
self.assertEqual(outside_board(26, SOUTHEAST), False)
self.assertEqual(outside_board(26, SOUTH), False)
self.assertEqual(outside_board(35, NORTH), False)
self.assertEqual(outside_board(35, NORTHEAST), False)
self.assertEqual(outside_board(35, NORTHWEST), False)
self.assertEqual(outside_board(35, WEST), False)
self.assertEqual(outside_board(35, SOUTHWEST), False)
self.assertEqual(outside_board(35, EAST), False)
self.assertEqual(outside_board(35, SOUTHEAST), False)
self.assertEqual(outside_board(35, SOUTH), False)
self.assertEqual(outside_board(10, NORTH), False)
self.assertEqual(outside_board(10, NORTHEAST), False)
self.assertEqual(outside_board(10, NORTHWEST), False)
self.assertEqual(outside_board(10, WEST), False)
self.assertEqual(outside_board(10, SOUTHWEST), False)
self.assertEqual(outside_board(10, EAST), False)
self.assertEqual(outside_board(10, SOUTHEAST), False)
self.assertEqual(outside_board(10, SOUTH), False)
| 50.37234
| 61
| 0.697571
|
b2894a8f1731b5599a131d2b052bafa079485660
| 3,681
|
py
|
Python
|
setup.py
|
hurwitzlab/viral-learning
|
8d7aebc0d58fa32a429f4a47593452ee2722ba82
|
[
"MIT"
] | 1
|
2018-02-23T16:49:30.000Z
|
2018-02-23T16:49:30.000Z
|
setup.py
|
hurwitzlab/viral-learning
|
8d7aebc0d58fa32a429f4a47593452ee2722ba82
|
[
"MIT"
] | null | null | null |
setup.py
|
hurwitzlab/viral-learning
|
8d7aebc0d58fa32a429f4a47593452ee2722ba82
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='vl',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='Discriminate bacteria vs virus using deep learning.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/hurwitzlab/viral-learning',
# Author details
author='Joshua Lynch',
author_email='jklynch@email.arizona.edu',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
        'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
# What does your project relate to?
keywords='',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'keras',
'h5py',
'tensorflow',
'matplotlib',
'numpy',
'pandas',
'scikit-learn'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': ['pytest'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
##package_data={
## 'sample': ['package_data.dat'],
##},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
##data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
##entry_points={
## 'console_scripts': [
## 'write_models=orminator.write_models:main'
## ],
##},
)
| 32.575221
| 94
| 0.661233
|
8a198565fc594428ace23d59602582f6e3518054
| 440
|
py
|
Python
|
tests/test_stats.py
|
Prathamesh-B/chess-stats-readme
|
fe9bedb049d12edcbac9d824066c6b7a9346d2ab
|
[
"MIT"
] | 1
|
2021-07-14T11:28:37.000Z
|
2021-07-14T11:28:37.000Z
|
tests/test_stats.py
|
Prathamesh-B/chess-stats-readme
|
fe9bedb049d12edcbac9d824066c6b7a9346d2ab
|
[
"MIT"
] | 1
|
2021-11-30T23:11:11.000Z
|
2021-11-30T23:11:11.000Z
|
tests/test_stats.py
|
Prathamesh-B/chess-stats-readme
|
fe9bedb049d12edcbac9d824066c6b7a9346d2ab
|
[
"MIT"
] | 1
|
2021-11-30T11:19:16.000Z
|
2021-11-30T11:19:16.000Z
|
import unittest
import requests
from main import get_stats
class TestStats(unittest.TestCase):
def test_api(self):
response = requests.get("https://api.chess.com/pub/player/PrathamRex/stats")
self.assertEqual(response.status_code, 200)
def test_best_ratings(self):
self.assertTrue(get_stats("PrathamRex", "best"))
def test_last_ratings(self):
self.assertTrue(get_stats("PrathamRex", "last"))
| 27.5
| 84
| 0.713636
|
6ba6f938df38c3bf82eee2b4e516302ec99c93dd
| 5,925
|
py
|
Python
|
spotify.py
|
gregahpradana/spotify_downloader_telegram__bot
|
d2fab0a6111c99bfc2f2ea21131edadf3fce05a0
|
[
"MIT"
] | 1
|
2021-09-05T14:33:06.000Z
|
2021-09-05T14:33:06.000Z
|
spotify.py
|
gregahpradana/spotify_downloader_telegram__bot
|
d2fab0a6111c99bfc2f2ea21131edadf3fce05a0
|
[
"MIT"
] | null | null | null |
spotify.py
|
gregahpradana/spotify_downloader_telegram__bot
|
d2fab0a6111c99bfc2f2ea21131edadf3fce05a0
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import requests
from youtube_search import YoutubeSearch
import youtube_dl
import eyed3.id3
import eyed3
import lyricsgenius
import telepot
spotifyy = spotipy.Spotify(
client_credentials_manager=SpotifyClientCredentials(client_id='a145db3dcd564b9592dacf10649e4ed5',
client_secret='389614e1ec874f17b8c99511c7baa2f6'))
genius = lyricsgenius.Genius('biZZReO7F98mji5oz3cE0FiIG73Hh07qoXSIzYSGNN3GBsnY-eUrPAVSdJk_0_de')
token = 'token bot'
bot = telepot.Bot(token)
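# DOWNLOADMP3: look up the track on Spotify, find a matching YouTube video,
# download it as MP3, tag it and send it back to the Telegram chat.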
def DOWNLOADMP3(link,chat_id):
results = spotifyy.track(link)
song = results['name']
artist = results['artists'][0]['name']
YTSEARCH = str(song + " " + artist)
artistfinder = results['artists']
tracknum = results['track_number']
album = results['album']['name']
realese_date = int(results['album']['release_date'][:4])
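    # Build an "(Ft. ...)" suffix from any additional credited artists.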
if len(artistfinder) > 1:
fetures = "(Ft."
for lomi in range(0, len(artistfinder)):
try:
if lomi < len(artistfinder) - 2:
artistft = artistfinder[lomi + 1]['name'] + ", "
fetures += artistft
else:
artistft = artistfinder[lomi + 1]['name'] + ")"
fetures += artistft
except:
pass
else:
fetures = ""
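    # Convert the track duration (ms) to m:ss strings; the +/-1s and +2s
    # variants tolerate rounding differences when matching YouTube durations.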
millis = results['duration_ms']
millis = int(millis)
seconds = (millis / 1000) % 60
minutes = (millis / (1000 * 60)) % 60
seconds = int(seconds)
minutes = int(minutes)
if seconds >= 10:
time_duration = "{0}:{1}".format(minutes, seconds)
time_duration1 = "{0}:{1}".format(minutes, seconds + 1)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
if seconds == 10:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
elif seconds == 58 or seconds == 59:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
if seconds == 59:
time_duration1 = "{0}:0{1}".format(minutes + 1, seconds - 59)
else:
time_duration = "{0}:0{1}".format(minutes, seconds)
time_duration1 = "{0}:0{1}".format(minutes, seconds + 1)
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
if seconds == 9 or seconds == 8:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
if seconds == 9:
time_duration1 = "{0}:{1}".format(minutes, seconds + 1)
elif seconds == 0:
time_duration2 = "{0}:{1}".format(minutes - 1, seconds + 59)
trackname = song + fetures
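    # Download the album cover art so it can be embedded in the MP3 later.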
response = requests.get(results['album']['images'][0]['url'])
DIRCOVER = "songpicts//" + trackname + ".png"
file = open(DIRCOVER, "wb")
file.write(response.content)
file.close()
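    # Search YouTube for "<song> <artist>" and take the first result whose
    # duration matches one of the candidate strings.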
results = list(YoutubeSearch(str(YTSEARCH)).to_dict())
try:
LINKASLI = ''
for URLSSS in results:
timeyt = URLSSS["duration"]
if timeyt == time_duration or timeyt == time_duration1:
LINKASLI = URLSSS['url_suffix']
break
elif timeyt == time_duration2 or timeyt == time_duration3:
LINKASLI = URLSSS['url_suffix']
break
YTLINK = str("https://www.youtube.com/" + LINKASLI)
print(YTLINK)
options = {
# PERMANENT options
'format': 'bestaudio/best',
'keepvideo': False,
'outtmpl': f'song//{trackname}.*',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '320'
}],
# (OPTIONAL options)
'noplaylist': True
}
with youtube_dl.YoutubeDL(options) as mp3:
mp3.download([YTLINK])
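        # Tag the downloaded MP3: artist, album, title, track number, year,
        # Genius lyrics (best effort) and the cover art.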
aud = eyed3.load(f"song//{trackname}.mp3")
aud.tag.artist = artist
aud.tag.album = album
aud.tag.album_artist = artist
aud.tag.title = trackname
aud.tag.track_num = tracknum
aud.tag.year = realese_date
try:
songok = genius.search_song(song, artist)
aud.tag.lyrics.set(songok.lyrics)
except:
pass
aud.tag.images.set(3, open("songpicts//" + trackname + ".png", 'rb').read(), 'image/png')
aud.tag.save()
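        # Send the tagged MP3 back to the Telegram chat with a short caption.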
CAPTION = f'Track: {song}\nAlbum: {album}\nArtist: {artist}'
bot.sendAudio(chat_id, open(f'song//{trackname}.mp3', 'rb'), title=trackname, caption=CAPTION)
except:
bot.sendSticker(chat_id, 'CAACAgQAAxkBAAIFSWBF_m3GHUtZJxQzobvD_iWxYVClAAJuAgACh4hSOhXuVi2-7-xQHgQ')
bot.sendMessage(chat_id, f'404 "{song}" Not Found')
def album(link):
results = spotifyy.album_tracks(link)
albums = results['items']
while results['next']:
results = spotifyy.next(results)
albums.extend(results['items'])
print('[Spotify]Album Found!')
return albums
def artist(link):
results = spotifyy.artist_top_tracks(link)
albums = results['tracks']
print('[Spotify]Artist Found!')
return albums
def searchalbum(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['album']['external_urls']['spotify']
def playlist(link):
results = spotifyy.playlist_tracks(link)
print('[Spotify]Playlist Found!')
return results['items']
def searchsingle(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['href']
def searchartist(searchstr):
results = spotifyy.search(searchstr)
return results['tracks']['items'][0]['artists'][0]["external_urls"]['spotify']
| 32.554945
| 107
| 0.587004
|
cd6efa04794dc23ee7475fdb830adf2aaf55f904
| 2,128
|
py
|
Python
|
spikeinterface/toolkit/postprocessing/tests/test_spike_amplitudes.py
|
marcbue/spikeinterface
|
d3462eeabcb9f0b9816004dd47355e40f4de1ac5
|
[
"MIT"
] | null | null | null |
spikeinterface/toolkit/postprocessing/tests/test_spike_amplitudes.py
|
marcbue/spikeinterface
|
d3462eeabcb9f0b9816004dd47355e40f4de1ac5
|
[
"MIT"
] | null | null | null |
spikeinterface/toolkit/postprocessing/tests/test_spike_amplitudes.py
|
marcbue/spikeinterface
|
d3462eeabcb9f0b9816004dd47355e40f4de1ac5
|
[
"MIT"
] | null | null | null |
import numpy as np
from pathlib import Path
import shutil
from spikeinterface import download_dataset, extract_waveforms
import spikeinterface.extractors as se
from spikeinterface.toolkit import get_spike_amplitudes
def test_get_spike_amplitudes():
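    # Download a small MEArec test recording, extract waveforms, then compute
    # per-spike amplitudes (concatenated and grouped by unit).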
repo = 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data'
remote_path = 'mearec/mearec_test_10s.h5'
local_path = download_dataset(repo=repo, remote_path=remote_path, local_folder=None)
recording = se.MEArecRecordingExtractor(local_path)
sorting = se.MEArecSortingExtractor(local_path)
folder = Path('mearec_waveforms')
we = extract_waveforms(recording, sorting, folder,
ms_before=1., ms_after=2., max_spikes_per_unit=500,
n_jobs=1, chunk_size=30000, load_if_exists=False,
overwrite=True)
amplitudes = get_spike_amplitudes(we, peak_sign='neg', outputs='concatenated', chunk_size=10000, n_jobs=1)
amplitudes = get_spike_amplitudes(we, peak_sign='neg', outputs='by_unit', chunk_size=10000, n_jobs=1)
# shutil.rmtree(folder)
def test_get_spike_amplitudes_parallel():
repo = 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data'
remote_path = 'mearec/mearec_test_10s.h5'
local_path = download_dataset(repo=repo, remote_path=remote_path, local_folder=None)
recording = se.MEArecRecordingExtractor(local_path)
sorting = se.MEArecSortingExtractor(local_path)
folder = Path('mearec_waveforms_all')
we = extract_waveforms(recording, sorting, folder,
ms_before=1., ms_after=2., max_spikes_per_unit=None,
n_jobs=1, chunk_size=30000, load_if_exists=True)
amplitudes1 = get_spike_amplitudes(we, peak_sign='neg', outputs='concatenated', chunk_size=10000, n_jobs=1)
# amplitudes2 = get_spike_amplitudes(we, peak_sign='neg', outputs='concatenated', chunk_size=10000, n_jobs=2)
# assert np.array_equal(amplitudes1, amplitudes2)
# shutil.rmtree(folder)
if __name__ == '__main__':
test_get_spike_amplitudes()
test_get_spike_amplitudes_parallel()
| 40.150943
| 113
| 0.726974
|
9fa60750afe97a603f2c7ead3a28b1f5d2cc391c
| 70,140
|
py
|
Python
|
test/integration/component/test_projects.py
|
bvbharatk/cloud-stack
|
9acfc0307726204b444cc5e81abecb849eca23d9
|
[
"Apache-2.0"
] | 2
|
2016-03-18T09:44:07.000Z
|
2019-01-12T06:52:08.000Z
|
test/integration/component/test_projects.py
|
bvbharatk/cloud-stack
|
9acfc0307726204b444cc5e81abecb849eca23d9
|
[
"Apache-2.0"
] | 1
|
2016-03-04T09:58:31.000Z
|
2016-03-08T17:31:56.000Z
|
test/integration/component/test_projects.py
|
bvbharatk/cloud-stack
|
9acfc0307726204b444cc5e81abecb849eca23d9
|
[
"Apache-2.0"
] | 1
|
2021-12-23T09:22:28.000Z
|
2021-12-23T09:22:28.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Project
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from marvin.remoteSSHClient import remoteSSHClient
import datetime
class Services:
"""Test Project Services
"""
def __init__(self):
self.services = {
"domain": {
"name": "Domain",
},
"project": {
"name": "Project",
"displaytext": "Test project",
},
"mgmt_server": {
"ipaddress": '192.168.100.21',
"username": 'root',
"password": 'password',
"port": 22,
},
"account": {
"email": "administrator@clogeny.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"user": {
"email": "administrator@clogeny.com",
"firstname": "User",
"lastname": "User",
"username": "User",
# Random characters are appended for unique
# username
"password": "password",
},
"disk_offering": {
"displaytext": "Tiny Disk Offering",
"name": "Tiny Disk Offering",
"disksize": 1
},
"volume": {
"diskname": "Test Volume",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 64, # In MBs
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
# Cent OS 5.3 (64 bit)
"sleep": 60,
"timeout": 10,
}
class TestMultipleProjectCreation(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestMultipleProjectCreation,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
# Create domains, account etc.
cls.domain = get_domain(
cls.api_client,
cls.services
)
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account, cls.user]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_01_create_multiple_projects_by_account(self):
""" Verify an account can own multiple projects and can belong to
multiple projects
"""
# Validate the following
        # 1. Create multiple projects. Verify at step 1 an account is allowed
        #    to create multiple projects
        # 2. Add one account to multiple projects. Verify at step 2 an account
        #    is allowed to be added to multiple projects
# Create project as a domain admin
project_1 = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project_1)
self.debug("Created project with domain admin with ID: %s" %
project_1.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project_1.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project_1.name,
list_project.name,
"Check project name from list response"
)
# Create another project as a domain admin
project_2 = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project_2)
self.debug("Created project with domain user with ID: %s" %
project_2.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project_2.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
# Add user to the project
project_1.addAccount(
self.apiclient,
self.user.account.name,
self.user.account.email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project_1.id,
account=self.user.account.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
# Add user to the project
project_2.addAccount(
self.apiclient,
self.user.account.name,
self.user.account.email
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project_2.id,
account=self.user.account.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
return
class TestCrossDomainAccountAdd(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestCrossDomainAccountAdd,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
cls.domain = get_domain(
cls.api_client,
cls.services
)
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
# Create domains, account etc.
cls.new_domain = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.new_domain.id
)
cls._cleanup = [cls.account, cls.user]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_02_cross_domain_account_add(self):
""" Verify No cross domain projects
"""
# Validate the following
# 1. Create a project in a domain.
# 2. Add different domain account to the project. Add account should
# fail
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding user: %s from domain: %s to project: %s" % (
self.user.account.name,
self.user.account.domainid,
project.id
))
with self.assertRaises(Exception):
# Add user to the project from different domain
project.addAccount(
self.apiclient,
self.user.account.name
)
self.debug("User add to project failed!")
return
class TestDeleteAccountWithProject(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeleteAccountWithProject,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
cls.domain = get_domain(
cls.api_client,
cls.services
)
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
# Create account
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_03_delete_account_with_project(self):
""" Test As long as the project exists, its owner can't be removed
"""
# Validate the following
# 1. Create a project.
# 2. Delete account who is owner of the project. Delete account should
# fail
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
# Deleting account who is owner of the project
with self.assertRaises(Exception):
self.account.delete(self.apiclient)
self.debug("Deleting account %s failed!" %
self.account.name)
return
@unittest.skip("Deleting domain doesn't cleanup account")
class TestDeleteDomainWithProject(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeleteDomainWithProject,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
# Create account
cls.domain = Domain.create(
cls.api_client,
cls.services["domain"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_04_delete_domain_with_project(self):
""" Test Verify delete domain with cleanup=true should delete projects
belonging to the domain
"""
# Validate the following
# 1. Create a project in a domain
# 2. Delete domain forcefully. Verify that project is also deleted as
        #    part of domain cleanup
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Deleting domain: %s forcefully" % self.domain.name)
# Delete domain with cleanup=True
self.domain.delete(self.apiclient, cleanup=True)
self.debug("Removed domain: %s" % self.domain.name)
interval = list_configurations(
self.apiclient,
name='account.cleanup.interval'
)
self.assertEqual(
isinstance(interval, list),
True,
"Check if account.cleanup.interval config present"
)
self.debug(
"Sleep for account cleanup interval: %s" %
interval[0].value)
# Sleep to ensure that all resources are deleted
time.sleep(int(interval[0].value))
# Project should be deleted as part of domain cleanup
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
list_projects_reponse,
None,
"Project should be deleted as part of domain cleanup"
)
return
class TestProjectOwners(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestProjectOwners,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone
cls.domain = get_domain(
cls.api_client,
cls.services
)
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
# Create accounts
cls.admin = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.new_admin = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.admin, cls.new_admin]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_05_user_project_owner_promotion(self):
""" Test Verify a project user can be later promoted to become a
owner
"""
# Validate the following
# 1. Create a project.
# 2. Add account to the project. Edit account to make it a project
# owner. verify new user is project owner and old account is
# regular user of the project.
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.admin.account.name,
domainid=self.admin.account.domainid
)
self.cleanup.append(project)
# Cleanup created project at end of test
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.new_admin.account.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.new_admin.account.name,
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.new_admin.account.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
# Update the project with new admin
project.update(
self.apiclient,
account=self.new_admin.account.name
)
# listProjectAccount to verify the user is new admin of the project
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.new_admin.account.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Admin',
"Newly added user is not added as a regular user"
)
# listProjectAccount to verify old user becomes a regular user
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.admin.account.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_06_max_one_project_owner(self):
""" Test Verify there can only be one owner of a project at a time
"""
# Validate the following
# 1. Create a project.
# 2. Add account to the project. Edit account to make it a project
# owner.
# 3. Update project to add another account as an owner
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.admin.account.name,
domainid=self.admin.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
self.user = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup.append(self.user)
self.debug("Created account with ID: %s" %
self.user.account.name)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.new_admin.account.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.new_admin.account.name,
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.new_admin.account.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
self.debug("Updating project with new Admin: %s" %
self.new_admin.account.name)
# Update the project with new admin
project.update(
self.apiclient,
account=self.new_admin.account.name
)
# listProjectAccount to verify the user is new admin of the project
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.new_admin.account.name,
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Admin',
"Newly added user is not added as a regular user"
)
self.debug("Adding %s user to project: %s" % (
self.user.account.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.account.name,
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.user.account.name,
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
self.debug("Updating project with new Admin: %s" %
self.user.account.name)
# Update the project with new admin
project.update(
self.apiclient,
account=self.user.account.name
)
# listProjectAccount to verify the user is new admin of the project
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.user.account.name,
)
self.debug(accounts_reponse)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Admin',
"Newly added user is not added as a regular user"
)
# listProjectAccount to verify old user becomes a regular user
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.new_admin.account.name,
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
return
class TestProjectResources(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestProjectResources,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
cls.domain = get_domain(
cls.api_client,
cls.services
)
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
# Create account, disk offering etc.
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account, cls.disk_offering]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_07_project_resources_account_delete(self):
""" Test Verify after an account is removed from the project, all his
resources stay with the project.
"""
# Validate the following
# 1. Create a project.
# 2. Add some accounts to project. Add resources to the project
# 3. Delete the account. Verify resources are still there after
# account deletion.
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.cleanup.append(project)
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.debug("Adding %s user to project: %s" % (
self.user.account.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.account.name,
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.user.account.name,
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
# Create some resources(volumes) for the projects
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id,
projectid=project.id
)
self.cleanup.append(volume)
# Delete the project user
self.user.delete(self.apiclient)
volumes = Volume.list(self.apiclient, id=volume.id)
self.assertEqual(
isinstance(volumes, list),
True,
"Check for a valid list volumes response"
)
self.assertNotEqual(
len(volumes),
0,
"Check list volumes API response returns a valid list"
)
volume_response = volumes[0]
self.assertEqual(
volume_response.name,
volume.name,
"Volume should exist after project user deletion."
)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_08_cleanup_after_project_delete(self):
""" Test accounts are unassigned from project after project deletion
"""
# Validate the following
# 1. Create a project.
# 2. Add some accounts to project. Add resources to the project
# 3. Delete the project. Verify resources are freed after
# account deletion.
# 4. Verify all accounts are unassigned from project.
# Create project as a domain admin
project = Project.create(
self.apiclient,
self.services["project"],
account=self.account.name,
domainid=self.account.domainid
)
# Cleanup created project at end of test
self.debug("Created project with domain admin with ID: %s" %
project.id)
list_projects_reponse = Project.list(
self.apiclient,
id=project.id,
listall=True
)
self.assertEqual(
isinstance(list_projects_reponse, list),
True,
"Check for a valid list projects response"
)
list_project = list_projects_reponse[0]
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
self.assertEqual(
project.name,
list_project.name,
"Check project name from list response"
)
self.user = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup.append(self.user)
self.debug("Adding %s user to project: %s" % (
self.user.account.name,
project.name
))
# Add user to the project
project.addAccount(
self.apiclient,
self.user.account.name
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=project.id,
account=self.user.account.name,
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(list_projects_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
# Create some resources(volumes) for the projects
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id,
projectid=project.id
)
self.debug("Created a volume: %s for project: %s" % (
volume.id,
project.name
))
# Delete the project user
self.debug("Deleting project: %s" % project.name)
project.delete(self.apiclient)
self.debug("Successfully deleted project: %s" % project.name)
volumes = Volume.list(self.apiclient, id=volume.id)
self.assertEqual(
volumes,
None,
"Resources (volume) should be deleted as part of cleanup"
)
accounts = Project.listAccounts(self.apiclient, projectid=project.id)
self.assertEqual(
accounts,
None,
"Accounts should be un-assigned from project"
)
return
class TestProjectSuspendActivate(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestProjectSuspendActivate,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, domain, template etc
cls.zone = get_zone(cls.api_client, cls.services)
cls.services['mode'] = cls.zone.networktype
cls.domain = get_domain(
cls.api_client,
cls.services
)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
# Create account, service offering, disk offering etc.
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.domain.id
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
# Create project as a domain admin
cls.project = Project.create(
cls.api_client,
cls.services["project"],
account=cls.account.name,
domainid=cls.account.domainid
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = [
cls.project,
cls.account,
cls.disk_offering,
cls.service_offering
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_09_project_suspend(self):
""" Test Verify after an account is removed from the project, all his
resources stay with the project.
"""
# Validate the following
        # 1. Create a project and add an account to it
        # 2. Deploy a VM in the project, then suspend the project
        # 3. Verify the VM is stopped, new resources cannot be created and
        #    a stopped VM cannot be started while the project is suspended
self.debug("Adding %s user to project: %s" % (
self.user.account.name,
self.project.name
))
# Add user to the project
self.project.addAccount(
self.apiclient,
self.user.account.name,
)
# listProjectAccount to verify the user is added to project or not
accounts_reponse = Project.listAccounts(
self.apiclient,
projectid=self.project.id,
account=self.user.account.name,
)
self.assertEqual(
isinstance(accounts_reponse, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(accounts_reponse),
0,
"Check list project response returns a valid project"
)
account = accounts_reponse[0]
self.assertEqual(
account.role,
'Regular',
"Newly added user is not added as a regular user"
)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
self.debug("Created a VM: %s for project: %s" % (
virtual_machine.id,
self.project.id
))
self.debug("Suspending a project: %s" % self.project.name)
self.project.suspend(self.apiclient)
# Check status of all VMs associated with project
vms = VirtualMachine.list(
self.apiclient,
projectid=self.project.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(vms),
0,
"Check list project response returns a valid project"
)
for vm in vms:
self.debug("VM ID: %s state: %s" % (vm.id, vm.state))
self.assertEqual(
vm.state,
'Stopped',
"VM should be in stopped state after project suspension"
)
self.debug("Attempting to create volume in suspended project")
with self.assertRaises(Exception):
# Create some resources(volumes) for the projects
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
diskofferingid=self.disk_offering.id,
projectid=self.project.id
)
self.debug("Volume creation failed")
# Start the stopped VM
self.debug("Attempting to start VM: %s in suspended project" %
virtual_machine.id)
with self.assertRaises(Exception):
virtual_machine.start(self.apiclient)
self.debug("VM start failed!")
# Destroy Stopped VM
virtual_machine.delete(self.apiclient)
self.debug("Destroying VM: %s" % virtual_machine.id)
# Check status of all VMs associated with project
vms = VirtualMachine.list(
self.apiclient,
projectid=self.project.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(vms),
0,
"Check list project response returns a valid project"
)
for vm in vms:
self.debug("VM ID: %s state: %s" % (vm.id, vm.state))
self.assertEqual(
vm.state,
'Destroyed',
"VM should be in stopped state after project suspension"
)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_10_project_activation(self):
""" Test project activation after suspension
"""
# Validate the following
# 1. Activate the project
# 2. Verify project is activated and we are able to add resources
# Activating the project
self.debug("Activating project: %s" % self.project.name)
self.project.activate(self.apiclient)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
projectid=self.project.id
)
self.cleanup.append(virtual_machine)
self.debug("Created a VM: %s for project: %s" % (
virtual_machine.id,
self.project.id
))
# Check status of all VMs associated with project
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"Check for a valid list accounts response"
)
self.assertNotEqual(
len(vms),
0,
"Check list project response returns a valid project"
)
for vm in vms:
self.debug("VM ID: %s state: %s" % (vm.id, vm.state))
self.assertEqual(
vm.state,
'Running',
"VM should be in Running state after project activation"
)
return
| 40.731707
| 97
| 0.430824
|
c926bf25e343ad370ac4ef00195a6a0f9c76ed2e
| 1,255
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/work_item_tracking/v4_1/models/work_item_query_sort_column.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/work_item_tracking/v4_1/models/work_item_query_sort_column.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/work_item_tracking/v4_1/models/work_item_query_sort_column.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class WorkItemQuerySortColumn(Model):
"""WorkItemQuerySortColumn.
:param descending: The direction to sort by.
:type descending: bool
:param field: A work item field.
:type field: :class:`WorkItemFieldReference <work-item-tracking.v4_1.models.WorkItemFieldReference>`
"""
_attribute_map = {
'descending': {'key': 'descending', 'type': 'bool'},
'field': {'key': 'field', 'type': 'WorkItemFieldReference'}
}
def __init__(self, descending=None, field=None):
super(WorkItemQuerySortColumn, self).__init__()
self.descending = descending
self.field = field
| 41.833333
| 105
| 0.528287
|
9099381727424a7340efee3764970fe25401165b
| 2,487
|
py
|
Python
|
tutorials/03-advanced/image_captioning/build_vocab.py
|
jax79sg/pytorch-tutorial
|
dc389b42f6bfdbfd3600044fc840fa5154441dcf
|
[
"MIT"
] | null | null | null |
tutorials/03-advanced/image_captioning/build_vocab.py
|
jax79sg/pytorch-tutorial
|
dc389b42f6bfdbfd3600044fc840fa5154441dcf
|
[
"MIT"
] | null | null | null |
tutorials/03-advanced/image_captioning/build_vocab.py
|
jax79sg/pytorch-tutorial
|
dc389b42f6bfdbfd3600044fc840fa5154441dcf
|
[
"MIT"
] | null | null | null |
import nltk
import pickle
import argparse
from collections import Counter
from pycocotools.coco import COCO
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.idx = 0
def add_word(self, word):
if not word in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def __call__(self, word):
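        # Unknown words fall back to the index of the special <unk> token.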
if not word in self.word2idx:
return self.word2idx['<unk>']
return self.word2idx[word]
def __len__(self):
return len(self.word2idx)
def build_vocab(json, threshold):
"""Build a simple vocabulary wrapper."""
coco = COCO(json)
counter = Counter()
ids = coco.anns.keys()
for i, id in enumerate(ids):
caption = str(coco.anns[id]['caption'])
tokens = nltk.tokenize.word_tokenize(caption.lower())
counter.update(tokens)
if (i+1) % 1000 == 0:
print("[{}/{}] Tokenized the captions.".format(i+1, len(ids)))
# If the word frequency is less than 'threshold', then the word is discarded.
words = [word for word, cnt in counter.items() if cnt >= threshold]
# Create a vocab wrapper and add some special tokens.
vocab = Vocabulary()
vocab.add_word('<pad>')
vocab.add_word('<start>')
vocab.add_word('<end>')
vocab.add_word('<unk>')
# Add the words to the vocabulary.
for i, word in enumerate(words):
vocab.add_word(word)
return vocab
def main(args):
nltk.download('punkt')
vocab = build_vocab(json=args.caption_path, threshold=args.threshold)
vocab_path = args.vocab_path
with open(vocab_path, 'wb') as f:
pickle.dump(vocab, f)
print("Total vocabulary size: {}".format(len(vocab)))
print("Saved the vocabulary wrapper to '{}'".format(vocab_path))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--caption_path', type=str,
default='data/annotations/captions_train2014.json',
help='path for train annotation file')
parser.add_argument('--vocab_path', type=str, default='./data/vocab.pkl',
help='path for saving vocabulary wrapper')
parser.add_argument('--threshold', type=int, default=4,
help='minimum word count threshold')
args = parser.parse_args()
main(args)
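# Hedged usage sketch (not part of the original script): reload a vocab.pkl
# produced by this script and map a caption to token ids. The path and the
# sample caption are illustrative assumptions.
def example_caption_to_ids(vocab_path='./data/vocab.pkl'):
    import pickle
    import nltk
    # Unpickling requires the Vocabulary class defined above to be importable.
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)
    tokens = nltk.tokenize.word_tokenize('a man riding a wave'.lower())
    # Wrap the caption with the special tokens added in build_vocab().
    return [vocab('<start>')] + [vocab(token) for token in tokens] + [vocab('<end>')]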
| 31.884615
| 81
| 0.61922
|
897d13b9e01e72b96089cca4df0559a1a8706fe1
| 5,515
|
py
|
Python
|
project/raw2nii.py
|
MattVUIIS/raw2nii
|
8c294f273e3cbb9a4757b38f97834f132b7ab0b7
|
[
"MIT"
] | 1
|
2019-01-20T07:55:00.000Z
|
2019-01-20T07:55:00.000Z
|
project/raw2nii.py
|
MattVUIIS/raw2nii
|
8c294f273e3cbb9a4757b38f97834f132b7ab0b7
|
[
"MIT"
] | null | null | null |
project/raw2nii.py
|
MattVUIIS/raw2nii
|
8c294f273e3cbb9a4757b38f97834f132b7ab0b7
|
[
"MIT"
] | null | null | null |
#!/bin/env python
from __future__ import division
import argparse
import logging
import numpy as np
import os
import re
import sys
from nii import write_nii_from_par
from write_parrec_from_dicom import write_parrec_from_dicom
from read_dicom import read_dicom
from read_par import read_par
def raw_convert(input_file, output_file, **options):
logger = logging.getLogger('raw2nii')
#Convert from DCM to PAR
if re.search('.dcm$', input_file, re.I) and re.search('.par$', output_file,
re.I):
return convert_dcm2par(input_file, output_file, **options)
#Convert from PAR to NII
if re.search('.par$', input_file, re.I) and re.search('.nii$', output_file,
re.I):
return convert_par2nii(input_file, output_file, **options)
#Error
logger.error('Conversion not supported')
def _get_rec_fname(par_fname):
rec_fname, ext = os.path.splitext(par_fname)
if '.par' == ext:
rec_fname += '.rec'
elif '.PAR' == ext:
rec_fname += '.REC'
return rec_fname
def convert_par2nii(par_fname, nii_fname, no_angulation, no_rescale,
dti_revertb0):
"""
no_angulation : when True: do NOT include affine transformation as defined in PAR
file in hdr part of Nifti file (nifti only, EXPERIMENTAL!)
no_rescale : when True: do NOT store intensity scale as found in PAR
        file (assumed equal for all slices). Do NOT yield DV values.
dti_revertb0 : when False (default), philips ordering is used for DTI data
(eg b0 image last). When True, b0 is saved as first image
in 3D or 4D data
"""
logger = logging.getLogger('raw2nii')
rec_fname = _get_rec_fname(par_fname)
#extract the bval and bvec from the PAR file
par = read_par(par_fname, rec_fname)
if par.problem_reading:
logger.warning('Skipping volume {0} because of reading errors.'
.format(par_fname))
return 1
if 'V3' == par.version:
raise NotImplementedError
elif par.version in ('V4', 'V4.1', 'V4.2'):
#new: loop slices (as in slice_index) and open and close
#files along the way (according to info in index on dynamic
#and mr_type)
if not no_angulation:
logger.warning('Assuming angulation parameters are identical '
'for all scans in (4D) volume!')
if not no_rescale:
logger.warning('Assuming rescaling parameters (see PAR-file) '
'are identical for all slices in volume and all scans in '
'(4D) volume!')
write_nii_from_par(nii_fname, par)
else:
logger.warning('Sorry, but data format extracted using Philips '
'Research File format {0} was not known at the time the '
'raw2nii software was developed'.format(par.version))
return 0
def convert_dcm2par(dcm_fname, par_fname, **options):
logger = logging.getLogger('raw2nii')
dcm = read_dicom(dcm_fname)
rec_fname = _get_rec_fname(par_fname)
write_parrec_from_dicom(par_fname, rec_fname, dcm)
return 0
def _generate_filename(sl, nr_dyn, nr_mrtypes, nr_realmrtypes, nr_echos,
        nr_diffgrads, nr_bvalues):
    #The nr_* counts are taken as parameters; they are not defined anywhere
    #else in this module.
if nr_dyn > 1:
dyn_suffix = '-{0:04d}'.format(
sl.dynamic_scan_number)
dyn_ndsuffix = '-d{0:04d}'.format(nr_dyn)
else:
dyn_suffix = '-{0:04d}'.format(1)
dyn_ndsuffix = ''
if nr_mrtypes > 1:
mrtype_suffix = '-s{0:3d}'.format(
sl.scanning_sequence)
mrtype_ndsuffix = '-s{0:03d}'.format(nr_mrtypes)
else:
mrtype_suffix = ''
mrtype_ndsuffix = ''
if nr_realmrtypes > 1:
realmrtype_suffix = '-t{0:03d}'.format(
sl.image_type_mr)
realmrtype_ndsuffix = '-t{0:03d}'.format(
nr_realmrtypes)
else:
realmrtype_suffix = ''
realmrtype_ndsuffix = ''
if nr_echos > 1:
echo_suffix = '-e{0:03d}'.format(
sl.echo_number)
echo_ndsuffix = '-e{0:03d}'.format(nr_echos)
else:
echo_suffix = ''
echo_ndsuffix = ''
if nr_diffgrads > 1:
diffgrad_suffix = '-g{0:03d}'.format(
sl.gradient_orientation_number)
diffgrad_ndsuffix = '-g{0:03d}'.format(
nr_diffgrads)
else:
diffgrad_suffix = ''
diffgrad_ndsuffix = ''
if nr_bvalues > 1:
bval_suffix = '-b{0:03d}'.format(
sl.diffusion_b_value_number)
bval_ndsuffix = '-b{0:03d}'.format(nr_bvalues)
    else:
        bval_suffix = ''
        bval_ndsuffix = ''
    #Assumed return shape: expose the suffixes built above to the caller.
    return {'dyn': (dyn_suffix, dyn_ndsuffix),
        'mrtype': (mrtype_suffix, mrtype_ndsuffix),
        'realmrtype': (realmrtype_suffix, realmrtype_ndsuffix),
        'echo': (echo_suffix, echo_ndsuffix),
        'diffgrad': (diffgrad_suffix, diffgrad_ndsuffix),
        'bval': (bval_suffix, bval_ndsuffix)}
def main():
logger = logging.getLogger('raw2nii')
logger.setLevel(logging.INFO)
_formatter = logging.Formatter('%(levelname)s %(asctime)s %(filename)s: '
'%(message)s')
_stream_handler = logging.StreamHandler()
_stream_handler.setFormatter(_formatter)
logger.addHandler(_stream_handler)
parser = argparse.ArgumentParser()
parser.add_argument('--debug', '-d', action='store_true')
    #Passing these flags sets the corresponding option to True (see the
    #convert_par2nii docstring); they default to False.
    parser.add_argument('--no-rescale', action='store_true')
    parser.add_argument('--no-angulation', action='store_true')
parser.add_argument('--dti_revertb0', action='store_true')
parser.add_argument('input_file', type=str)
parser.add_argument('output_file', type=str)
options = parser.parse_args()
if options.debug:
logger.setLevel(logging.DEBUG)
options = vars(options)
options.pop('debug', None)
sys.exit(raw_convert(**options))
if __name__ == '__main__':
main()
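#Hedged usage sketch (not part of the original module); file names are
#illustrative. A PAR -> NIfTI conversion maps onto raw_convert() like this,
#or from the shell: python raw2nii.py scan.PAR scan.nii
def example_par_to_nii():
    return raw_convert('scan.PAR', 'scan.nii',
        no_angulation=False, no_rescale=False, dti_revertb0=False)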
| 36.045752
| 91
| 0.633001
|
66a38400f1b321754c2fa5d0a3669c83393f5130
| 201
|
py
|
Python
|
dffml/skel/model/setup.py
|
Patil2099/dffml
|
9310c1ad55a7339e5d15786d4b9d890283f52ec2
|
[
"MIT"
] | null | null | null |
dffml/skel/model/setup.py
|
Patil2099/dffml
|
9310c1ad55a7339e5d15786d4b9d890283f52ec2
|
[
"MIT"
] | 1
|
2019-10-17T17:34:14.000Z
|
2019-10-17T17:34:14.000Z
|
dffml/skel/model/setup.py
|
raghav-ys/dffml
|
2a23f55acaac69c7a1840260b0ede694216c2007
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from dffml_setup_common import SETUP_KWARGS, IMPORT_NAME
SETUP_KWARGS["entry_points"] = {
"dffml.model": [f"misc = {IMPORT_NAME}.misc:Misc"]
}
setup(**SETUP_KWARGS)
| 20.1
| 56
| 0.751244
|
59d7f30a9ec77cebae88e0742e008b070d77a90e
| 2,424
|
py
|
Python
|
snoopy/server/transforms/fetchTweetsByLocation.py
|
aiddenkeli/Snoopy
|
dd76180145981b3574b419edce39dbb060bd8c8c
|
[
"MIT"
] | 432
|
2015-01-07T09:56:32.000Z
|
2022-03-28T12:15:42.000Z
|
snoopy/server/transforms/fetchTweetsByLocation.py
|
aiddenkeli/Snoopy
|
dd76180145981b3574b419edce39dbb060bd8c8c
|
[
"MIT"
] | 9
|
2015-01-31T10:07:28.000Z
|
2021-09-10T08:13:47.000Z
|
snoopy/server/transforms/fetchTweetsByLocation.py
|
aiddenkeli/Snoopy
|
dd76180145981b3574b419edce39dbb060bd8c8c
|
[
"MIT"
] | 135
|
2015-01-07T15:06:35.000Z
|
2022-01-24T02:19:55.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# glenn@sensepost.com
# Snoopy // 2012
# By using this code you agree to abide by the supplied LICENSE.txt
from Maltego import *
import logging
import requests
import json
import stawk_db
import re
import sys
logging.basicConfig(level=logging.DEBUG,filename='/tmp/maltego_logs.txt',format='%(asctime)s %(levelname)s: %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
sys.stderr = sys.stdout
def main():
print "Content-type: xml\n\n";
MaltegoXML_in = sys.stdin.read()
if MaltegoXML_in <> '':
m = MaltegoMsg(MaltegoXML_in)
cursor=stawk_db.dbconnect()
TRX = MaltegoTransform()
try:
logging.debug("Here we go")
for item in m.TransformSettings.keys():
logging.debug("N:"+item+" V:"+m.TransformSettings[item])
# logging.debug(MaltegoXML_in)
radius="5" #miles
lat=m.AdditionalFields['lat']
lng=m.AdditionalFields['long']
if 'radius' in m.AdditionalFields:
    radius=m.AdditionalFields['radius']
   logging.debug("Tweep coords to search - %s,%s (%s miles)" %(lat,lng,radius))
r=requests.get("https://search.twitter.com/search.json?q=geocode:%s,%s,%smi"%(lat,lng,radius))
tw=json.loads(r.text)
logging.debug("Tweep results - %d"%len(tw['results']))
for tweep in tw['results']:
name=tweep['from_user_name'].encode('utf8','xmlcharrefreplace')
username=tweep['from_user'].encode('utf8','xmlcharrefreplace')
uid=tweep['from_user_id_str'].encode('utf8','xmlcharrefreplace')
recent_tweet=tweep['text'].encode('utf8','xmlcharrefreplace')
img=tweep['profile_image_url'].encode('utf8','xmlcharrefreplace')
profile_page="http://twitter.com/%s"%username
largephoto=re.sub('_normal','',img)
NewEnt=TRX.addEntity("maltego.affiliation.Twitter", name)
NewEnt.addAdditionalFields("uid","UID","strict",uid)
NewEnt.addAdditionalFields("affiliation.profile-url","Profile URL","strict",profile_page)
NewEnt.addAdditionalFields("twitter.screen-name","Screen Name","strict",username)
NewEnt.addAdditionalFields("person.fullname","Real Name","strict",name)
NewEnt.addAdditionalFields("photo","Photo","nostrict",largephoto)
NewEnt.addAdditionalFields("tweet","Recent Tweet","nostrict",recent_tweet)
NewEnt.setIconURL(img)
except Exception, e:
logging.debug("Exception:")
logging.debug(e)
TRX.returnOutput()
main()
| 32.32
| 149
| 0.680693
|
22c4b8c40866b92bfc576a881bba146025752846
| 4,070
|
py
|
Python
|
flaskbb/utils/widgets.py
|
konstantin1985/forum
|
7d4de24ccc932e9764699d89c8cc9d210b7fac7f
|
[
"BSD-3-Clause"
] | null | null | null |
flaskbb/utils/widgets.py
|
konstantin1985/forum
|
7d4de24ccc932e9764699d89c8cc9d210b7fac7f
|
[
"BSD-3-Clause"
] | null | null | null |
flaskbb/utils/widgets.py
|
konstantin1985/forum
|
7d4de24ccc932e9764699d89c8cc9d210b7fac7f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
flaskbb.utils.widgets
~~~~~~~~~~~~~~~~~~~~~
Additional widgets for wtforms
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import simplejson as json
from datetime import datetime
from wtforms.widgets.core import Select, HTMLString, html_params
class SelectBirthdayWidget(object):
"""Renders a DateTime field with 3 selects.
For more information see: http://stackoverflow.com/a/14664504
"""
FORMAT_CHOICES = {
'%d': [(x, str(x)) for x in range(1, 32)],
'%m': [(x, str(x)) for x in range(1, 13)]
}
FORMAT_CLASSES = {
'%d': 'select_date_day',
'%m': 'select_date_month',
'%Y': 'select_date_year'
}
def __init__(self, years=range(1930, datetime.utcnow().year + 1)):
"""Initialzes the widget.
:param years: The min year which should be chooseable.
Defatuls to ``1930``.
"""
super(SelectBirthdayWidget, self).__init__()
self.FORMAT_CHOICES['%Y'] = [(x, str(x)) for x in years]
def __call__(self, field, **kwargs):
field_id = kwargs.pop('id', field.id)
html = []
allowed_format = ['%d', '%m', '%Y']
surrounded_div = kwargs.pop('surrounded_div', None)
css_class = kwargs.get('class', None)
for date_format in field.format.split():
if date_format in allowed_format:
choices = self.FORMAT_CHOICES[date_format]
id_suffix = date_format.replace('%', '-')
id_current = field_id + id_suffix
if css_class is not None: # pragma: no cover
select_class = "{} {}".format(
css_class, self.FORMAT_CLASSES[date_format]
)
else:
select_class = self.FORMAT_CLASSES[date_format]
kwargs['class'] = select_class
try:
del kwargs['placeholder']
except KeyError:
pass
if surrounded_div is not None:
html.append('<div class="%s">' % surrounded_div)
html.append('<select %s>' % html_params(name=field.name,
id=id_current,
**kwargs))
if field.data:
current_value = int(field.data.strftime(date_format))
else:
current_value = None
for value, label in choices:
selected = (value == current_value)
# Defaults to blank
if value == 1 or value == 1930:
html.append(Select.render_option("None", " ", selected))
html.append(Select.render_option(value, label, selected))
html.append('</select>')
if surrounded_div is not None:
html.append("</div>")
html.append(' ')
return HTMLString(''.join(html))
class MultiSelect(object):
"""
Renders a megalist-multiselect widget.
The field must provide an `iter_choices()` method which the widget will
call on rendering; this method must yield tuples of
`(value, label, selected)`.
"""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
src_list, dst_list = [], []
for val, label, selected in field.iter_choices():
if selected:
dst_list.append({'label':label, 'listValue':val})
else:
src_list.append({'label':label, 'listValue':val})
kwargs.update(
{
'data-provider-src':json.dumps(src_list),
'data-provider-dst':json.dumps(dst_list)
}
)
html = ['<div %s>' % html_params(name=field.name, **kwargs)]
html.append('</div>')
return HTMLString(''.join(html))
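# Hedged usage sketch (not part of FlaskBB): back a wtforms DateField with the
# SelectBirthdayWidget above. The form, label and year range are illustrative.
def example_birthday_form():
    from wtforms import Form, DateField
    class ProfileForm(Form):
        # The format string lists the three parts the widget knows how to render.
        birthday = DateField('Birthday', format='%d %m %Y',
                             widget=SelectBirthdayWidget(years=range(1950, 2015)))
    return ProfileForm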
| 32.301587
| 80
| 0.51769
|
2dfeb8329f0aa7ab7186075407c7eedaa3bd3242
| 175
|
py
|
Python
|
ioutracker/inference/__init__.py
|
jiankaiwang/ioutracker
|
8a55925fd5488a340b2ca5095d35105cc34b6cb8
|
[
"MIT"
] | 3
|
2020-05-15T02:49:56.000Z
|
2022-02-10T15:57:20.000Z
|
ioutracker/inference/__init__.py
|
jiankaiwang/ioutracker
|
8a55925fd5488a340b2ca5095d35105cc34b6cb8
|
[
"MIT"
] | null | null | null |
ioutracker/inference/__init__.py
|
jiankaiwang/ioutracker
|
8a55925fd5488a340b2ca5095d35105cc34b6cb8
|
[
"MIT"
] | 2
|
2020-06-23T09:28:34.000Z
|
2020-08-13T02:38:22.000Z
|
import os, sys
filePath = os.path.abspath(__file__)
currentFolder = os.path.dirname(filePath)
sys.path.append(currentFolder)
from .MOTDet17Main import outputAsFramesToVideo
| 21.875
| 47
| 0.817143
|
0236e82b2ac6bd777f2bd3b0afc8e5df76f47cc3
| 2,052
|
py
|
Python
|
docker-images/taigav2/taiga-back/taiga/projects/services/filters.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | 1
|
2017-05-29T19:01:06.000Z
|
2017-05-29T19:01:06.000Z
|
docker-images/taigav2/taiga-back/taiga/projects/services/filters.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
docker-images/taigav2/taiga-back/taiga/projects/services/filters.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from contextlib import closing
from django.db import connection
def _get_project_tags(project):
result = set()
tags = project.tags or []
for tag in tags:
result.add(tag)
return result
def _get_stories_tags(project):
result = set()
for tags in project.user_stories.values_list("tags", flat=True):
if tags:
result.update(tags)
return result
def _get_tasks_tags(project):
result = set()
for tags in project.tasks.values_list("tags", flat=True):
if tags:
result.update(tags)
return result
def _get_issues_tags(project):
result = set()
for tags in project.issues.values_list("tags", flat=True):
if tags:
result.update(tags)
return result
# Public api
def get_all_tags(project):
"""
    Given a project, return a sorted list of the unique
    tags found on it.
"""
result = set()
result.update(_get_project_tags(project))
result.update(_get_issues_tags(project))
result.update(_get_stories_tags(project))
result.update(_get_tasks_tags(project))
return sorted(result)
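# Hedged usage sketch (not part of the original module): collect the tags of a
# Project instance and report how many are in use; the counting is illustrative.
def example_tag_count(project):
    tags = get_all_tags(project)
    return len(tags), tags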
| 30.176471
| 74
| 0.705166
|
ffa1f33b15f438cbb26e1873ea0863911e2f48cb
| 259
|
py
|
Python
|
supervisor/supervisor/supervisor/doctype/supervisor_settings/supervisor_settings.py
|
amolash/superviser
|
1e7946f815de35ca0ea013c2ecdd9d6d063a0fd9
|
[
"MIT"
] | null | null | null |
supervisor/supervisor/supervisor/doctype/supervisor_settings/supervisor_settings.py
|
amolash/superviser
|
1e7946f815de35ca0ea013c2ecdd9d6d063a0fd9
|
[
"MIT"
] | null | null | null |
supervisor/supervisor/supervisor/doctype/supervisor_settings/supervisor_settings.py
|
amolash/superviser
|
1e7946f815de35ca0ea013c2ecdd9d6d063a0fd9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, amol and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SupervisorSettings(Document):
pass
| 23.545455
| 49
| 0.783784
|
9e0e890cd5bef55450ab23567dda00550a1af047
| 5,744
|
py
|
Python
|
sdk/python/pulumi_azure_native/documentdb/v20200601preview/get_sql_resource_sql_trigger.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20200601preview/get_sql_resource_sql_trigger.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20200601preview/get_sql_resource_sql_trigger.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSqlResourceSqlTriggerResult',
'AwaitableGetSqlResourceSqlTriggerResult',
'get_sql_resource_sql_trigger',
]
@pulumi.output_type
class GetSqlResourceSqlTriggerResult:
"""
An Azure Cosmos DB trigger.
"""
def __init__(__self__, id=None, identity=None, location=None, name=None, resource=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource and not isinstance(resource, dict):
raise TypeError("Expected argument 'resource' to be a dict")
pulumi.set(__self__, "resource", resource)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the ARM resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Identity for the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resource(self) -> Optional['outputs.SqlTriggerGetPropertiesResponseResource']:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSqlResourceSqlTriggerResult(GetSqlResourceSqlTriggerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlResourceSqlTriggerResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
resource=self.resource,
tags=self.tags,
type=self.type)
def get_sql_resource_sql_trigger(account_name: Optional[str] = None,
container_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
trigger_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlTriggerResult:
"""
An Azure Cosmos DB trigger.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str trigger_name: Cosmos DB trigger name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['containerName'] = container_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
__args__['triggerName'] = trigger_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20200601preview:getSqlResourceSqlTrigger', __args__, opts=opts, typ=GetSqlResourceSqlTriggerResult).value
return AwaitableGetSqlResourceSqlTriggerResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
resource=__ret__.resource,
tags=__ret__.tags,
type=__ret__.type)
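# Hedged usage sketch (not part of the generated SDK); every argument value
# below is an illustrative placeholder for an existing Cosmos DB deployment.
def example_lookup() -> AwaitableGetSqlResourceSqlTriggerResult:
    return get_sql_resource_sql_trigger(
        account_name='my-cosmos-account',
        container_name='my-container',
        database_name='my-database',
        resource_group_name='my-resource-group',
        trigger_name='my-trigger')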
| 38.039735
| 505
| 0.646588
|
76209c9882ef2cca098977052418c9cecc804683
| 353
|
py
|
Python
|
py-if-else.py
|
FTraian/hackerrank-python
|
203bbfd644eb3323f179c6da21fbf569ad528885
|
[
"CC0-1.0"
] | null | null | null |
py-if-else.py
|
FTraian/hackerrank-python
|
203bbfd644eb3323f179c6da21fbf569ad528885
|
[
"CC0-1.0"
] | null | null | null |
py-if-else.py
|
FTraian/hackerrank-python
|
203bbfd644eb3323f179c6da21fbf569ad528885
|
[
"CC0-1.0"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input().strip())
if n % 2 == 1:
print('Weird')
else:
if n in range(2, 5):
print('Not Weird')
elif n in range(6, 21):
print('Weird')
elif n > 20:
print('Not Weird')
| 17.65
| 31
| 0.504249
|
94d7f08051a055a7665ad67f385db5bb1f6266b7
| 416
|
py
|
Python
|
django_git/urls.py
|
sethtrain/django-git
|
5d5b1222c55946b6de9c9db5820438d791cc3b4e
|
[
"BSD-3-Clause"
] | 35
|
2015-02-07T14:35:45.000Z
|
2021-11-16T10:31:50.000Z
|
django_git/urls.py
|
sethtrain/django-git
|
5d5b1222c55946b6de9c9db5820438d791cc3b4e
|
[
"BSD-3-Clause"
] | 1
|
2015-11-22T19:34:17.000Z
|
2015-11-22T19:34:17.000Z
|
django_git/urls.py
|
sethtrain/django-git
|
5d5b1222c55946b6de9c9db5820438d791cc3b4e
|
[
"BSD-3-Clause"
] | 13
|
2015-02-02T14:54:49.000Z
|
2020-10-24T10:17:37.000Z
|
from django.conf.urls.defaults import *
urlpatterns = []
urlpatterns += patterns('django_git.views',
url(r'^(?P<repo>[\w_-]+)/commit/(?P<commit>[\w\d]+)/blob/$', 'blob', name='django-git-blob'),
url(r'^(?P<repo>[\w_-]+)/commit/(?P<commit>[\w\d]+)/$', 'commit', name='django-git-commit'),
url(r'^(?P<repo>[\w_-]+)/$', 'repo', name='django-git-repo'),
url(r'^$', 'index', name='django-git-index'),
)
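# Hedged usage sketch (not part of the original module): in a project-level
# urls.py these patterns are typically mounted under a prefix, e.g.
#   urlpatterns = patterns('',
#       (r'^git/', include('django_git.urls')),
#   )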
| 37.818182
| 97
| 0.572115
|
51a5f0ee62a41fe109de83ade7baf5351c47f2bf
| 741
|
py
|
Python
|
init_sqldb_environment.py
|
pythononwheels/coronadash
|
876258d00f5b8bcccd4746713a15b3de54534fcf
|
[
"MIT"
] | null | null | null |
init_sqldb_environment.py
|
pythononwheels/coronadash
|
876258d00f5b8bcccd4746713a15b3de54534fcf
|
[
"MIT"
] | null | null | null |
init_sqldb_environment.py
|
pythononwheels/coronadash
|
876258d00f5b8bcccd4746713a15b3de54534fcf
|
[
"MIT"
] | null | null | null |
#
# adapts the alembic migrations ini
# to changes in the pow db config
#
from coronadash.database.sqldblib import conn_str
import configparser
from coronadash.conf.config import database
def init_migrations(stdout=False):
#config= configparser.ConfigParser.RawConfigParser()
config= configparser.ConfigParser()
config.read(database["sql"]["alembic.ini"])
config.set('alembic','sqlalchemy.url',conn_str)
with open(database["sql"]["alembic.ini"], 'w') as configfile:
config.write(configfile)
if stdout:
print(70*"-")
print("updated migration environment: " + conn_str)
print(70*"-")
return True
def main():
init_migrations(stdout=True)
if __name__=="__main__":
main()
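# Hedged sketch (not part of the original module): after init_migrations()
# runs, the [alembic] section of the configured ini carries the PoW
# connection string, roughly:
#   [alembic]
#   sqlalchemy.url = <conn_str imported from coronadash.database.sqldblib>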
| 28.5
| 65
| 0.695007
|
39709237bcf454930bb87074e99321c0bda1abc4
| 170
|
py
|
Python
|
ddd_base/value_object.py
|
sunwei/ddd-base
|
acf227ab8dba5f110f4dee00aa38966a74dd4011
|
[
"MIT"
] | null | null | null |
ddd_base/value_object.py
|
sunwei/ddd-base
|
acf227ab8dba5f110f4dee00aa38966a74dd4011
|
[
"MIT"
] | null | null | null |
ddd_base/value_object.py
|
sunwei/ddd-base
|
acf227ab8dba5f110f4dee00aa38966a74dd4011
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Domain Driven Design base framework - Value Object."""
class ValueObject(object):
def same_as(self, other):
return self == other
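# Hedged usage sketch (not part of the package): a value object compares by
# attribute values, so same_as() piggybacks on __eq__. Point is illustrative.
class Point(ValueObject):
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __eq__(self, other):
        return isinstance(other, Point) and (self.x, self.y) == (other.x, other.y)
# Point(1, 2).same_as(Point(1, 2))  # -> True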
| 21.25
| 57
| 0.635294
|
3a9a3996a64867764b2a49878b508d67fab4324d
| 809
|
py
|
Python
|
torchfm/model/ffm.py
|
yfreedomliTHU/pytorch-fm
|
5983763b4a1659a67831d14edd29392339d6dd0e
|
[
"MIT"
] | 734
|
2019-06-03T12:55:26.000Z
|
2022-03-31T06:56:10.000Z
|
torchfm/model/ffm.py
|
yfreedomliTHU/pytorch-fm
|
5983763b4a1659a67831d14edd29392339d6dd0e
|
[
"MIT"
] | 34
|
2019-07-01T09:11:22.000Z
|
2022-02-16T12:28:34.000Z
|
torchfm/model/ffm.py
|
yfreedomliTHU/pytorch-fm
|
5983763b4a1659a67831d14edd29392339d6dd0e
|
[
"MIT"
] | 191
|
2019-07-08T14:57:14.000Z
|
2022-03-18T07:19:06.000Z
|
import torch
from torchfm.layer import FeaturesLinear, FieldAwareFactorizationMachine
class FieldAwareFactorizationMachineModel(torch.nn.Module):
"""
A pytorch implementation of Field-aware Factorization Machine.
Reference:
Y Juan, et al. Field-aware Factorization Machines for CTR Prediction, 2015.
"""
def __init__(self, field_dims, embed_dim):
super().__init__()
self.linear = FeaturesLinear(field_dims)
self.ffm = FieldAwareFactorizationMachine(field_dims, embed_dim)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
ffm_term = torch.sum(torch.sum(self.ffm(x), dim=1), dim=1, keepdim=True)
x = self.linear(x) + ffm_term
return torch.sigmoid(x.squeeze(1))
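# Hedged usage sketch (not part of torchfm): field_dims and the batch are
# illustrative; x must be a Long tensor of shape (batch_size, num_fields) with
# per-field values below the corresponding field_dim.
def example_forward():
    model = FieldAwareFactorizationMachineModel(field_dims=[10, 20, 30], embed_dim=4)
    x = torch.randint(0, 10, (8, 3), dtype=torch.long)
    return model(x)  # predicted click probabilities, shape (8,)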
| 31.115385
| 83
| 0.673671
|
e1d18a8d7d061c9b96f6157d19f50a0b7a051940
| 8,617
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/cdn/v20191231/get_origin.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/cdn/v20191231/get_origin.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/cdn/v20191231/get_origin.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetOriginResult',
'AwaitableGetOriginResult',
'get_origin',
]
@pulumi.output_type
class GetOriginResult:
"""
CDN origin is the source of the content being delivered via CDN. When the edge nodes represented by an endpoint do not have the requested content cached, they attempt to fetch it from one or more of the configured origins.
"""
def __init__(__self__, enabled=None, host_name=None, http_port=None, https_port=None, id=None, name=None, origin_host_header=None, priority=None, provisioning_state=None, resource_state=None, type=None, weight=None):
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if host_name and not isinstance(host_name, str):
raise TypeError("Expected argument 'host_name' to be a str")
pulumi.set(__self__, "host_name", host_name)
if http_port and not isinstance(http_port, int):
raise TypeError("Expected argument 'http_port' to be a int")
pulumi.set(__self__, "http_port", http_port)
if https_port and not isinstance(https_port, int):
raise TypeError("Expected argument 'https_port' to be a int")
pulumi.set(__self__, "https_port", https_port)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if origin_host_header and not isinstance(origin_host_header, str):
raise TypeError("Expected argument 'origin_host_header' to be a str")
pulumi.set(__self__, "origin_host_header", origin_host_header)
if priority and not isinstance(priority, int):
raise TypeError("Expected argument 'priority' to be a int")
pulumi.set(__self__, "priority", priority)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_state and not isinstance(resource_state, str):
raise TypeError("Expected argument 'resource_state' to be a str")
pulumi.set(__self__, "resource_state", resource_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if weight and not isinstance(weight, int):
raise TypeError("Expected argument 'weight' to be a int")
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Origin is enabled for load balancing or not
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="hostName")
def host_name(self) -> str:
"""
        The address of the origin. Domain names, IPv4 addresses, and IPv6 addresses are supported. This should be unique across all origins in an endpoint.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[int]:
"""
The value of the HTTP port. Must be between 1 and 65535.
"""
return pulumi.get(self, "http_port")
@property
@pulumi.getter(name="httpsPort")
def https_port(self) -> Optional[int]:
"""
The value of the HTTPS port. Must be between 1 and 65535.
"""
return pulumi.get(self, "https_port")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="originHostHeader")
def origin_host_header(self) -> Optional[str]:
"""
The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default. If endpoint uses multiple origins for load balancing, then the host header at endpoint is ignored and this one is considered.
"""
return pulumi.get(self, "origin_host_header")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
"""
        Priority of origin in given origin group for load balancing. Higher priorities will not be used for load balancing if any lower priority origin is healthy. Must be between 1 and 5.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning status of the origin.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> str:
"""
Resource status of the origin.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def weight(self) -> Optional[int]:
"""
Weight of the origin in given origin group for load balancing. Must be between 1 and 1000
"""
return pulumi.get(self, "weight")
class AwaitableGetOriginResult(GetOriginResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetOriginResult(
enabled=self.enabled,
host_name=self.host_name,
http_port=self.http_port,
https_port=self.https_port,
id=self.id,
name=self.name,
origin_host_header=self.origin_host_header,
priority=self.priority,
provisioning_state=self.provisioning_state,
resource_state=self.resource_state,
type=self.type,
weight=self.weight)
def get_origin(endpoint_name: Optional[str] = None,
origin_name: Optional[str] = None,
profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOriginResult:
"""
CDN origin is the source of the content being delivered via CDN. When the edge nodes represented by an endpoint do not have the requested content cached, they attempt to fetch it from one or more of the configured origins.
:param str endpoint_name: Name of the endpoint under the profile which is unique globally.
:param str origin_name: Name of the origin which is unique within the endpoint.
:param str profile_name: Name of the CDN profile which is unique within the resource group.
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['endpointName'] = endpoint_name
__args__['originName'] = origin_name
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:cdn/v20191231:getOrigin', __args__, opts=opts, typ=GetOriginResult).value
return AwaitableGetOriginResult(
enabled=__ret__.enabled,
host_name=__ret__.host_name,
http_port=__ret__.http_port,
https_port=__ret__.https_port,
id=__ret__.id,
name=__ret__.name,
origin_host_header=__ret__.origin_host_header,
priority=__ret__.priority,
provisioning_state=__ret__.provisioning_state,
resource_state=__ret__.resource_state,
type=__ret__.type,
weight=__ret__.weight)
| 40.07907
| 404
| 0.65835
|
447f46e79417bf658897f875764d0fdfd22ac8a9
| 36,356
|
py
|
Python
|
angr/analyses/decompiler/clinic.py
|
fmagin/angr
|
c41563b0f82a3d031c3aa482ebe2973c87ec4adb
|
[
"BSD-2-Clause"
] | 6,132
|
2015-08-06T23:24:47.000Z
|
2022-03-31T21:49:34.000Z
|
angr/analyses/decompiler/clinic.py
|
fmagin/angr
|
c41563b0f82a3d031c3aa482ebe2973c87ec4adb
|
[
"BSD-2-Clause"
] | 2,272
|
2015-08-10T08:40:07.000Z
|
2022-03-31T23:46:44.000Z
|
angr/analyses/decompiler/clinic.py
|
fmagin/angr
|
c41563b0f82a3d031c3aa482ebe2973c87ec4adb
|
[
"BSD-2-Clause"
] | 1,155
|
2015-08-06T23:37:39.000Z
|
2022-03-31T05:54:11.000Z
|
from collections import defaultdict
import logging
from typing import Dict, List, Tuple, Set, Optional, Iterable, Union, Type, TYPE_CHECKING
import networkx
import ailment
from ...knowledge_base import KnowledgeBase
from ...codenode import BlockNode
from ...utils import timethis
from ...calling_conventions import SimRegArg, SimStackArg, SimFunctionArgument
from ...sim_type import SimTypeChar, SimTypeInt, SimTypeLongLong, SimTypeShort, SimTypeFunction, SimTypeBottom
from ...sim_variable import SimVariable, SimStackVariable, SimRegisterVariable, SimMemoryVariable
from ...knowledge_plugins.key_definitions.constants import OP_BEFORE
from ...procedures.stubs.UnresolvableCallTarget import UnresolvableCallTarget
from ...procedures.stubs.UnresolvableJumpTarget import UnresolvableJumpTarget
from .. import Analysis, register_analysis
from ..cfg.cfg_base import CFGBase
from .ailgraph_walker import AILGraphWalker
from .ailblock_walker import AILBlockWalker
from .optimization_passes import get_default_optimization_passes, OptimizationPassStage
if TYPE_CHECKING:
from angr.knowledge_plugins.cfg import CFGModel
from .decompilation_cache import DecompilationCache
from .peephole_optimizations import PeepholeOptimizationStmtBase, PeepholeOptimizationExprBase
l = logging.getLogger(name=__name__)
class Clinic(Analysis):
"""
A Clinic deals with AILments.
"""
def __init__(self, func,
remove_dead_memdefs=False,
exception_edges=False,
sp_tracker_track_memory=True,
optimization_passes=None,
cfg=None,
peephole_optimizations: Optional[Iterable[Union[Type['PeepholeOptimizationStmtBase'],Type['PeepholeOptimizationExprBase']]]]=None, # pylint:disable=line-too-long
must_struct: Optional[Set[str]]=None,
variable_kb=None,
reset_variable_names=False,
cache: Optional['DecompilationCache']=None,
):
if not func.normalized:
raise ValueError("Decompilation must work on normalized function graphs.")
self.function = func
self.graph = None
self.arg_list = None
self.variable_kb = variable_kb
self._func_graph: Optional[networkx.DiGraph] = None
self._ail_manager = None
self._blocks_by_addr_and_size = { }
self._remove_dead_memdefs = remove_dead_memdefs
self._exception_edges = exception_edges
self._sp_tracker_track_memory = sp_tracker_track_memory
self._cfg: Optional['CFGModel'] = cfg
self.peephole_optimizations = peephole_optimizations
self._must_struct = must_struct
self._reset_variable_names = reset_variable_names
self._cache = cache
# sanity checks
if not self.kb.functions:
l.warning('No function is available in kb.functions. It will lead to a suboptimal conversion result.')
if optimization_passes is not None:
self._optimization_passes = optimization_passes
else:
self._optimization_passes = get_default_optimization_passes(self.project.arch, self.project.simos.name)
l.debug("Get %d optimization passes for the current binary.", len(self._optimization_passes))
self._analyze()
#
# Public methods
#
def block(self, addr, size):
"""
Get the converted block at the given specific address with the given size.
:param int addr:
:param int size:
:return:
"""
try:
return self._blocks_by_addr_and_size[(addr, size)]
except KeyError:
return None
def dbg_repr(self):
"""
:return:
"""
s = ""
for block in sorted(self.graph.nodes(), key=lambda x: x.addr):
s += str(block) + "\n\n"
return s
#
# Private methods
#
def _analyze(self):
# Set up the function graph according to configurations
self._update_progress(0., text="Setting up function graph")
self._set_function_graph()
# Remove alignment blocks
self._update_progress(5., text="Removing alignment blocks")
self._remove_alignment_blocks()
# if the graph is empty, don't continue
if not self._func_graph:
return
# Make sure calling conventions of all functions have been recovered
self._update_progress(10., text="Recovering calling conventions")
self._recover_calling_conventions()
# initialize the AIL conversion manager
self._ail_manager = ailment.Manager(arch=self.project.arch)
# Track stack pointers
self._update_progress(15., text="Tracking stack pointers")
spt = self._track_stack_pointers()
# Convert VEX blocks to AIL blocks and then simplify them
self._update_progress(20., text="Converting VEX to AIL")
self._convert_all()
ail_graph = self._make_ailgraph()
# Fix "fake" indirect jumps and calls
self._update_progress(25., text="Analyzing simple indirect jumps")
ail_graph = self._replace_single_target_indirect_transitions(ail_graph)
# Make returns
self._update_progress(30., text="Making return sites")
if self.function.prototype is None or not isinstance(self.function.prototype.returnty, SimTypeBottom):
ail_graph = self._make_returns(ail_graph)
# Simplify blocks
# we never remove dead memory definitions before making callsites. otherwise stack arguments may go missing
# before they are recognized as stack arguments.
self._update_progress(35., text="Simplifying blocks 1")
ail_graph = self._simplify_blocks(ail_graph, stack_pointer_tracker=spt, remove_dead_memdefs=False)
# Run simplification passes
self._update_progress(40., text="Running simplifications 1")
ail_graph = self._run_simplification_passes(ail_graph,
stage=OptimizationPassStage.AFTER_SINGLE_BLOCK_SIMPLIFICATION)
# Simplify the entire function for the first time
self._update_progress(45., text="Simplifying function 1")
self._simplify_function(ail_graph, remove_dead_memdefs=False, unify_variables=False)
# clear _blocks_by_addr_and_size so no one can use it again
# TODO: Totally remove this dict
self._blocks_by_addr_and_size = None
# Make call-sites
self._update_progress(50., text="Making callsites")
_, stackarg_offsets = self._make_callsites(ail_graph, stack_pointer_tracker=spt)
# Simplify the entire function for the second time
self._update_progress(55., text="Simplifying function 2")
self._simplify_function(ail_graph, remove_dead_memdefs=self._remove_dead_memdefs,
stack_arg_offsets=stackarg_offsets, unify_variables=True)
# After global optimization, there might be more chances for peephole optimizations.
# Simplify blocks for the second time
self._update_progress(60., text="Simplifying blocks 2")
ail_graph = self._simplify_blocks(ail_graph, remove_dead_memdefs=self._remove_dead_memdefs,
stack_pointer_tracker=spt)
# Make function arguments
self._update_progress(65., text="Making argument list")
arg_list = self._make_argument_list()
# Recover variables on AIL blocks
self._update_progress(70., text="Recovering variables")
variable_kb = self._recover_and_link_variables(ail_graph, arg_list)
# Make function prototype
self._update_progress(75., text="Making function prototype")
self._make_function_prototype(arg_list, variable_kb)
# Run simplification passes
self._update_progress(80., text="Running simplifications 2")
ail_graph = self._run_simplification_passes(ail_graph, stage=OptimizationPassStage.AFTER_GLOBAL_SIMPLIFICATION)
self.graph = ail_graph
self.arg_list = arg_list
self.variable_kb = variable_kb
@timethis
def _set_function_graph(self):
self._func_graph = self.function.graph_ex(exception_edges=self._exception_edges)
@timethis
def _remove_alignment_blocks(self):
"""
Alignment blocks are basic blocks that only consist of nops. They should not be included in the graph.
"""
for node in list(self._func_graph.nodes()):
if self._func_graph.in_degree(node) == 0 and \
CFGBase._is_noop_block(self.project.arch, self.project.factory.block(node.addr, node.size)):
self._func_graph.remove_node(node)
@timethis
def _recover_calling_conventions(self):
self.project.analyses.CompleteCallingConventions()
@timethis
def _track_stack_pointers(self):
"""
For each instruction, track its stack pointer offset and stack base pointer offset.
:return: None
"""
regs = {self.project.arch.sp_offset}
if hasattr(self.project.arch, 'bp_offset') and self.project.arch.bp_offset is not None:
regs.add(self.project.arch.bp_offset)
spt = self.project.analyses.StackPointerTracker(self.function, regs, track_memory=self._sp_tracker_track_memory)
if spt.inconsistent_for(self.project.arch.sp_offset):
l.warning("Inconsistency found during stack pointer tracking. Decompilation results might be incorrect.")
return spt
@timethis
def _convert_all(self):
"""
Convert all VEX blocks in the function graph to AIL blocks, and fill self._blocks.
:return: None
"""
for block_node in self._func_graph.nodes():
ail_block = self._convert(block_node)
if type(ail_block) is ailment.Block:
self._blocks_by_addr_and_size[(block_node.addr, block_node.size)] = ail_block
def _convert(self, block_node):
"""
Convert a VEX block to an AIL block.
:param block_node: A BlockNode instance.
        :return:            A converted AIL block.
:rtype: ailment.Block
"""
        if type(block_node) is not BlockNode:
return block_node
block = self.project.factory.block(block_node.addr, block_node.size)
ail_block = ailment.IRSBConverter.convert(block.vex, self._ail_manager)
return ail_block
@timethis
def _replace_single_target_indirect_transitions(self, ail_graph: networkx.DiGraph) -> networkx.DiGraph:
"""
Remove single-target indirect jumps and calls and replace them with direct jumps or calls.
"""
if self._cfg is None:
return ail_graph
for block in ail_graph.nodes():
if not block.statements:
continue
last_stmt = block.statements[-1]
if isinstance(last_stmt, ailment.Stmt.Call) and not isinstance(last_stmt.target, ailment.Expr.Const):
# indirect call
# consult CFG to see if this is a call with a single successor
node = self._cfg.get_any_node(block.addr)
if node is None:
continue
successors = self._cfg.get_successors(node, excluding_fakeret=True, jumpkind='Ijk_Call')
if len(successors) == 1 and \
not isinstance(self.project.hooked_by(successors[0].addr), UnresolvableCallTarget):
# found a single successor - replace the last statement
new_last_stmt = last_stmt.copy()
new_last_stmt.target = ailment.Expr.Const(None, None, successors[0].addr, last_stmt.target.bits)
block.statements[-1] = new_last_stmt
elif isinstance(last_stmt, ailment.Stmt.Jump) and not isinstance(last_stmt.target, ailment.Expr.Const):
# indirect jump
# consult CFG to see if there is a jump with a single successor
node = self._cfg.get_any_node(block.addr)
if node is None:
continue
successors = self._cfg.get_successors(node, excluding_fakeret=True, jumpkind='Ijk_Boring')
if len(successors) == 1 and \
not isinstance(self.project.hooked_by(successors[0].addr), UnresolvableJumpTarget):
# found a single successor - replace the last statement
new_last_stmt = last_stmt.copy()
new_last_stmt.target = ailment.Expr.Const(None, None, successors[0].addr, last_stmt.target.bits)
block.statements[-1] = new_last_stmt
return ail_graph
@timethis
def _make_ailgraph(self) -> networkx.DiGraph:
graph = self._function_graph_to_ail_graph(self._func_graph)
return graph
@timethis
def _simplify_blocks(self, ail_graph: networkx.DiGraph, remove_dead_memdefs=False, stack_pointer_tracker=None):
"""
Simplify all blocks in self._blocks.
:param ail_graph: The AIL function graph.
:param stack_pointer_tracker: The RegisterDeltaTracker analysis instance.
:return: None
"""
blocks_by_addr_and_idx: Dict[Tuple[int,Optional[int]],ailment.Block] = { }
for ail_block in ail_graph.nodes():
simplified = self._simplify_block(ail_block, remove_dead_memdefs=remove_dead_memdefs,
stack_pointer_tracker=stack_pointer_tracker)
key = ail_block.addr, ail_block.idx
blocks_by_addr_and_idx[key] = simplified
# update blocks_map to allow node_addr to node lookup
def _replace_node_handler(node):
key = node.addr, node.idx
if key in blocks_by_addr_and_idx:
return blocks_by_addr_and_idx[key]
return None
AILGraphWalker(ail_graph, _replace_node_handler, replace_nodes=True).walk()
return ail_graph
def _simplify_block(self, ail_block, remove_dead_memdefs=False, stack_pointer_tracker=None):
"""
Simplify a single AIL block.
:param ailment.Block ail_block: The AIL block to simplify.
:param stack_pointer_tracker: The RegisterDeltaTracker analysis instance.
:return: A simplified AIL block.
"""
simp = self.project.analyses.AILBlockSimplifier(
ail_block,
remove_dead_memdefs=remove_dead_memdefs,
stack_pointer_tracker=stack_pointer_tracker,
peephole_optimizations=self.peephole_optimizations,
)
return simp.result_block
@timethis
def _simplify_function(self, ail_graph, remove_dead_memdefs=False, stack_arg_offsets=None, unify_variables=False,
max_iterations: int=8) -> None:
"""
Simplify the entire function until it reaches a fixed point.
"""
for _ in range(max_iterations):
simplified = self._simplify_function_once(ail_graph, remove_dead_memdefs=remove_dead_memdefs,
unify_variables=unify_variables,
stack_arg_offsets=stack_arg_offsets)
if not simplified:
break
@timethis
def _simplify_function_once(self, ail_graph, remove_dead_memdefs=False, stack_arg_offsets=None,
unify_variables=False):
"""
Simplify the entire function once.
:return: None
"""
simp = self.project.analyses.AILSimplifier(
self.function,
func_graph=ail_graph,
remove_dead_memdefs=remove_dead_memdefs,
unify_variables=unify_variables,
stack_arg_offsets=stack_arg_offsets,
)
# the function graph has been updated at this point
return simp.simplified
@timethis
def _run_simplification_passes(self, ail_graph, stage:int=OptimizationPassStage.AFTER_GLOBAL_SIMPLIFICATION):
addr_and_idx_to_blocks: Dict[Tuple[int,Optional[int]],ailment.Block] = { }
addr_to_blocks: Dict[int,Set[ailment.Block]] = defaultdict(set)
# update blocks_map to allow node_addr to node lookup
def _updatedict_handler(node):
addr_and_idx_to_blocks[(node.addr, node.idx)] = node
addr_to_blocks[node.addr].add(node)
AILGraphWalker(ail_graph, _updatedict_handler).walk()
# Run each pass
for pass_ in self._optimization_passes:
if pass_.STAGE != stage:
continue
analysis = getattr(self.project.analyses, pass_.__name__)
a = analysis(self.function, blocks_by_addr=addr_to_blocks, blocks_by_addr_and_idx=addr_and_idx_to_blocks,
graph=ail_graph)
if a.out_graph:
# use the new graph
ail_graph = a.out_graph
return ail_graph
@timethis
def _make_argument_list(self) -> List[SimVariable]:
if self.function.calling_convention is not None:
args: List[SimFunctionArgument] = self.function.calling_convention.args
arg_vars: List[SimVariable] = [ ]
if args:
for idx, arg in enumerate(args):
if isinstance(arg, SimRegArg):
argvar = SimRegisterVariable(
self.project.arch.registers[arg.reg_name][0],
arg.size,
ident="arg_%d" % idx,
name="a%d" % idx,
region=self.function.addr,
)
elif isinstance(arg, SimStackArg):
argvar = SimStackVariable(
arg.stack_offset,
arg.size,
base='bp',
ident="arg_%d" % idx,
name="a%d" % idx,
region=self.function.addr,
)
else:
raise TypeError("Unsupported function argument type %s." % type(arg))
arg_vars.append(argvar)
return arg_vars
return [ ]
@timethis
def _make_callsites(self, ail_graph, stack_pointer_tracker=None):
"""
Simplify all function call statements.
:return: None
"""
# Computing reaching definitions
rd = self.project.analyses.ReachingDefinitions(subject=self.function, func_graph=ail_graph,
observe_callback=self._make_callsites_rd_observe_callback)
class TempClass: # pylint:disable=missing-class-docstring
stack_arg_offsets = set()
def _handler(block):
csm = self.project.analyses.AILCallSiteMaker(block,
reaching_definitions=rd,
stack_pointer_tracker=stack_pointer_tracker,
ail_manager=self._ail_manager,
)
if csm.stack_arg_offsets is not None:
TempClass.stack_arg_offsets |= csm.stack_arg_offsets
if csm.result_block:
if csm.result_block != block:
ail_block = csm.result_block
simp = self.project.analyses.AILBlockSimplifier(ail_block,
stack_pointer_tracker=stack_pointer_tracker,
peephole_optimizations=self.peephole_optimizations,
stack_arg_offsets=csm.stack_arg_offsets,
)
return simp.result_block
return None
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
return ail_graph, TempClass.stack_arg_offsets
@timethis
def _make_returns(self, ail_graph: networkx.DiGraph) -> networkx.DiGraph:
"""
Work on each return statement and fill in its return expressions.
"""
if self.function.calling_convention is None:
# unknown calling convention. cannot do much about return expressions.
return ail_graph
# Block walker
def _handle_Return(stmt_idx: int, stmt: ailment.Stmt.Return, block: Optional[ailment.Block]): # pylint:disable=unused-argument
if block is not None \
and not stmt.ret_exprs \
and self.function.calling_convention.ret_val is not None:
new_stmt = stmt.copy()
ret_val = self.function.calling_convention.ret_val
if isinstance(ret_val, SimRegArg):
reg = self.project.arch.registers[ret_val.reg_name]
new_stmt.ret_exprs.append(ailment.Expr.Register(
self._next_atom(),
None,
reg[0],
reg[1] * self.project.arch.byte_width,
reg_name=self.project.arch.translate_register_name(reg[0], reg[1])
))
else:
l.warning("Unsupported type of return expression %s.",
type(self.function.calling_convention.ret_val))
block.statements[stmt_idx] = new_stmt
def _handler(block):
walker = AILBlockWalker()
# we don't need to handle any statement besides Returns
walker.stmt_handlers.clear()
walker.expr_handlers.clear()
walker.stmt_handlers[ailment.Stmt.Return] = _handle_Return
walker.walk(block)
# Graph walker
AILGraphWalker(ail_graph, _handler, replace_nodes=True).walk()
return ail_graph
@timethis
def _make_function_prototype(self, arg_list: List[SimVariable], variable_kb):
if self.function.prototype is not None:
# do not overwrite an existing function prototype
# if you want to re-generate the prototype, clear the existing one first
return
variables = variable_kb.variables[self.function.addr]
func_args = [ ]
for arg in arg_list:
func_arg = None
arg_ty = variables.get_variable_type(arg)
if arg_ty is None:
# determine type based on size
if isinstance(arg, (SimRegisterVariable, SimStackVariable)):
if arg.size == 1:
func_arg = SimTypeChar()
elif arg.size == 2:
func_arg = SimTypeShort()
elif arg.size == 4:
func_arg = SimTypeInt()
elif arg.size == 8:
func_arg = SimTypeLongLong()
else:
l.warning("Unsupported argument size %d.", arg.size)
else:
func_arg = arg_ty
func_args.append(func_arg)
if self.function.calling_convention is not None and self.function.calling_convention.ret_val is None:
returnty = SimTypeBottom(label="void")
else:
returnty = SimTypeInt()
self.function.prototype = SimTypeFunction(func_args, returnty).with_arch(self.project.arch)
@timethis
def _recover_and_link_variables(self, ail_graph, arg_list):
# variable recovery
tmp_kb = KnowledgeBase(self.project) if self.variable_kb is None else self.variable_kb
vr = self.project.analyses.VariableRecoveryFast(self.function, # pylint:disable=unused-variable
func_graph=ail_graph, kb=tmp_kb, track_sp=False,
func_args=arg_list)
# get ground-truth types
var_manager = tmp_kb.variables[self.function.addr]
groundtruth = {}
for variable in var_manager.variables_with_manual_types:
vartype = var_manager.types.get(variable, None)
if vartype is not None:
groundtruth[vr.var_to_typevar[variable]] = vartype
# clean up existing types for this function
var_manager.remove_types()
# TODO: Type inference for global variables
# run type inference
if self._must_struct:
must_struct = set()
for var, typevar in vr.var_to_typevar.items():
if var.ident in self._must_struct:
must_struct.add(typevar)
else:
must_struct = None
try:
tp = self.project.analyses.Typehoon(vr.type_constraints, kb=tmp_kb, var_mapping=vr.var_to_typevar,
must_struct=must_struct, ground_truth=groundtruth)
# tp.pp_constraints()
# tp.pp_solution()
tp.update_variable_types(self.function.addr, vr.var_to_typevar)
tp.update_variable_types('global', vr.var_to_typevar)
except Exception: # pylint:disable=broad-except
l.warning("Typehoon analysis failed. Variables will not have types. Please report to GitHub.",
exc_info=True)
# Unify SSA variables
tmp_kb.variables.global_manager.assign_variable_names(labels=self.kb.labels, types={SimMemoryVariable})
var_manager.unify_variables()
var_manager.assign_unified_variable_names(
labels=self.kb.labels,
reset=self._reset_variable_names,
)
# Link variables to each statement
for block in ail_graph.nodes():
self._link_variables_on_block(block, tmp_kb)
if self._cache is not None:
self._cache.type_constraints = vr.type_constraints
self._cache.var_to_typevar = vr.var_to_typevar
return tmp_kb
def _link_variables_on_block(self, block, kb):
"""
Link atoms (AIL expressions) in the given block to corresponding variables identified previously.
:param ailment.Block block: The AIL block to work on.
:return: None
"""
variable_manager = kb.variables[self.function.addr]
global_variables = kb.variables['global']
for stmt_idx, stmt in enumerate(block.statements):
stmt_type = type(stmt)
if stmt_type is ailment.Stmt.Store:
# find a memory variable
mem_vars = variable_manager.find_variables_by_atom(block.addr, stmt_idx, stmt)
if len(mem_vars) == 1:
stmt.variable, stmt.offset = next(iter(mem_vars))
else:
# check if the dest address is a variable
stmt: ailment.Stmt.Store
# special handling for constant addresses
if isinstance(stmt.addr, ailment.Expr.Const):
# global variable?
variables = global_variables.get_global_variables(stmt.addr.value)
if variables:
var = next(iter(variables))
stmt.variable = var
stmt.offset = 0
else:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt,
stmt.addr)
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, stmt.data)
elif stmt_type is ailment.Stmt.Assignment:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, stmt.dst)
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, stmt.src)
elif stmt_type is ailment.Stmt.ConditionalJump:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, stmt.condition)
elif stmt_type is ailment.Stmt.Call:
self._link_variables_on_call(variable_manager, global_variables, block, stmt_idx, stmt, is_expr=False)
elif stmt_type is ailment.Stmt.Return:
self._link_variables_on_return(variable_manager, global_variables, block, stmt_idx, stmt)
def _link_variables_on_return(self, variable_manager, global_variables, block: ailment.Block, stmt_idx: int,
stmt: ailment.Stmt.Return):
if stmt.ret_exprs:
for ret_expr in stmt.ret_exprs:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, ret_expr)
def _link_variables_on_call(self, variable_manager, global_variables, block, stmt_idx, stmt, is_expr=False):
if stmt.args:
for arg in stmt.args:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt,
arg)
if not is_expr and stmt.ret_expr:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, stmt.ret_expr)
def _link_variables_on_expr(self, variable_manager, global_variables, block, stmt_idx, stmt, expr):
"""
Link atoms (AIL expressions) in the given expression to corresponding variables identified previously.
:param variable_manager: Variable manager of the function.
:param ailment.Block block: AIL block.
:param int stmt_idx: ID of the statement.
:param stmt: The AIL statement that `expr` belongs to.
        :param expr: The AIL expression to work on.
:return: None
"""
if type(expr) is ailment.Expr.Register:
# find a register variable
reg_vars = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
final_reg_vars = set()
if len(reg_vars) > 1:
# take phi variables
for reg_var in reg_vars:
if variable_manager.is_phi_variable(reg_var[0]):
final_reg_vars.add(reg_var)
else:
final_reg_vars = reg_vars
if len(final_reg_vars) == 1:
reg_var, offset = next(iter(final_reg_vars))
expr.variable = reg_var
expr.variable_offset = offset
elif type(expr) is ailment.Expr.Load:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 0:
# this is a local variable
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, expr.addr)
if 'reference_variable' in expr.addr.tags and expr.addr.reference_variable is not None:
# copy over the variable to this expr since the variable on a constant is supposed to be a
# reference variable.
expr.variable = expr.addr.reference_variable
expr.variable_offset = expr.addr.reference_variable_offset
else:
if len(variables) > 1:
l.error("More than one variable are available for atom %s. Consider fixing it using phi nodes.",
expr
)
var, offset = next(iter(variables))
expr.variable = var
expr.variable_offset = offset
elif type(expr) is ailment.Expr.BinaryOp:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
var, offset = next(iter(variables))
expr.variable = var
expr.variable_offset = offset
else:
self._link_variables_on_expr(
variable_manager, global_variables, block, stmt_idx, stmt, expr.operands[0])
self._link_variables_on_expr(
variable_manager, global_variables, block, stmt_idx, stmt, expr.operands[1])
elif type(expr) is ailment.Expr.UnaryOp:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
var, offset = next(iter(variables))
expr.variable = var
expr.variable_offset = offset
else:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, expr.operands)
elif type(expr) is ailment.Expr.Convert:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt, expr.operand)
elif type(expr) is ailment.Expr.ITE:
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
var, offset = next(iter(variables))
expr.variable = var
expr.variable_offset = offset
else:
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt,
expr.cond)
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt,
expr.iftrue)
self._link_variables_on_expr(variable_manager, global_variables, block, stmt_idx, stmt,
                                             expr.iffalse)
elif isinstance(expr, ailment.Expr.BasePointerOffset):
variables = variable_manager.find_variables_by_atom(block.addr, stmt_idx, expr)
if len(variables) == 1:
var, offset = next(iter(variables))
expr.variable = var
expr.variable_offset = offset
elif isinstance(expr, ailment.Expr.Const):
# global variable?
variables = global_variables.get_global_variables(expr.value)
if variables:
var = next(iter(variables))
expr.tags['reference_variable'] = var
expr.tags['reference_variable_offset'] = None
expr.variable = var
expr.variable_offset = None
elif isinstance(expr, ailment.Stmt.Call):
self._link_variables_on_call(variable_manager, global_variables, block, stmt_idx, expr, is_expr=True)
def _function_graph_to_ail_graph(self, func_graph, blocks_by_addr_and_size=None):
if blocks_by_addr_and_size is None:
blocks_by_addr_and_size = self._blocks_by_addr_and_size
node_to_block_mapping = {}
graph = networkx.DiGraph()
for node in func_graph.nodes():
ail_block = blocks_by_addr_and_size.get((node.addr, node.size), node)
node_to_block_mapping[node] = ail_block
if ail_block is not None:
graph.add_node(ail_block)
for src_node, dst_node, data in func_graph.edges(data=True):
src = node_to_block_mapping[src_node]
dst = node_to_block_mapping[dst_node]
if dst is not None:
graph.add_edge(src, dst, **data)
return graph
def _next_atom(self) -> int:
return self._ail_manager.next_atom()
@staticmethod
def _make_callsites_rd_observe_callback(ob_type, **kwargs):
if ob_type != 'insn':
return False
stmt = kwargs.pop('stmt')
op_type = kwargs.pop('op_type')
return isinstance(stmt, ailment.Stmt.Call) and op_type == OP_BEFORE
register_analysis(Clinic, 'Clinic')
| 43.178147
| 179
| 0.609693
|
d87804a405892c3b51049da03736a6dc19bcdcac
| 388
|
py
|
Python
|
src/djapian/utils/__init__.py
|
xyz666/djapian-2.3.1-patched
|
70bb12ec9e8a9dcda33eee37c4a6a0565d63b14a
|
[
"BSD-3-Clause"
] | null | null | null |
src/djapian/utils/__init__.py
|
xyz666/djapian-2.3.1-patched
|
70bb12ec9e8a9dcda33eee37c4a6a0565d63b14a
|
[
"BSD-3-Clause"
] | null | null | null |
src/djapian/utils/__init__.py
|
xyz666/djapian-2.3.1-patched
|
70bb12ec9e8a9dcda33eee37c4a6a0565d63b14a
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
DEFAULT_MAX_RESULTS = 100000
DEFAULT_WEIGHT = 1
def model_name(model):
return "%s.%s" % (model._meta.app_label, model._meta.object_name)
def load_indexes():
from djapian.utils import loading
for app in settings.INSTALLED_APPS:
try:
loading.get_module(app, "index")
except loading.NoModuleError:
pass
| 24.25
| 69
| 0.685567
|
4f66788bb25b1dc3ba6231696553c39122bb67d6
| 8,921
|
py
|
Python
|
Validation/HGCalValidation/test/python/protoSimValid_cfg.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Validation/HGCalValidation/test/python/protoSimValid_cfg.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Validation/HGCalValidation/test/python/protoSimValid_cfg.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
###############################################################################
# Way to use this:
# cmsRun protoSimValid_cfg.py geometry=D77 type=hgcalBHValidation
#
# Options for geometry D49, D68, D77, D83, D84, D86
# type hgcalBHValidation, hgcalSiliconValidation
#
###############################################################################
import FWCore.ParameterSet.Config as cms
import os, sys, imp, re
import FWCore.ParameterSet.VarParsing as VarParsing
############################################################
### SETUP OPTIONS
options = VarParsing.VarParsing('standard')
options.register('geometry',
"D86",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"geometry of operations: D49, D68, D77, D83, D84, D86")
options.register ('type',
"hgcalBHValidation",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"type of operations: hgcalBHValidation, hgcalSiliconValidation")
### get and parse the command line arguments
options.parseArguments()
print(options)
############################################################
# Use the options
if (options.geometry == "D49"):
from Configuration.Eras.Era_Phase2C9_cff import Phase2C9
process = cms.Process('PROD',Phase2C9)
process.load('Configuration.Geometry.GeometryExtended2026D49_cff')
process.load('Configuration.Geometry.GeometryExtended2026D49Reco_cff')
if (options.type == "hgcalSiliconValidation"):
fileName = 'hgcSilValidD49.root'
else:
fileName = 'hgcBHValidD49.root'
elif (options.geometry == "D68"):
from Configuration.Eras.Era_Phase2C12_cff import Phase2C12
process = cms.Process('PROD',Phase2C12)
process.load('Configuration.Geometry.GeometryExtended2026D68_cff')
process.load('Configuration.Geometry.GeometryExtended2026D68Reco_cff')
if (options.type == "hgcalSiliconValidation"):
fileName = 'hgcSilValidD68.root'
else:
fileName = 'hgcBHValidD68.root'
elif (options.geometry == "D83"):
from Configuration.Eras.Era_Phase2C11M9_cff import Phase2C11M9
process = cms.Process('PROD',Phase2C11M9)
process.load('Configuration.Geometry.GeometryExtended2026D83_cff')
process.load('Configuration.Geometry.GeometryExtended2026D83Reco_cff')
if (options.type == "hgcalSiliconValidation"):
fileName = 'hgcSilValidD83.root'
else:
fileName = 'hgcBHValidD83.root'
elif (options.geometry == "D84"):
from Configuration.Eras.Era_Phase2C11_cff import Phase2C11
process = cms.Process('PROD',Phase2C11)
process.load('Configuration.Geometry.GeometryExtended2026D84_cff')
process.load('Configuration.Geometry.GeometryExtended2026D84Reco_cff')
if (options.type == "hgcalSiliconValidation"):
fileName = 'hgcSilValidD84.root'
else:
fileName = 'hgcBHValidD84.root'
elif (options.geometry == "D86"):
from Configuration.Eras.Era_Phase2C11_cff import Phase2C11
process = cms.Process('PROD',Phase2C11)
process.load('Configuration.Geometry.GeometryExtended2026D86_cff')
process.load('Configuration.Geometry.GeometryExtended2026D86Reco_cff')
if (options.type == "hgcalSiliconValidation"):
fileName = 'hgcSilValidD86.root'
else:
fileName = 'hgcBHValidD86.root'
else:
from Configuration.Eras.Era_Phase2C11M9_cff import Phase2C11M9
process = cms.Process('PROD',Phase2C11M9)
process.load('Configuration.Geometry.GeometryExtended2026D77_cff')
process.load('Configuration.Geometry.GeometryExtended2026D77Reco_cff')
if (options.type == "hgcalSiliconValidation"):
fileName = 'hgcSilValidD77.root'
else:
fileName = 'hgcBHValidD77.root'
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic50ns13TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.L1TrackTrigger_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
process.load('HLTrigger.Configuration.HLT_Fake2_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.RecoSim_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2000)
)
process.MessageLogger.cerr.FwkReport.reportEvery = 5
if hasattr(process,'MessageLogger'):
process.MessageLogger.HGCalGeom=dict()
# Input source
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomPtGunProducer",
PGunParameters = cms.PSet(
MaxPt = cms.double(35.0),
MinPt = cms.double(35.0),
PartID = cms.vint32(13), #--->muon
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinEta = cms.double(1.2),
MaxEta = cms.double(3.0)
),
Verbosity = cms.untracked.int32(0),
psethack = cms.string('single muon pt 35'),
AddAntiParticle = cms.bool(True),
firstRun = cms.untracked.uint32(1)
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string(''),
annotation = cms.untracked.string(''),
name = cms.untracked.string('Applications')
)
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')
# Additional output definition
process.TFileService = cms.Service("TFileService",
fileName = cms.string(fileName),
closeFileFast = cms.untracked.bool(True) )
#Modified to produce hgceedigis
process.mix.digitizers = cms.PSet(process.theDigitizersValid)
process.ProductionFilterSequence = cms.Sequence(process.generator)
#Following Removes Mag Field
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.bField = cms.double(0.0)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.digitisation_step = cms.Path(process.pdigi_valid)
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.L1TrackTrigger_step = cms.Path(process.L1TrackTrigger)
process.digi2raw_step = cms.Path(process.DigiToRaw)
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.localreco)
process.recosim_step = cms.Path(process.recosim)
if (options.type == "hgcalSiliconValidation"):
process.load('Validation.HGCalValidation.hgcalSiliconValidation_cfi')
process.analysis_step = cms.Path(process.hgcalSiliconAnalysisEE+process.hgcalSiliconAnalysisHEF)
else:
process.load('Validation.HGCalValidation.hgcalBHValidation_cfi')
process.analysis_step = cms.Path(process.hgcalBHAnalysis)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,
process.simulation_step,
process.digitisation_step,
process.L1simulation_step,
process.L1TrackTrigger_step,
process.digi2raw_step,
process.raw2digi_step,
process.L1Reco_step,
process.reconstruction_step,
process.recosim_step,
process.analysis_step,
)
# filter all path with the production filter sequence
for path in process.paths:
if getattr(process,path)._seq is not None: getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
| 43.096618
| 141
| 0.71483
|
dfec4dbada9d88d80e3fe5167ec1b43a826e2beb
| 626
|
py
|
Python
|
build/config/ios/ios_sdk.py
|
domenic/mojo
|
53dda76fed90a47c35ed6e06baf833a0d44495b8
|
[
"BSD-3-Clause"
] | 231
|
2015-01-08T09:04:44.000Z
|
2021-12-30T03:03:10.000Z
|
build/config/ios/ios_sdk.py
|
domenic/mojo
|
53dda76fed90a47c35ed6e06baf833a0d44495b8
|
[
"BSD-3-Clause"
] | 8
|
2015-08-31T06:39:59.000Z
|
2021-12-04T14:53:28.000Z
|
build/config/ios/ios_sdk.py
|
domenic/mojo
|
53dda76fed90a47c35ed6e06baf833a0d44495b8
|
[
"BSD-3-Clause"
] | 268
|
2015-01-21T05:53:28.000Z
|
2022-03-25T22:09:01.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
import sys
# This script returns the path to the SDK of the given type. Pass the type of
# SDK you want, which is typically "iphone" or "iphonesimulator".
#
# In the GYP build, this is done inside GYP itself based on the SDKROOT
# variable.
if len(sys.argv) != 2:
print "Takes one arg (SDK to find)"
sys.exit(1)
print subprocess.check_output(['xcodebuild', '-version', '-sdk',
sys.argv[1], 'Path']).strip()
| 31.3
| 77
| 0.688498
|
4d486c9ceff51da2363d5a81a84ff137bb8abe2a
| 400
|
py
|
Python
|
api/components/text.py
|
singhprincejeet/in_poster
|
1b0e18631ebede94e679eb0aba6c8e7630a02aba
|
[
"MIT"
] | null | null | null |
api/components/text.py
|
singhprincejeet/in_poster
|
1b0e18631ebede94e679eb0aba6c8e7630a02aba
|
[
"MIT"
] | 4
|
2021-04-30T21:09:19.000Z
|
2022-03-12T00:19:12.000Z
|
api/components/text.py
|
singhprincejeet/in_poster
|
1b0e18631ebede94e679eb0aba6c8e7630a02aba
|
[
"MIT"
] | null | null | null |
class Text:
def __init__(self, value, text_style):
self.value = value
self.text_style = text_style
def get_font(self):
return self.text_style.get_font()
def get_value(self):
return self.value
def get_align(self):
return self.text_style.get_align()
def get_color(self):
return self.text_style.get_color()
| 19.047619
| 43
| 0.6025
|
d8f781fca20d74d91c400693632a1a7abb6f28a8
| 1,993
|
py
|
Python
|
rocket_connect/asterisk/migrations/0001_initial.py
|
diegoromanio/rocket.connect
|
f161ab020ed64333aa35c0e17816ab48e39062b0
|
[
"MIT"
] | 23
|
2021-04-15T23:19:51.000Z
|
2022-02-21T19:58:31.000Z
|
rocket_connect/asterisk/migrations/0001_initial.py
|
diegoromanio/rocket.connect
|
f161ab020ed64333aa35c0e17816ab48e39062b0
|
[
"MIT"
] | 30
|
2021-04-14T22:04:20.000Z
|
2022-03-28T11:22:08.000Z
|
rocket_connect/asterisk/migrations/0001_initial.py
|
diegoromanio/rocket.connect
|
f161ab020ed64333aa35c0e17816ab48e39062b0
|
[
"MIT"
] | 5
|
2021-04-16T14:50:32.000Z
|
2022-03-11T23:50:59.000Z
|
# Generated by Django 3.1.10 on 2021-06-11 14:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Call',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unique_id', models.CharField(max_length=50, unique=True)),
('answred', models.DateTimeField(blank=True, null=True)),
('hangup', models.DateTimeField(blank=True, null=True)),
('queue', models.CharField(max_length=50)),
('agent', models.CharField(max_length=50)),
('caller', models.CharField(blank=True, max_length=50, null=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('previous_call', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='asterisk.call')),
],
options={
'verbose_name': 'Call',
'verbose_name_plural': 'Calls',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='CallMessages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('json', models.JSONField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('call', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='asterisk.call')),
],
),
]
| 43.326087
| 141
| 0.583041
|
936a1a87158db053089547aba54754ed3c4cd5de
| 6,651
|
py
|
Python
|
src/carts/views.py
|
Saruni0305/oop-work-2
|
371787017cdd8446220b646c0070c4e53065bff5
|
[
"MIT"
] | 24
|
2018-11-17T21:02:06.000Z
|
2021-11-18T23:02:00.000Z
|
src/carts/views.py
|
Saruni0305/oop-work-2
|
371787017cdd8446220b646c0070c4e53065bff5
|
[
"MIT"
] | 9
|
2020-06-05T21:43:20.000Z
|
2021-11-15T17:49:01.000Z
|
src/carts/views.py
|
SaruniM/oop-work-2
|
371787017cdd8446220b646c0070c4e53065bff5
|
[
"MIT"
] | 15
|
2019-03-08T20:19:17.000Z
|
2021-12-29T10:16:14.000Z
|
from django.conf import settings
from django.shortcuts import render, redirect
from django.http import JsonResponse, Http404
from django.contrib import messages
from django.core.urlresolvers import reverse
import stripe
from .models import Cart
from products.models import Product
from orders.models import Order
from billing.models import BillingProfile, Card
from addresses.models import Address
from accounts.forms import LoginForm, GuestForm
from addresses.forms import AddressFormCheckout
stripe.api_key = getattr(settings, 'STRIPE_SECRET_KEY', None)
STRIPE_PUB_KEY = getattr(settings, 'STRIPE_PUBLISH_KEY', None)
# check for stripe integration
if stripe.api_key is None:
raise NotImplementedError("STRIPE_SECRET_KEY must be set in the settings")
if STRIPE_PUB_KEY is None:
raise NotImplementedError("STRIPE_PUB_KEY must be set in the settings")
def cart_home_api_view(request):
cart_obj, new_obj = Cart.objects.get_or_new(request)
products = [{
'id': x.id,
'url': x.get_absolute_url(),
'name': x.name,
'price': x.price
} for x in cart_obj.products.all()]
    # We can't pass the object directly to AJAX; we need to convert it to JSON format first
cart_data = {"products": products, "subtotal": cart_obj.subtotal, "total": cart_obj.total}
return JsonResponse(cart_data)
def cart_home(request):
cart_obj, new_obj = Cart.objects.get_or_new(request)
return render(request, 'carts/home.html', {'cart': cart_obj})
def cart_update(request):
# print(request.POST)
product_id = request.POST.get('product_id')
if product_id is not None:
try:
product_obj = Product.objects.get(id=product_id)
except Product.DoesNotExist:
messages.error(request, "Oops! The product does not exist.")
return redirect('cart:home')
# guests cannot buy digital products
if product_obj.is_digital and not request.user.is_authenticated():
if request.is_ajax():
json_data = { # Additional data we want to send along with the form data
"noLoginDigital": True
}
return JsonResponse(json_data, status=200)
messages.error(request, "You must login, in order to purchase any digital items!")
return redirect('login')
cart_obj, new_obj = Cart.objects.get_or_new(request)
if product_obj in cart_obj.products.all():
cart_obj.products.remove(product_obj)
added = False
else:
cart_obj.products.add(product_obj)
added = True
request.session['cart_items_number'] = cart_obj.products.count()
if request.is_ajax(): # If ajax data, then send back form data in JSON
json_data = { # Additional data we want to send along with the form data
"added": added,
"cartItemCount": cart_obj.products.count()
}
return JsonResponse(json_data, status=200) # JsonResponse sends only form data if no parameters are given
# return JsonResponse({"message": "Error 400"}, status=400)
return redirect('cart:home')
def checkout_home(request):
cart_obj, new_cart = Cart.objects.get_or_new(request)
order_obj = None
if new_cart or cart_obj.products.count() == 0:
return redirect('cart:home')
login_form = LoginForm(request=request)
guest_form = GuestForm(request=request)
address_form = AddressFormCheckout()
shipping_address_required = not cart_obj.is_digital
shipping_address_id = request.session.get('shipping_address_id', None)
billing_address_id = request.session.get('billing_address_id', None)
billing_profile, billing_profile_created = BillingProfile.objects.get_or_new(request)
address_qs = None
has_card = False
# if order related to the billing profile exists, use that. Else create one.
if billing_profile is not None: # Without billing profile, order should not exist
if request.user.is_authenticated():
address_qs = Address.objects.filter(billing_profile=billing_profile)
order_obj, order_obj_created = Order.objects.get_or_new(billing_profile, cart_obj)
if shipping_address_id:
order_obj.shipping_address = Address.objects.get(id=shipping_address_id)
del request.session['shipping_address_id']
if billing_address_id:
order_obj.billing_address = Address.objects.get(id=billing_address_id)
del request.session['billing_address_id']
if shipping_address_id or billing_address_id:
order_obj.save()
has_card = billing_profile.has_card
if request.method == 'POST':
if order_obj.check_done():
card_obj = None
card_id = request.session.get('card_id', None)
if card_id:
card_obj = Card.objects.get(id=card_id)
del request.session['card_id']
did_charge_paid, charge_message = billing_profile.charge(order_obj, card_obj)
if did_charge_paid:
order_obj.mark_paid() # acts as a signal when order is paid for
order_obj.send_order_success_email()
request.session['cart_items_number'] = 0
del request.session['cart_id']
if not billing_profile.user:
billing_profile.set_cards_inactive()
request.session['checkout_home'] = True
try: # delete guest session
del request.session['guest_obj_id']
except:
pass
if request.is_ajax():
return JsonResponse({'next_path': reverse('cart:success')})
return redirect('cart:success')
else:
if request.is_ajax():
return JsonResponse({'next_path': reverse('cart:checkout')})
return redirect('cart:checkout')
context = {
"object": order_obj,
"billing_profile": billing_profile,
"login_form": login_form,
"guest_form": guest_form,
"address_form": address_form,
"address_qs": address_qs,
"has_card": has_card,
"publish_key": STRIPE_PUB_KEY,
"shipping_address_required": shipping_address_required
}
return render(request, 'carts/checkout.html', context)
def checkout_done(request):
if request.session.get('checkout_home'):
del request.session['checkout_home']
return render(request, 'carts/checkout_done.html', {})
raise Http404
| 39.589286
| 118
| 0.657796
|
95a798c99f23326a3c068b977cab49add6dd45ac
| 14,421
|
py
|
Python
|
bin/scons-proc.py
|
fire/scons
|
f5f5f99d447bd00e0f2202beddb9d86bf0417589
|
[
"MIT"
] | null | null | null |
bin/scons-proc.py
|
fire/scons
|
f5f5f99d447bd00e0f2202beddb9d86bf0417589
|
[
"MIT"
] | null | null | null |
bin/scons-proc.py
|
fire/scons
|
f5f5f99d447bd00e0f2202beddb9d86bf0417589
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Process a list of Python and/or XML files containing SCons documentation.
#
# This script creates formatted lists of the Builders, functions, Tools
# or construction variables documented in the specified XML files.
#
# Depending on the options, the lists are output in either
# DocBook-formatted generated XML files containing the summary text
# and/or .mod files containing the ENTITY definitions for each item.
#
import getopt
import os
import sys
import SConsDoc
from SConsDoc import tf as stf
base_sys_path = [os.getcwd() + '/build/test-tar-gz/lib/scons'] + sys.path
helpstr = """\
Usage: scons-proc.py [-b file(s)] [-f file(s)] [-t file(s)] [-v file(s)]
[infile ...]
Options:
-b file(s) dump builder information to the specified file(s)
-f file(s) dump function information to the specified file(s)
-t file(s) dump tool information to the specified file(s)
-v file(s) dump variable information to the specified file(s)
The "files" argument following a -[bftv] argument is expected to
be a comma-separated pair of names like: foo.gen,foo.mod
"""
opts, args = getopt.getopt(sys.argv[1:],
"b:f:ht:v:",
['builders=', 'help',
'tools=', 'variables='])
buildersfiles = None
functionsfiles = None
toolsfiles = None
variablesfiles = None
for o, a in opts:
if o in ['-b', '--builders']:
buildersfiles = a
elif o in ['-f', '--functions']:
functionsfiles = a
elif o in ['-h', '--help']:
sys.stdout.write(helpstr)
sys.exit(0)
elif o in ['-t', '--tools']:
toolsfiles = a
elif o in ['-v', '--variables']:
variablesfiles = a
def parse_docs(args, include_entities=True):
h = SConsDoc.SConsDocHandler()
for f in args:
if include_entities:
try:
h.parseXmlFile(f)
except:
sys.stderr.write("error in %s\n" % f)
raise
else:
# mode we read (text/bytes) has to match handling in SConsDoc
with open(f, 'r') as fp:
content = fp.read()
if content:
try:
h.parseContent(content, include_entities)
except:
sys.stderr.write("error in %s\n" % f)
raise
return h
Warning = """\
<!--
THIS IS AN AUTOMATICALLY-GENERATED FILE. DO NOT EDIT.
-->
"""
Regular_Entities_Header = """\
<!--
Regular %s entities.
-->
"""
Link_Entities_Header = """\
<!--
Entities that are links to the %s entries
-->
"""
class SCons_XML:
def __init__(self, entries, **kw):
self.values = entries
for k, v in kw.items():
setattr(self, k, v)
def fopen(self, name, mode='w'):
if name == '-':
return sys.stdout
return open(name, mode)
def write(self, files):
gen, mod = files.split(',')
self.write_gen(gen)
self.write_mod(mod)
def write_gen(self, filename):
if not filename:
return
# Try to split off .gen filename
if filename.count(','):
fl = filename.split(',')
filename = fl[0]
# Start new XML file
root = stf.newXmlTree("variablelist")
for v in self.values:
ve = stf.newNode("varlistentry")
stf.setAttribute(ve, 'id', '%s%s' % (v.prefix, v.idfunc()))
for t in v.xml_terms():
stf.appendNode(ve, t)
vl = stf.newNode("listitem")
added = False
if v.summary is not None:
for s in v.summary:
added = True
stf.appendNode(vl, stf.copyNode(s))
# Generate the text for sets/uses lists of construction vars.
# This used to include an entity reference which would be replaced
# by the link to the cvar, but with lxml, dumping out the tree
# with tostring() will encode the & introducing the entity,
# breaking it. Instead generate the actual link. (issue #3580)
if v.sets:
added = True
vp = stf.newNode("para")
stf.setText(vp, "Sets: ")
for setv in v.sets:
link = stf.newSubNode(vp, "link", linkend="cv-%s" % setv)
linktgt = stf.newSubNode(link, "varname")
stf.setText(linktgt, "$" + setv)
stf.setTail(link, " ")
stf.appendNode(vl, vp)
if v.uses:
added = True
vp = stf.newNode("para")
stf.setText(vp, "Uses: ")
for use in v.uses:
link = stf.newSubNode(vp, "link", linkend="cv-%s" % use)
linktgt = stf.newSubNode(link, "varname")
stf.setText(linktgt, "$" + use)
stf.setTail(link, " ")
stf.appendNode(vl, vp)
# Still nothing added to this list item?
if not added:
# Append an empty para
vp = stf.newNode("para")
stf.appendNode(vl, vp)
stf.appendNode(ve, vl)
stf.appendNode(root, ve)
# Write file
f = self.fopen(filename)
stf.writeGenTree(root, f)
f.close()
def write_mod(self, filename):
try:
description = self.values[0].description
except:
description = ""
if not filename:
return
# Try to split off .mod filename
if filename.count(','):
fl = filename.split(',')
filename = fl[1]
f = self.fopen(filename)
f.write(Warning)
f.write('\n')
f.write(Regular_Entities_Header % description)
f.write('\n')
for v in self.values:
f.write('<!ENTITY %s%s "<%s xmlns=\'%s\'>%s</%s>">\n' %
(v.prefix, v.idfunc(),
v.tag, SConsDoc.dbxsd, v.entityfunc(), v.tag))
if self.env_signatures:
f.write('\n')
for v in self.values:
f.write('<!ENTITY %senv-%s "<%s xmlns=\'%s\'>env.%s</%s>">\n' %
(v.prefix, v.idfunc(),
v.tag, SConsDoc.dbxsd, v.entityfunc(), v.tag))
f.write('\n')
f.write(Link_Entities_Header % description)
f.write('\n')
for v in self.values:
f.write('<!ENTITY %slink-%s "<link linkend=\'%s%s\' xmlns=\'%s\'><%s>%s</%s></link>">\n' %
(v.prefix, v.idfunc(),
v.prefix, v.idfunc(), SConsDoc.dbxsd,
v.tag, v.entityfunc(), v.tag))
if self.env_signatures:
f.write('\n')
for v in self.values:
f.write('<!ENTITY %slink-env-%s "<link linkend=\'%s%s\' xmlns=\'%s\'><%s>env.%s</%s></link>">\n' %
(v.prefix, v.idfunc(),
v.prefix, v.idfunc(), SConsDoc.dbxsd,
v.tag, v.entityfunc(), v.tag))
f.close()
class Proxy:
def __init__(self, subject):
"""Wrap an object as a Proxy object"""
self.__subject = subject
def __getattr__(self, name):
"""Retrieve an attribute from the wrapped object.
If the named attribute doesn't exist, AttributeError is raised
"""
return getattr(self.__subject, name)
def get(self):
"""Retrieve the entire wrapped object"""
return self.__subject
def __eq__(self, other):
if issubclass(other.__class__, self.__subject.__class__):
return self.__subject == other
return self.__dict__ == other.__dict__
## def __lt__(self, other):
## if issubclass(other.__class__, self.__subject.__class__):
## return self.__subject < other
## return self.__dict__ < other.__dict__
class SConsThing(Proxy):
"""Base class for the SConsDoc special elements"""
def idfunc(self):
return self.name
def xml_terms(self):
e = stf.newNode("term")
stf.setText(e, self.name)
return [e]
class Builder(SConsThing):
"""Generate the descriptions and entities for <builder> elements"""
description = 'builder'
prefix = 'b-'
tag = 'function'
def xml_terms(self):
"""emit xml for an scons builder
builders don't show a full signature, just func()
"""
# build term for global function
gterm = stf.newNode("term")
func = stf.newSubNode(gterm, Builder.tag)
stf.setText(func, self.name)
stf.setTail(func, '()')
# build term for env. method
mterm = stf.newNode("term")
inst = stf.newSubNode(mterm, "replaceable")
stf.setText(inst, "env")
stf.setTail(inst, ".")
# we could use <function> here, but it's a "method"
meth = stf.newSubNode(mterm, "methodname")
stf.setText(meth, self.name)
stf.setTail(meth, '()')
return [gterm, mterm]
def entityfunc(self):
return self.name
class Function(SConsThing):
"""Generate the descriptions and entities for <scons_function> elements"""
description = 'function'
prefix = 'f-'
tag = 'function'
def xml_terms(self):
"""emit xml for an scons function
The signature attribute controls whether to emit the
global function, the environment method, or both.
"""
if self.arguments is None:
a = stf.newNode("arguments")
stf.setText(a, '()')
arguments = [a]
else:
arguments = self.arguments
tlist = []
for arg in arguments:
signature = 'both'
if stf.hasAttribute(arg, 'signature'):
signature = stf.getAttribute(arg, 'signature')
sig = stf.getText(arg).strip()[1:-1] # strip (), temporarily
if signature in ('both', 'global'):
# build term for global function
gterm = stf.newNode("term")
func = stf.newSubNode(gterm, Function.tag)
stf.setText(func, self.name)
if sig:
# if there are parameters, use that entity
stf.setTail(func, "(")
s = stf.newSubNode(gterm, "parameter")
stf.setText(s, sig)
stf.setTail(s, ")")
else:
stf.setTail(func, "()")
tlist.append(gterm)
if signature in ('both', 'env'):
# build term for env. method
mterm = stf.newNode("term")
inst = stf.newSubNode(mterm, "replaceable")
stf.setText(inst, "env")
stf.setTail(inst, ".")
# we could use <function> here, but it's a "method"
meth = stf.newSubNode(mterm, "methodname")
stf.setText(meth, self.name)
if sig:
# if there are parameters, use that entity
stf.setTail(meth, "(")
s = stf.newSubNode(mterm, "parameter")
stf.setText(s, sig)
stf.setTail(s, ")")
else:
stf.setTail(meth, "()")
tlist.append(mterm)
if not tlist:
tlist.append(stf.newNode("term"))
return tlist
def entityfunc(self):
return self.name
class Tool(SConsThing):
"""Generate the descriptions and entities for <tool> elements"""
description = 'tool'
prefix = 't-'
tag = 'literal'
def idfunc(self):
return self.name.replace('+', 'X')
def entityfunc(self):
return self.name
class Variable(SConsThing):
"""Generate the descriptions and entities for <cvar> elements"""
description = 'construction variable'
prefix = 'cv-'
tag = 'envar'
def xml_terms(self):
term = stf.newNode("term")
var = stf.newSubNode(term, Variable.tag)
stf.setText(var, self.name)
return [term]
def entityfunc(self):
return '$' + self.name
def write_output_files(h, buildersfiles, functionsfiles,
toolsfiles, variablesfiles, write_func):
if buildersfiles:
g = processor_class([Builder(b) for b in sorted(h.builders.values())],
env_signatures=True)
write_func(g, buildersfiles)
if functionsfiles:
g = processor_class([Function(b) for b in sorted(h.functions.values())],
env_signatures=True)
write_func(g, functionsfiles)
if toolsfiles:
g = processor_class([Tool(t) for t in sorted(h.tools.values())],
env_signatures=False)
write_func(g, toolsfiles)
if variablesfiles:
g = processor_class([Variable(v) for v in sorted(h.cvars.values())],
env_signatures=False)
write_func(g, variablesfiles)
processor_class = SCons_XML
# Step 1: Creating entity files for builders, functions,...
print("Generating entity files...")
h = parse_docs(args, include_entities=False)
write_output_files(h, buildersfiles, functionsfiles, toolsfiles,
variablesfiles, SCons_XML.write_mod)
# Step 2: Validating all input files
print("Validating files against SCons XSD...")
if SConsDoc.validate_all_xml(['SCons']):
print("OK")
else:
print("Validation failed! Please correct the errors above and try again.")
# Step 3: Creating actual documentation snippets, using the
# fully resolved and updated entities from the *.mod files.
print("Updating documentation for builders, tools and functions...")
h = parse_docs(args, include_entities=True)
write_output_files(h, buildersfiles, functionsfiles, toolsfiles,
variablesfiles, SCons_XML.write)
print("Done")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.228111
| 114
| 0.535192
|
bca81757c70aee9b7102e5811393bb087ec6a7e9
| 3,344
|
py
|
Python
|
tests/unit/test_vcs.py
|
ManjushaManju/pip
|
9626d9ccdb03c659fdffb5e2c97bf05effecbc5f
|
[
"MIT"
] | null | null | null |
tests/unit/test_vcs.py
|
ManjushaManju/pip
|
9626d9ccdb03c659fdffb5e2c97bf05effecbc5f
|
[
"MIT"
] | null | null | null |
tests/unit/test_vcs.py
|
ManjushaManju/pip
|
9626d9ccdb03c659fdffb5e2c97bf05effecbc5f
|
[
"MIT"
] | 1
|
2020-04-12T11:58:21.000Z
|
2020-04-12T11:58:21.000Z
|
import pytest
from tests.lib import pyversion
from pip.vcs import VersionControl
from pip.vcs.bazaar import Bazaar
from pip.vcs.git import Git
from mock import Mock
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
@pytest.fixture
def git():
git_url = 'http://github.com/pypa/pip-test-package'
refs = {
'0.1': 'a8992fc7ee17e5b9ece022417b64594423caca7c',
'0.1.1': '7d654e66c8fa7149c165ddeffa5b56bc06619458',
'0.1.2': 'f1c1020ebac81f9aeb5c766ff7a772f709e696ee',
'foo': '5547fa909e83df8bd743d3978d6667497983a4b7',
'bar': '5547fa909e83df8bd743d3978d6667497983a4b7',
'master': '5547fa909e83df8bd743d3978d6667497983a4b7',
'origin/master': '5547fa909e83df8bd743d3978d6667497983a4b7',
'origin/HEAD': '5547fa909e83df8bd743d3978d6667497983a4b7',
}
sha = refs['foo']
git = Git()
git.get_url = Mock(return_value=git_url)
git.get_revision = Mock(return_value=sha)
git.get_short_refs = Mock(return_value=refs)
return git
@pytest.fixture
def dist():
dist = Mock()
dist.egg_name = Mock(return_value='pip_test_package')
return dist
def test_git_get_src_requirements(git, dist):
ret = git.get_src_requirement(dist, location='.')
assert ret == ''.join([
'git+http://github.com/pypa/pip-test-package',
'@5547fa909e83df8bd743d3978d6667497983a4b7',
'#egg=pip_test_package'
])
@pytest.mark.parametrize('ref,result', (
('5547fa909e83df8bd743d3978d6667497983a4b7', True),
('5547fa909', True),
('abc123', False),
('foo', False),
))
def test_git_check_version(git, ref, result):
assert git.check_version('foo', ref) is result
def test_translate_egg_surname():
vc = VersionControl()
assert vc.translate_egg_surname("foo") == "foo"
assert vc.translate_egg_surname("foo/bar") == "foo_bar"
assert vc.translate_egg_surname("foo/1.2.3") == "foo_1.2.3"
def test_bazaar_simple_urls():
"""
Test bzr url support.
SSH and launchpad have special handling.
"""
http_bzr_repo = Bazaar(
url='bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
https_bzr_repo = Bazaar(
url='bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
ssh_bzr_repo = Bazaar(
url='bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
ftp_bzr_repo = Bazaar(
url='bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
sftp_bzr_repo = Bazaar(
url='bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
launchpad_bzr_repo = Bazaar(
url='bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject'
)
assert http_bzr_repo.get_url_rev() == (
'http://bzr.myproject.org/MyProject/trunk/', None,
)
assert https_bzr_repo.get_url_rev() == (
'https://bzr.myproject.org/MyProject/trunk/', None,
)
assert ssh_bzr_repo.get_url_rev() == (
'bzr+ssh://bzr.myproject.org/MyProject/trunk/', None,
)
assert ftp_bzr_repo.get_url_rev() == (
'ftp://bzr.myproject.org/MyProject/trunk/', None,
)
assert sftp_bzr_repo.get_url_rev() == (
'sftp://bzr.myproject.org/MyProject/trunk/', None,
)
assert launchpad_bzr_repo.get_url_rev() == (
'lp:MyLaunchpadProject', None,
)
| 29.59292
| 74
| 0.668361
|
0f0d0e3439296ac0bc0f0ca2dad963f569bc6b90
| 5,801
|
py
|
Python
|
examples/example_11_binary_source.py
|
pmehta08/MulensModel
|
261738c445a8d116d09c90e65f6e847cfc8a7ad8
|
[
"MIT"
] | null | null | null |
examples/example_11_binary_source.py
|
pmehta08/MulensModel
|
261738c445a8d116d09c90e65f6e847cfc8a7ad8
|
[
"MIT"
] | null | null | null |
examples/example_11_binary_source.py
|
pmehta08/MulensModel
|
261738c445a8d116d09c90e65f6e847cfc8a7ad8
|
[
"MIT"
] | null | null | null |
"""
Fits binary source model using EMCEE sampler.
The code simulates binary source light curve and fits the model twice:
with source flux ratio found via linear regression and
with source flux ratio as a chain parameter.
"""
import os
import sys
import numpy as np
try:
import emcee
except ImportError as err:
print(err)
print("\nEMCEE could not be imported.")
print("Get it from: http://dfm.io/emcee/current/user/install/")
print("and re-run the script")
sys.exit(1)
import matplotlib.pyplot as plt
import MulensModel as mm
# Fix the seed for the random number generator so the behavior is reproducible.
np.random.seed(12343)
# Define likelihood functions
def ln_like(theta, event, parameters_to_fit):
""" likelihood function """
for (param, theta_) in zip(parameters_to_fit, theta):
# Here we handle fixing source flux ratio:
if param == 'flux_ratio':
event.model.set_source_flux_ratio(theta_)
else:
setattr(event.model.parameters, param, theta_)
return -0.5 * event.get_chi2()
def ln_prior(theta, parameters_to_fit):
"""priors - we only reject obviously wrong models"""
for param in ['t_E', 'u_0_1', 'u_0_2']:
if param in parameters_to_fit:
if theta[parameters_to_fit.index(param)] < 0.:
return -np.inf
return 0.0
def ln_prob(theta, event, parameters_to_fit):
""" combines likelihood and priors"""
ln_prior_ = ln_prior(theta, parameters_to_fit)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit)
    # If the source fluxes come out negative, the likelihood evaluates to NaN;
    # reject such models here just as the priors would.
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
def fit_EMCEE(parameters_to_fit, starting_params, sigmas, ln_prob, event,
n_walkers=20, n_steps=3000, n_burn=1500):
"""
Fit model using EMCEE and print results.
Arguments:
parameters_to_fit - list of parameters
starting_params - dict that specifies values of these parameters
sigmas - list of sigma values used to find starting values
ln_prob - function returning logarithm of probability
event - MulensModel.Event instance
n_walkers - number of walkers in EMCEE
n_steps - number of steps per walker
n_burn - number of steps considered as burn-in ( < n_steps)
"""
n_dim = len(parameters_to_fit)
mean = [starting_params[p] for p in parameters_to_fit]
start = [mean + np.random.randn(n_dim) * sigmas for i in range(n_walkers)]
# Run emcee (this can take some time):
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, ln_prob, args=(event, parameters_to_fit))
sampler.run_mcmc(start, n_steps)
# Remove burn-in samples and reshape:
samples = sampler.chain[:, n_burn:, :].reshape((-1, n_dim))
# Results:
results = np.percentile(samples, [16, 50, 84], axis=0)
print("Fitted parameters:")
for i in range(n_dim):
r = results[1, i]
msg = parameters_to_fit[i] + ": {:.5f} +{:.5f} -{:.5f}"
print(msg.format(r, results[2, i]-r, r-results[0, i]))
# We extract best model parameters and chi2 from event:
print("\nSmallest chi2 model:")
if "flux_ratio" in parameters_to_fit:
parameters_to_fit.pop(parameters_to_fit.index("flux_ratio"))
best = [event.best_chi2_parameters[p] for p in parameters_to_fit]
print(*[repr(b) if isinstance(b, float) else b.value for b in best])
print('chi^2 =', event.best_chi2)
# Set model parameters to best value. Note that
# event.model.parameters does not know flux_ratio.
for param in parameters_to_fit:
if param != 'flux_ratio':
setattr(event.model.parameters, param,
event.best_chi2_parameters[param])
# First, prepare the data. There is nothing very exciting in this part,
# so you may skip it.
t_0_1 = 6100.
u_0_1 = 0.2
t_0_2 = 6140.
u_0_2 = 0.01
t_E = 25.
assumed_flux_1 = 100.
assumed_flux_2 = 5.
assumed_flux_blend = 10.
n_a = 1000
n_b = 600
time_a = np.linspace(6000., 6300., n_a)
time_b = np.linspace(6139., 6141., n_b)
time = np.sort(np.concatenate((time_a, time_b)))
model_1 = mm.Model({'t_0': t_0_1, 'u_0': u_0_1, 't_E': t_E})
A_1 = model_1.magnification(time)
model_2 = mm.Model({'t_0': t_0_2, 'u_0': u_0_2, 't_E': t_E})
A_2 = model_2.magnification(time)
flux = A_1 * assumed_flux_1 + A_2 * assumed_flux_2 + assumed_flux_blend
flux_err = 6. + 0. * time
flux += flux_err * np.random.normal(size=n_a+n_b)
my_dataset = mm.MulensData([time, flux, flux_err], phot_fmt='flux')
# If you want to plot, then just uncomment:
# plt.plot(time, flux, 'ro')
# plt.show()
# Starting parameters:
params = {'t_0_1': 6101., 'u_0_1': 0.19, 't_0_2': 6140.123, 'u_0_2': 0.04,
't_E': 20.}
my_model = mm.Model(params)
my_event = mm.Event(datasets=my_dataset, model=my_model)
# First fit - source flux ratio not set, hence found by regression:
parameters_to_fit = ["t_0_1", "u_0_1", "t_0_2", "u_0_2", "t_E"]
sigmas = [0.1, 0.05, 1., 0.01, 1.]
print("\nFirst fit. This can take some time...")
fit_EMCEE(parameters_to_fit, params, sigmas, ln_prob, my_event)
# Starting parameters for second fit:
params = {'t_0_1': 6101., 'u_0_1': 0.19, 't_0_2': 6140.123, 'u_0_2': 0.04,
't_E': 25.987}
my_model = mm.Model(params)
my_event = mm.Event(datasets=my_dataset, model=my_model)
params['flux_ratio'] = 0.02
# Second fit - source flux ratio as one of the chain parameters:
parameters_to_fit = ["t_0_1", "u_0_1", "t_0_2", "u_0_2", "t_E", "flux_ratio"]
sigmas = [0.1, 0.05, 1., 0.01, 1., 0.001]
print("\nSecond fit. This can take some time...")
fit_EMCEE(parameters_to_fit, params, sigmas, ln_prob, my_event)
| 35.157576
| 79
| 0.675401
|
cb9222592fe0db7f586c712ce569b49d50a05421
| 4,282
|
py
|
Python
|
FeatureCollection/add_area_column.py
|
c11/earthengine-py-notebooks
|
144b57e4d952da095ba73c3cc8ce2f36291162ff
|
[
"MIT"
] | 1
|
2020-05-31T14:19:59.000Z
|
2020-05-31T14:19:59.000Z
|
FeatureCollection/add_area_column.py
|
c11/earthengine-py-notebooks
|
144b57e4d952da095ba73c3cc8ce2f36291162ff
|
[
"MIT"
] | null | null | null |
FeatureCollection/add_area_column.py
|
c11/earthengine-py-notebooks
|
144b57e4d952da095ba73c3cc8ce2f36291162ff
|
[
"MIT"
] | null | null | null |
# %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/add_area_column.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/add_area_column.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/add_area_column.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
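# Other basemap keys (for example 'HYBRID' or 'TERRAIN') can be added the same way; this is an
# illustrative note, see geemap's basemap list for the supported names.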
Map
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
fromFT = ee.FeatureCollection("users/wqs/Pipestem/Pipestem_HUC10")
# This function computes the feature's geometry area and adds it as a property.
def addArea(feature):
return feature.set({'areaHa': feature.geometry().area().divide(100 * 100)})
# Map the area getting function over the FeatureCollection.
areaAdded = fromFT.map(addArea)
# Print the first feature from the collection with the added property.
first = areaAdded.first()
print('First feature: ', first.getInfo())
print("areaHa: ", first.get("areaHa").getInfo())
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| 50.376471
| 1,021
| 0.751051
|
8a81be2aba07a38dee4419e1026dac6f1153ec36
| 957
|
py
|
Python
|
packages/pyre/constraints/Subset.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/constraints/Subset.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/constraints/Subset.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# superclass
from .Constraint import Constraint
# declaration
class Subset(Constraint):
"""
Constraint that is satisfied when the candidate is a subset of a given set
"""
# interface
def validate(self, value, **kwds):
"""
Check whether {value} satisfies this constraint
"""
# if my set is a superset of {value}
if self.choices.issuperset(value):
# indicate success
return value
# otherwise, chain up
return super().validate(value=value, **kwds)
# meta-methods
def __init__(self, choices, **kwds):
# chain up
super().__init__(**kwds)
# save my choices
self.choices = set(choices)
# all done
return
def __str__(self):
return "a subset of {!r}".format(self.choices)
# end of file
| 19.9375
| 78
| 0.585162
|
dcee08fd5d3009b6f952e447fe04196d3a331859
| 4,732
|
py
|
Python
|
projects/tests/analysis-weibel.py
|
Krissmedt/imprunko
|
94171d0d47171cc4b199cd52f5f29385cbff903e
|
[
"MIT"
] | 5
|
2018-10-26T07:08:16.000Z
|
2019-05-10T06:47:37.000Z
|
projects/tests/analysis-weibel.py
|
Krissmedt/imprunko
|
94171d0d47171cc4b199cd52f5f29385cbff903e
|
[
"MIT"
] | 9
|
2018-11-09T08:50:48.000Z
|
2019-06-06T20:11:12.000Z
|
projects/tests/analysis-weibel.py
|
Krissmedt/imprunko
|
94171d0d47171cc4b199cd52f5f29385cbff903e
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import h5py
import sys, os
import matplotlib.ticker as ticker
from scipy.stats import mstats
from scipy.optimize import curve_fit
from combine_files import combine_files, combine_tiles
from configSetup import Configuration
#--------------------------------------------------
# combine multiple files
fdir = 'weibel/out/'
conf = Configuration('config-weibel.ini')
print("files...")
ex = combine_files(fdir, "field", "ex", conf)
ey = combine_files(fdir, "field", "ey", conf)
ez = combine_files(fdir, "field", "ez", conf)
bx = combine_files(fdir, "field", "bx", conf)
by = combine_files(fdir, "field", "by", conf)
bz = combine_files(fdir, "field", "bz", conf)
rho = combine_files(fdir, "field", "rho", conf)
ekin = combine_files(fdir, "analysis", "edens",conf, isp=0)
#--------------------------------------------------
# read simulation data from file
print "Ex shape:", np.shape(ex)
#Read simulation values
dt = conf.dt*conf.interval
dx = conf.dx
print(dt, dx)
nt, nx, ny, nz = np.shape(ex)
maxi = -1
time = np.arange(nt)*dt
maxtime = time[maxi]
#--------------------------------------------------
#set up figure
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=7)
plt.rc('ytick', labelsize=7)
plt.rc('axes', labelsize=7)
fig = plt.figure(figsize=(3.54, 6.0)) #single column fig
#fig = plt.figure(figsize=(7.48, 4.0)) #two column figure
gs = plt.GridSpec(5, 1, wspace=0.0)
axs = []
axs.append( plt.subplot(gs[0,0]) )
axs.append( plt.subplot(gs[1,0]) )
axs.append( plt.subplot(gs[2,0]) )
axs.append( plt.subplot(gs[3,0]) )
axs.append( plt.subplot(gs[4,0]) )
for ax in axs:
ax.minorticks_on()
ax.set_xlabel(r'time $t\omega_{\mathrm{p}}$ ')
ax.set_xlim((0.0, maxtime))
axs[0].set_ylabel(r'$\ln \delta E_x$')
axs[1].set_ylabel(r'Energy $\epsilon$')
axs[2].set_ylabel(r'$\Delta m$')
axs[3].set_ylabel(r'$\epsilon_K$')
axs[4].set_ylabel(r'$E_T$')
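# Descriptive note: flatten_spatial() below collapses the trailing (nx, ny, nz) axes of each
# (time, nx, ny, nz) field array into a single axis, so the spatial max/sum reductions act on axis 1.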
def flatten_spatial(arr):
return arr.reshape( arr.shape[:-3] + (-1,) )
#--------------------------------------------------
# max{| X |}
axs[0].set_yscale('log')
ex_max = np.max( np.abs( flatten_spatial(ex) ),1 )
axs[0].plot(time, ex_max, 'b.-')
#ey_max = np.max( np.abs( flatten_spatial(ey) ),1 )
#axs[0].plot(time, ey_max, 'g-')
#
#bz_max = np.max( np.abs( flatten_spatial(bz) ),1 )
#axs[0].plot(time, bz_max, 'r-')
#axs[0].set_ylim(-20.0, 1.0)
#TODO:
Gm = 0.040
Gms = -22.7 + time*Gm
#axs[0].plot(time, Gms, 'r--')
##################################################
axs[1].set_yscale('log')
ex_edens = np.sum( flatten_spatial(ex*ex), 1 )
axs[1].plot(time, ex_edens, linestyle="dashed", color="blue")
ey_edens = np.sum( flatten_spatial(ey*ey), 1 )
axs[1].plot(time, ey_edens, linestyle="dotted", color="blue")
bz_edens = np.sum( flatten_spatial(bz*bz), 1 )
axs[1].plot(time, ex_edens, linestyle="-.", color="red")
plt.subplots_adjust(left=0.18, bottom=0.12, right=0.98, top=0.85, wspace=0.0, hspace=0.0)
plt.savefig('weibel/weibel.pdf')
#ey_edens = np.sum( flatten_spatial(ey*ey), 1 )
#axs[1].plot(time, ey_edens)
#
#bz_edens = np.sum( flatten_spatial(bz*bz), 1 )
#axs[1].plot(time, bz_edens)
edens = np.sum( flatten_spatial( ex*ex + ey*ey + ez*ez ), 1 )
#edens = np.sum( flatten_spatial( ex*ex ), 1 )
bdens = np.sum( flatten_spatial( bx*bx + by*by + bz*bz ), 1 )
#bdens = np.sum( flatten_spatial( bz*bz ), 1 )
axs[1].plot(time, edens, "b-")
axs[1].plot(time, bdens, "r-")
#TODO: model prediction
#Gms = -16.0 + time*Gm # 1/2 comes from compensation of E_x^2
#axs[1].plot(time, Gms, 'r--')
#axs[1].set_ylim(-10.0, 4.0)
##################################################
prtcls = np.sum( flatten_spatial(rho), 1) #integrate particle density
#prtcls /= prtcls[0]
prtcls = np.abs(prtcls - prtcls[0] )/prtcls[0]
#prtcls = np.clip(prtcls, 1.0e-8, 1.0e2)
axs[2].plot(time, np.log10(prtcls))
#axs[2].plot(time, prtcls)
##################################################
ekintot = np.sum( flatten_spatial(ekin), 1)
axs[3].plot(time, ekintot)
##################################################
print("ekin max:", np.max(ekintot))
print("efield max:", np.max(edens))
print("bfield max:", np.max(bdens))
print("ratio: ekin/e", np.mean(ekintot)/np.mean(edens))
print("ratio: ekin/b", np.mean(ekintot)/np.mean(bdens))
etot = ekintot + edens + bdens
#axs[4].plot(time, etot, "k-" )
#axs[4].plot(time, ekintot, "b--")
axs[4].plot(time, edens, "b--")
axs[4].plot(time, bdens, "r--")
#axs[4].plot(time, ex_edens, "r--")
#axs[4].plot(time, ey_edens, "r--")
#axs[4].plot(time, bz_edens, "r--")
plt.subplots_adjust(left=0.18, bottom=0.12, right=0.98, top=0.85, wspace=0.0, hspace=0.0)
plt.savefig('weibel/weibel.pdf')
| 25.44086
| 89
| 0.595309
|
18d578f2f59bdd1038e3f9d5c1566a9e84e9233c
| 2,033
|
py
|
Python
|
mlrun/runtimes/sparkjob/spark2job.py
|
george0st/mlrun
|
6467d3a5ceadf6cd35512b84b3ddc3da611cf39a
|
[
"Apache-2.0"
] | null | null | null |
mlrun/runtimes/sparkjob/spark2job.py
|
george0st/mlrun
|
6467d3a5ceadf6cd35512b84b3ddc3da611cf39a
|
[
"Apache-2.0"
] | null | null | null |
mlrun/runtimes/sparkjob/spark2job.py
|
george0st/mlrun
|
6467d3a5ceadf6cd35512b84b3ddc3da611cf39a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils import update_in, verify_and_update_in
from .abstract import AbstractSparkJobSpec, AbstractSparkRuntime
class Spark2JobSpec(AbstractSparkJobSpec):
pass
class Spark2Runtime(AbstractSparkRuntime):
def _enrich_job(self, job):
update_in(job, "spec.serviceAccount", self.spec.service_account or "sparkapp")
if "requests" in self.spec.driver_resources:
if "cpu" in self.spec.driver_resources["requests"]:
verify_and_update_in(
job,
"spec.driver.cores",
self.spec.driver_resources["requests"]["cpu"],
int,
)
return
def with_priority_class(self, name: str = None):
raise NotImplementedError("Not supported in spark 2 operator")
def _get_spark_version(self):
return "2.4.5"
def _get_igz_deps(self):
return {
"jars": [
"/spark/v3io-libs/v3io-hcfs_2.11.jar",
"/spark/v3io-libs/v3io-spark2-streaming_2.11.jar",
"/spark/v3io-libs/v3io-spark2-object-dataframe_2.11.jar",
"/igz/java/libs/scala-library-2.11.12.jar",
],
"files": ["/igz/java/libs/v3io-pyspark.zip"],
}
@property
def spec(self) -> Spark2JobSpec:
return self._spec
@spec.setter
def spec(self, spec):
self._spec = self._verify_dict(spec, "spec", Spark2JobSpec)
| 33.883333
| 86
| 0.640925
|
7cad1a354e729f3f4c8ba9ba097e46b9dd2b620e
| 19,460
|
py
|
Python
|
vendor/Twisted-10.0.0/twisted/web/test/test_proxy.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | null | null | null |
vendor/Twisted-10.0.0/twisted/web/test/test_proxy.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 1
|
2020-08-02T15:40:49.000Z
|
2020-08-02T15:40:49.000Z
|
vendor/Twisted-10.0.0/twisted/web/test/test_proxy.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 1
|
2020-07-25T19:36:05.000Z
|
2020-07-25T19:36:05.000Z
|
# Copyright (c) 2007-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test for L{twisted.web.proxy}.
"""
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import StringTransportWithDisconnection
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.web.proxy import ReverseProxyResource, ProxyClientFactory
from twisted.web.proxy import ProxyClient, ProxyRequest, ReverseProxyRequest
from twisted.web.test.test_web import DummyRequest
class ReverseProxyResourceTestCase(TestCase):
"""
Tests for L{ReverseProxyResource}.
"""
def _testRender(self, uri, expectedURI):
"""
        Check that a request pointing at C{uri} produces a new proxy connection,
with the path of this request pointing at C{expectedURI}.
"""
root = Resource()
reactor = MemoryReactor()
resource = ReverseProxyResource("127.0.0.1", 1234, "/path", reactor)
root.putChild('index', resource)
site = Site(root)
transport = StringTransportWithDisconnection()
channel = site.buildProtocol(None)
channel.makeConnection(transport)
# Clear the timeout if the tests failed
self.addCleanup(channel.connectionLost, None)
channel.dataReceived("GET %s HTTP/1.1\r\nAccept: text/html\r\n\r\n" %
(uri,))
# Check that one connection has been created, to the good host/port
self.assertEquals(len(reactor.tcpClients), 1)
self.assertEquals(reactor.tcpClients[0][0], "127.0.0.1")
self.assertEquals(reactor.tcpClients[0][1], 1234)
# Check the factory passed to the connect, and its given path
factory = reactor.tcpClients[0][2]
self.assertIsInstance(factory, ProxyClientFactory)
self.assertEquals(factory.rest, expectedURI)
self.assertEquals(factory.headers["host"], "127.0.0.1:1234")
def test_render(self):
"""
Test that L{ReverseProxyResource.render} initiates a connection to the
given server with a L{ProxyClientFactory} as parameter.
"""
return self._testRender("/index", "/path")
def test_renderWithQuery(self):
"""
Test that L{ReverseProxyResource.render} passes query parameters to the
created factory.
"""
return self._testRender("/index?foo=bar", "/path?foo=bar")
def test_getChild(self):
"""
The L{ReverseProxyResource.getChild} method should return a resource
instance with the same class as the originating resource, forward port
and host values, and update the path value with the value passed.
"""
resource = ReverseProxyResource("127.0.0.1", 1234, "/path")
child = resource.getChild('foo', None)
# The child should keep the same class
self.assertIsInstance(child, ReverseProxyResource)
self.assertEquals(child.path, "/path/foo")
self.assertEquals(child.port, 1234)
self.assertEquals(child.host, "127.0.0.1")
def test_getChildWithSpecial(self):
"""
The L{ReverseProxyResource} return by C{getChild} has a path which has
already been quoted.
"""
resource = ReverseProxyResource("127.0.0.1", 1234, "/path")
child = resource.getChild(' /%', None)
self.assertEqual(child.path, "/path/%20%2F%25")
class DummyChannel(object):
"""
A dummy HTTP channel, that does nothing but holds a transport and saves
connection lost.
@ivar transport: the transport used by the client.
@ivar lostReason: the reason saved at connection lost.
"""
def __init__(self, transport):
"""
Hold a reference to the transport.
"""
self.transport = transport
self.lostReason = None
def connectionLost(self, reason):
"""
Keep track of the connection lost reason.
"""
self.lostReason = reason
class ProxyClientTestCase(TestCase):
"""
Tests for L{ProxyClient}.
"""
def _parseOutHeaders(self, content):
"""
Parse the headers out of some web content.
@param content: Bytes received from a web server.
@return: A tuple of (requestLine, headers, body). C{headers} is a dict
of headers, C{requestLine} is the first line (e.g. "POST /foo ...")
and C{body} is whatever is left.
"""
headers, body = content.split('\r\n\r\n')
headers = headers.split('\r\n')
requestLine = headers.pop(0)
return (
requestLine, dict(header.split(': ') for header in headers), body)
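    # Illustration (not part of the original tests): for the bytes
    #   "GET /foo HTTP/1.0\r\nHost: example.com\r\n\r\nbody"
    # this helper returns
    #   ("GET /foo HTTP/1.0", {"Host": "example.com"}, "body").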
def makeRequest(self, path):
"""
Make a dummy request object for the URL path.
@param path: A URL path, beginning with a slash.
@return: A L{DummyRequest}.
"""
return DummyRequest(path)
def makeProxyClient(self, request, method="GET", headers=None,
requestBody=""):
"""
Make a L{ProxyClient} object used for testing.
@param request: The request to use.
@param method: The HTTP method to use, GET by default.
@param headers: The HTTP headers to use expressed as a dict. If not
provided, defaults to {'accept': 'text/html'}.
@param requestBody: The body of the request. Defaults to the empty
string.
@return: A L{ProxyClient}
"""
if headers is None:
headers = {"accept": "text/html"}
path = '/' + request.postpath
return ProxyClient(
method, path, 'HTTP/1.0', headers, requestBody, request)
def connectProxy(self, proxyClient):
"""
Connect a proxy client to a L{StringTransportWithDisconnection}.
@param proxyClient: A L{ProxyClient}.
@return: The L{StringTransportWithDisconnection}.
"""
clientTransport = StringTransportWithDisconnection()
clientTransport.protocol = proxyClient
proxyClient.makeConnection(clientTransport)
return clientTransport
def assertForwardsHeaders(self, proxyClient, requestLine, headers):
"""
Assert that C{proxyClient} sends C{headers} when it connects.
@param proxyClient: A L{ProxyClient}.
@param requestLine: The request line we expect to be sent.
@param headers: A dict of headers we expect to be sent.
@return: If the assertion is successful, return the request body as
bytes.
"""
self.connectProxy(proxyClient)
requestContent = proxyClient.transport.value()
receivedLine, receivedHeaders, body = self._parseOutHeaders(
requestContent)
self.assertEquals(receivedLine, requestLine)
self.assertEquals(receivedHeaders, headers)
return body
def makeResponseBytes(self, code, message, headers, body):
lines = ["HTTP/1.0 %d %s" % (code, message)]
for header, values in headers:
for value in values:
lines.append("%s: %s" % (header, value))
lines.extend(['', body])
return '\r\n'.join(lines)
def assertForwardsResponse(self, request, code, message, headers, body):
"""
Assert that C{request} has forwarded a response from the server.
@param request: A L{DummyRequest}.
@param code: The expected HTTP response code.
@param message: The expected HTTP message.
@param headers: The expected HTTP headers.
@param body: The expected response body.
"""
self.assertEquals(request.responseCode, code)
self.assertEquals(request.responseMessage, message)
receivedHeaders = list(request.responseHeaders.getAllRawHeaders())
receivedHeaders.sort()
expectedHeaders = headers[:]
expectedHeaders.sort()
self.assertEquals(receivedHeaders, expectedHeaders)
self.assertEquals(''.join(request.written), body)
def _testDataForward(self, code, message, headers, body, method="GET",
requestBody="", loseConnection=True):
"""
Build a fake proxy connection, and send C{data} over it, checking that
it's forwarded to the originating request.
"""
request = self.makeRequest('foo')
client = self.makeProxyClient(
request, method, {'accept': 'text/html'}, requestBody)
receivedBody = self.assertForwardsHeaders(
client, '%s /foo HTTP/1.0' % (method,),
{'connection': 'close', 'accept': 'text/html'})
self.assertEquals(receivedBody, requestBody)
# Fake an answer
client.dataReceived(
self.makeResponseBytes(code, message, headers, body))
# Check that the response data has been forwarded back to the original
# requester.
self.assertForwardsResponse(request, code, message, headers, body)
# Check that when the response is done, the request is finished.
if loseConnection:
client.transport.loseConnection()
# Even if we didn't call loseConnection, the transport should be
# disconnected. This lets us not rely on the server to close our
# sockets for us.
self.assertFalse(client.transport.connected)
self.assertEquals(request.finished, 1)
def test_forward(self):
"""
When connected to the server, L{ProxyClient} should send the saved
request, with modifications of the headers, and then forward the result
to the parent request.
"""
return self._testDataForward(
200, "OK", [("Foo", ["bar", "baz"])], "Some data\r\n")
def test_postData(self):
"""
Try to post content in the request, and check that the proxy client
        forwards the body of the request.
"""
return self._testDataForward(
200, "OK", [("Foo", ["bar"])], "Some data\r\n", "POST", "Some content")
def test_statusWithMessage(self):
"""
If the response contains a status with a message, it should be
forwarded to the parent request with all the information.
"""
return self._testDataForward(
404, "Not Found", [], "")
def test_contentLength(self):
"""
If the response contains a I{Content-Length} header, the inbound
request object should still only have C{finish} called on it once.
"""
data = "foo bar baz"
return self._testDataForward(
200, "OK", [("Content-Length", [str(len(data))])], data)
def test_losesConnection(self):
"""
If the response contains a I{Content-Length} header, the outgoing
connection is closed when all response body data has been received.
"""
data = "foo bar baz"
return self._testDataForward(
200, "OK", [("Content-Length", [str(len(data))])], data,
loseConnection=False)
def test_headersCleanups(self):
"""
The headers given at initialization should be modified:
B{proxy-connection} should be removed if present, and B{connection}
should be added.
"""
client = ProxyClient('GET', '/foo', 'HTTP/1.0',
{"accept": "text/html", "proxy-connection": "foo"}, '', None)
self.assertEquals(client.headers,
{"accept": "text/html", "connection": "close"})
def test_keepaliveNotForwarded(self):
"""
The proxy doesn't really know what to do with keepalive things from
the remote server, so we stomp over any keepalive header we get from
the client.
"""
headers = {
"accept": "text/html",
'keep-alive': '300',
'connection': 'keep-alive',
}
expectedHeaders = headers.copy()
expectedHeaders['connection'] = 'close'
del expectedHeaders['keep-alive']
client = ProxyClient('GET', '/foo', 'HTTP/1.0', headers, '', None)
self.assertForwardsHeaders(
client, 'GET /foo HTTP/1.0', expectedHeaders)
def test_defaultHeadersOverridden(self):
"""
L{server.Request} within the proxy sets certain response headers by
default. When we get these headers back from the remote server, the
defaults are overridden rather than simply appended.
"""
request = self.makeRequest('foo')
request.responseHeaders.setRawHeaders('server', ['old-bar'])
request.responseHeaders.setRawHeaders('date', ['old-baz'])
request.responseHeaders.setRawHeaders('content-type', ["old/qux"])
client = self.makeProxyClient(request, headers={'accept': 'text/html'})
self.connectProxy(client)
headers = {
'Server': ['bar'],
'Date': ['2010-01-01'],
'Content-Type': ['application/x-baz'],
}
client.dataReceived(
self.makeResponseBytes(200, "OK", headers.items(), ''))
self.assertForwardsResponse(
request, 200, 'OK', headers.items(), '')
class ProxyClientFactoryTestCase(TestCase):
"""
Tests for L{ProxyClientFactory}.
"""
def test_connectionFailed(self):
"""
Check that L{ProxyClientFactory.clientConnectionFailed} produces
a B{501} response to the parent request.
"""
request = DummyRequest(['foo'])
factory = ProxyClientFactory('GET', '/foo', 'HTTP/1.0',
{"accept": "text/html"}, '', request)
factory.clientConnectionFailed(None, None)
self.assertEquals(request.responseCode, 501)
self.assertEquals(request.responseMessage, "Gateway error")
self.assertEquals(
list(request.responseHeaders.getAllRawHeaders()),
[("Content-Type", ["text/html"])])
self.assertEquals(
''.join(request.written),
"<H1>Could not connect</H1>")
self.assertEquals(request.finished, 1)
def test_buildProtocol(self):
"""
L{ProxyClientFactory.buildProtocol} should produce a L{ProxyClient}
with the same values of attributes (with updates on the headers).
"""
factory = ProxyClientFactory('GET', '/foo', 'HTTP/1.0',
{"accept": "text/html"}, 'Some data',
None)
proto = factory.buildProtocol(None)
self.assertIsInstance(proto, ProxyClient)
self.assertEquals(proto.command, 'GET')
self.assertEquals(proto.rest, '/foo')
self.assertEquals(proto.data, 'Some data')
self.assertEquals(proto.headers,
{"accept": "text/html", "connection": "close"})
class ProxyRequestTestCase(TestCase):
"""
Tests for L{ProxyRequest}.
"""
def _testProcess(self, uri, expectedURI, method="GET", data=""):
"""
Build a request pointing at C{uri}, and check that a proxied request
        is created, pointing at C{expectedURI}.
"""
transport = StringTransportWithDisconnection()
channel = DummyChannel(transport)
reactor = MemoryReactor()
request = ProxyRequest(channel, False, reactor)
request.gotLength(len(data))
request.handleContentChunk(data)
request.requestReceived(method, 'http://example.com%s' % (uri,),
'HTTP/1.0')
self.assertEquals(len(reactor.tcpClients), 1)
self.assertEquals(reactor.tcpClients[0][0], "example.com")
self.assertEquals(reactor.tcpClients[0][1], 80)
factory = reactor.tcpClients[0][2]
self.assertIsInstance(factory, ProxyClientFactory)
self.assertEquals(factory.command, method)
self.assertEquals(factory.version, 'HTTP/1.0')
self.assertEquals(factory.headers, {'host': 'example.com'})
self.assertEquals(factory.data, data)
self.assertEquals(factory.rest, expectedURI)
self.assertEquals(factory.father, request)
def test_process(self):
"""
L{ProxyRequest.process} should create a connection to the given server,
with a L{ProxyClientFactory} as connection factory, with the correct
parameters:
            - forward command, version and data values
- update headers with the B{host} value
- remove the host from the URL
- pass the request as parent request
"""
return self._testProcess("/foo/bar", "/foo/bar")
def test_processWithoutTrailingSlash(self):
"""
If the incoming request doesn't contain a slash,
L{ProxyRequest.process} should add one when instantiating
L{ProxyClientFactory}.
"""
return self._testProcess("", "/")
def test_processWithData(self):
"""
L{ProxyRequest.process} should be able to retrieve request body and
to forward it.
"""
return self._testProcess(
"/foo/bar", "/foo/bar", "POST", "Some content")
def test_processWithPort(self):
"""
        Check that L{ProxyRequest.process} correctly parses the port in the
        incoming URL, and creates an outgoing connection with this port.
"""
transport = StringTransportWithDisconnection()
channel = DummyChannel(transport)
reactor = MemoryReactor()
request = ProxyRequest(channel, False, reactor)
request.gotLength(0)
request.requestReceived('GET', 'http://example.com:1234/foo/bar',
'HTTP/1.0')
# That should create one connection, with the port parsed from the URL
self.assertEquals(len(reactor.tcpClients), 1)
self.assertEquals(reactor.tcpClients[0][0], "example.com")
self.assertEquals(reactor.tcpClients[0][1], 1234)
class DummyFactory(object):
"""
A simple holder for C{host} and C{port} information.
"""
def __init__(self, host, port):
self.host = host
self.port = port
class ReverseProxyRequestTestCase(TestCase):
"""
Tests for L{ReverseProxyRequest}.
"""
def test_process(self):
"""
L{ReverseProxyRequest.process} should create a connection to its
factory host/port, using a L{ProxyClientFactory} instantiated with the
        correct parameters, and particularly set the B{host} header to the
factory host.
"""
transport = StringTransportWithDisconnection()
channel = DummyChannel(transport)
reactor = MemoryReactor()
request = ReverseProxyRequest(channel, False, reactor)
request.factory = DummyFactory("example.com", 1234)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
# Check that one connection has been created, to the good host/port
self.assertEquals(len(reactor.tcpClients), 1)
self.assertEquals(reactor.tcpClients[0][0], "example.com")
self.assertEquals(reactor.tcpClients[0][1], 1234)
# Check the factory passed to the connect, and its headers
factory = reactor.tcpClients[0][2]
self.assertIsInstance(factory, ProxyClientFactory)
self.assertEquals(factory.headers, {'host': 'example.com'})
| 35.904059
| 83
| 0.620041
|
044f8e01f9bcead7d4a94836dc44912388272632
| 9,986
|
py
|
Python
|
Pytorch/convForecastNet.py
|
khsibr/forecastNet
|
f3a3d8a7a675dfdd37365e9945c1d02548465c61
|
[
"MIT"
] | 81
|
2020-02-18T19:07:28.000Z
|
2022-03-22T23:08:09.000Z
|
Pytorch/convForecastNet.py
|
khsibr/forecastNet
|
f3a3d8a7a675dfdd37365e9945c1d02548465c61
|
[
"MIT"
] | 12
|
2020-05-02T14:48:10.000Z
|
2021-08-16T02:51:21.000Z
|
Pytorch/convForecastNet.py
|
khsibr/forecastNet
|
f3a3d8a7a675dfdd37365e9945c1d02548465c61
|
[
"MIT"
] | 23
|
2020-02-20T11:22:21.000Z
|
2022-03-26T07:46:58.000Z
|
"""
ForecastNet with cells comprising a convolutional neural network.
ForecastNetConvModel provides the mixture density network outputs.
ForecastNetConvModel2 provides the linear outputs.
Paper:
"ForecastNet: A Time-Variant Deep Feed-Forward Neural Network Architecture for Multi-Step-Ahead Time-Series Forecasting"
by Joel Janek Dabrowski, YiFan Zhang, and Ashfaqur Rahman
Link to the paper: https://arxiv.org/abs/2002.04155
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class ForecastNetConvModel(nn.Module):
"""
Class for the convolutional hidden cell version of the model
"""
def __init__(self, input_dim, hidden_dim, output_dim, in_seq_length, out_seq_length, device):
"""
Constructor
:param input_dim: Dimension of the inputs
:param hidden_dim: Number of hidden units
:param output_dim: Dimension of the outputs
:param in_seq_length: Length of the input sequence
:param out_seq_length: Length of the output sequence
        :param device: The device on which computations are performed.
"""
super(ForecastNetConvModel, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.in_seq_length = in_seq_length
self.out_seq_length = out_seq_length
self.device = device
self.conv_layer1 = nn.ModuleList([nn.Conv1d(in_channels=1, out_channels=hidden_dim, kernel_size=5, padding=2) for i in range(out_seq_length)])
self.conv_layer2 = nn.ModuleList([nn.Conv1d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1) for i in range(out_seq_length)])
flatten_layer = [nn.Linear(hidden_dim * (input_dim * in_seq_length), hidden_dim)]
for i in range(out_seq_length - 1):
flatten_layer.append(nn.Linear(hidden_dim * (input_dim * in_seq_length + hidden_dim + output_dim), hidden_dim))
self.flatten_layer = nn.ModuleList(flatten_layer)
self.mu_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)])
self.sigma_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)])
# # Convolutional Layers with Pooling
# self.conv_layer1 = nn.ModuleList([nn.Conv1d(in_channels=1, out_channels=hidden_dim, kernel_size=5, padding=2) for i in range(out_seq_length)])
# self.pool_layer1 = nn.ModuleList([nn.AvgPool1d(kernel_size=2, padding=0) for i in range(out_seq_length)])
# self.conv_layer2 = nn.ModuleList([nn.Conv1d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1) for i in range(out_seq_length)])
# self.pool_layer2 = nn.ModuleList([nn.AvgPool1d(kernel_size=2, padding=0) for i in range(out_seq_length) for i in range(out_seq_length)])
# flatten_layer = [nn.Linear(hidden_dim//4 * (input_dim * in_seq_length), hidden_dim)]
# for i in range(out_seq_length - 1):
# flatten_layer.append(nn.Linear(hidden_dim * ((input_dim * in_seq_length + hidden_dim + output_dim) // 4), hidden_dim))
# self.flatten_layer = nn.ModuleList(flatten_layer)
# self.mu_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)])
# self.sigma_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)])
def forward(self, input, target, is_training=False):
"""
Forward propagation of the convolutional ForecastNet model
        :param input: Input data in the form [batch_size, input_dim * in_seq_length]
:param target: Target data in the form [output_seq_length, batch_size, output_dim]
:param is_training: If true, use target data for training, else use the previous output.
:return: outputs: Sampled forecast outputs in the form [decoder_seq_length, batch_size, input_dim]
:return: mu: Outputs of the mean layer [decoder_seq_length, batch_size, input_dim]
:return: sigma: Outputs of the standard deviation layer [decoder_seq_length, batch_size, input_dim]
"""
# Initialise outputs
outputs = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device)
mu = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device)
sigma = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device)
# First input
next_cell_input = input.unsqueeze(dim=1)
# Propagate through network
for i in range(self.out_seq_length):
# Propagate through the cell
hidden = F.relu(self.conv_layer1[i](next_cell_input))
# hidden = self.pool_layer1[i](hidden)
hidden = F.relu(self.conv_layer2[i](hidden))
# hidden = self.pool_layer2[i](hidden)
hidden = hidden.reshape((input.shape[0], -1))
hidden = F.relu(self.flatten_layer[i](hidden))
# Calculate output
mu_ = self.mu_layer[i](hidden)
sigma_ = F.softplus(self.sigma_layer[i](hidden))
mu[i,:,:] = mu_
sigma[i,:,:] = sigma_
outputs[i,:,:] = torch.normal(mu_, sigma_).to(self.device)
# Prepare the next input
if is_training:
next_cell_input = torch.cat((input, hidden, target[i, :, :]), dim=1).unsqueeze(dim=1)
else:
next_cell_input = torch.cat((input, hidden, outputs[i, :, :]), dim=1).unsqueeze(dim=1)
# Concatenate next input and
return outputs, mu, sigma
class ForecastNetConvModel2(nn.Module):
"""
Class for the convolutional hidden cell version of the model
"""
def __init__(self, input_dim, hidden_dim, output_dim, in_seq_length, out_seq_length, device):
"""
Constructor
:param input_dim: Dimension of the inputs
:param hidden_dim: Number of hidden units
:param output_dim: Dimension of the outputs
:param in_seq_length: Length of the input sequence
:param out_seq_length: Length of the output sequence
        :param device: The device on which computations are performed.
"""
super(ForecastNetConvModel2, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.in_seq_length = in_seq_length
self.out_seq_length = out_seq_length
self.device = device
self.conv_layer1 = nn.ModuleList([nn.Conv1d(in_channels=1, out_channels=hidden_dim, kernel_size=5, padding=2) for i in range(out_seq_length)])
self.conv_layer2 = nn.ModuleList([nn.Conv1d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1) for i in range(out_seq_length)])
flatten_layer = [nn.Linear(hidden_dim * (input_dim * in_seq_length), hidden_dim)]
for i in range(out_seq_length - 1):
flatten_layer.append(nn.Linear(hidden_dim * (input_dim * in_seq_length + hidden_dim + output_dim), hidden_dim))
self.flatten_layer = nn.ModuleList(flatten_layer)
self.output_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)])
# # Convolutional Layers with Pooling
# self.conv_layer1 = nn.ModuleList([nn.Conv1d(in_channels=1, out_channels=hidden_dim, kernel_size=5, padding=2) for i in range(out_seq_length)])
# self.pool_layer1 = nn.ModuleList([nn.AvgPool1d(kernel_size=2, padding=0) for i in range(out_seq_length)])
# self.conv_layer2 = nn.ModuleList([nn.Conv1d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, padding=1) for i in range(out_seq_length)])
# self.pool_layer2 = nn.ModuleList([nn.AvgPool1d(kernel_size=2, padding=0) for i in range(out_seq_length) for i in range(out_seq_length)])
# flatten_layer = [nn.Linear(hidden_dim//4 * (input_dim * in_seq_length), hidden_dim)]
# for i in range(out_seq_length - 1):
# flatten_layer.append(nn.Linear(hidden_dim * ((input_dim * in_seq_length + hidden_dim + output_dim) // 4), hidden_dim))
# self.flatten_layer = nn.ModuleList(flatten_layer)
# self.output_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)])
def forward(self, input, target, is_training=False):
"""
Forward propagation of the convolutional ForecastNet model
        :param input: Input data in the form [batch_size, input_dim * in_seq_length]
:param target: Target data in the form [output_seq_length, batch_size, output_dim]
:param is_training: If true, use target data for training, else use the previous output.
:return: outputs: Forecast outputs in the form [decoder_seq_length, batch_size, input_dim]
"""
# Initialise outputs
outputs = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device)
# First input
next_cell_input = input.unsqueeze(dim=1)
# Propagate through network
for i in range(self.out_seq_length):
# Propagate through the cell
hidden = F.relu(self.conv_layer1[i](next_cell_input))
# hidden = self.pool_layer1[i](hidden)
hidden = F.relu(self.conv_layer2[i](hidden))
# hidden = self.pool_layer2[i](hidden)
hidden = hidden.reshape((input.shape[0], -1))
hidden = F.relu(self.flatten_layer[i](hidden))
# Calculate output
output = self.output_layer[i](hidden)
outputs[i,:,:] = output
# Prepare the next input
if is_training:
next_cell_input = torch.cat((input, hidden, target[i, :, :]), dim=1).unsqueeze(dim=1)
else:
next_cell_input = torch.cat((input, hidden, outputs[i, :, :]), dim=1).unsqueeze(dim=1)
# Concatenate next input and
return outputs
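# Minimal usage sketch (not part of the original file; the dimensions below
# are invented for illustration). Note that forward() consumes a flattened
# input of shape [batch_size, input_dim * in_seq_length]; it is unsqueezed to
# a single convolution channel internally.
if __name__ == "__main__":
    batch_size, input_dim, output_dim = 4, 1, 1
    in_seq_length, out_seq_length, hidden_dim = 10, 5, 16
    device = torch.device("cpu")
    model = ForecastNetConvModel2(input_dim, hidden_dim, output_dim,
                                  in_seq_length, out_seq_length, device)
    x = torch.randn(batch_size, input_dim * in_seq_length)
    target = torch.randn(out_seq_length, batch_size, output_dim)
    outputs = model(x, target, is_training=True)
    print(outputs.shape)  # torch.Size([5, 4, 1])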
| 56.418079
| 161
| 0.676948
|
f1f666c1c27e5dc1c4ce346ace673bf99e4051b6
| 358
|
py
|
Python
|
src/344-reverse-string.py
|
sahilrider/LeetCode-Solutions
|
9cac844c27b5dbf37a70c2981a09cd92457f7ff1
|
[
"MIT"
] | 2
|
2020-03-06T11:44:25.000Z
|
2020-03-13T20:07:48.000Z
|
src/344-reverse-string.py
|
sahilrider/LeetCode-Solutions
|
9cac844c27b5dbf37a70c2981a09cd92457f7ff1
|
[
"MIT"
] | null | null | null |
src/344-reverse-string.py
|
sahilrider/LeetCode-Solutions
|
9cac844c27b5dbf37a70c2981a09cd92457f7ff1
|
[
"MIT"
] | null | null | null |
'''https://leetcode.com/problems/reverse-string/'''
from typing import List
class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
def helper(l, r):
if l<r:
s[l], s[r] = s[r], s[l]
helper(l+1, r-1)
helper(0, len(s)-1)
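# Quick self-check (not part of the original submission; relies on the
# `from typing import List` import added above so the file runs standalone):
if __name__ == "__main__":
    chars = ["h", "e", "l", "l", "o"]
    Solution().reverseString(chars)
    print(chars)  # ['o', 'l', 'l', 'e', 'h']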
| 27.538462
| 58
| 0.46648
|
fd70ec0925eb330593e16636f67c4394b13ecb77
| 197
|
py
|
Python
|
py-scripts/utf-clean.py
|
Inverseit/dotfiles
|
0a8d6abcc67c0f4db9913b3915635e99c8a0a52c
|
[
"BSL-1.0"
] | 1
|
2021-12-12T10:07:18.000Z
|
2021-12-12T10:07:18.000Z
|
py-scripts/utf-clean.py
|
Inverseit/dotfiles
|
0a8d6abcc67c0f4db9913b3915635e99c8a0a52c
|
[
"BSL-1.0"
] | null | null | null |
py-scripts/utf-clean.py
|
Inverseit/dotfiles
|
0a8d6abcc67c0f4db9913b3915635e99c8a0a52c
|
[
"BSL-1.0"
] | null | null | null |
#!/usr/bin/python3
import sys
text = ''
for line in sys.stdin:
line = line.replace(r'“', r'"')
line = line.replace(r'”', r'"')
line = line.replace(r'’', r"'")
text += line
print(text.strip())
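# Usage sketch (hypothetical invocation, not part of the original script):
#   echo 'He said “hi”, didn’t he?' | ./utf-clean.py
# prints: He said "hi", didn't he?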
| 17.909091
| 32
| 0.588832
|
55b72d7954c0b2fba21ac6e80edcf9a4afc6194d
| 13,456
|
py
|
Python
|
inverse_warp.py
|
WestCityInstitute/DeepSFM
|
393ad8f39c5a305f3b351af497dc510697ee931e
|
[
"BSD-3-Clause"
] | 235
|
2020-08-14T13:30:39.000Z
|
2022-03-23T09:56:12.000Z
|
inverse_warp.py
|
WestCityInstitute/DeepSFM
|
393ad8f39c5a305f3b351af497dc510697ee931e
|
[
"BSD-3-Clause"
] | 12
|
2020-08-26T13:09:59.000Z
|
2022-03-22T04:41:46.000Z
|
inverse_warp.py
|
WestCityInstitute/DeepSFM
|
393ad8f39c5a305f3b351af497dc510697ee931e
|
[
"BSD-3-Clause"
] | 40
|
2020-08-24T06:29:41.000Z
|
2022-03-13T01:45:40.000Z
|
from __future__ import division
import torch
from torch.autograd import Variable
import torch.nn.functional as F
pixel_coords = None
def set_id_grid(depth):
global pixel_coords
b, h, w = depth.size()
i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1, h, w)).type_as(depth) # [1, H, W]
j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1, h, w)).type_as(depth) # [1, H, W]
ones = Variable(torch.ones(1, h, w)).type_as(depth)
pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]
def check_sizes(input, input_name, expected):
condition = [input.ndimension() == len(expected)]
for i, size in enumerate(expected):
if size.isdigit():
condition.append(input.size(i) == int(size))
assert (all(condition)), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected),
list(input.size()))
def pixel2cam(depth, intrinsics_inv):
global pixel_coords
"""Transform coordinates in the pixel frame to the camera frame.
Args:
depth: depth maps -- [B, H, W]
intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]
Returns:
array of (u,v,1) cam coordinates -- [B, 3, H, W]
"""
b, h, w = depth.size()
if (pixel_coords is None) or pixel_coords.size(2) < h:
set_id_grid(depth)
current_pixel_coords = pixel_coords[:, :, :h, :w].expand(b, 3, h, w).contiguous().view(b, 3,
-1).cuda() # [B, 3, H*W]
cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, h, w)
return cam_coords * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode, rounded=False):
"""Transform coordinates in the camera frame to the pixel frame.
Args:
cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]
proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]
proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]
Returns:
array of [-1,1] coordinates -- [B, 2, H, W]
"""
b, _, h, w = cam_coords.size()
cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]
if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.bmm(cam_coords_flat)
else:
pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]
X = pcoords[:, 0]
Y = pcoords[:, 1]
Z = pcoords[:, 2].clamp(min=1e-3)
if rounded:
X_norm = torch.round(2 * (X / Z)) / (
w - 1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
Y_norm = torch.round(2 * (Y / Z)) / (h - 1) - 1 # Idem [B, H*W]
else:
X_norm = 2 * (X / Z) / (
w - 1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
Y_norm = 2 * (Y / Z) / (h - 1) - 1 # Idem [B, H*W]
if padding_mode == 'zeros':
X_mask = ((X_norm > 1) + (X_norm < -1)).detach()
        X_norm[X_mask] = 2 # make sure that no point in warped image is a combination of im and gray
Y_mask = ((Y_norm > 1) + (Y_norm < -1)).detach()
Y_norm[Y_mask] = 2
pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]
return pixel_coords.view(b, h, w, 2)
def cam2pixel_cost(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
"""Transform coordinates in the camera frame to the pixel frame.
Args:
cam_coords: [B, 3, H, W]
proj_c2p_rot: rotation -- b * NNN* 3 * 3
proj_c2p_tr: translation -- b * NNN * 3 * 1
Returns:
array of [-1,1] coordinates -- [B, NNN, 2, H, W]
"""
b, _, h, w = cam_coords.size()
n = proj_c2p_rot.shape[1]
cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]
# if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.matmul(cam_coords_flat.view(b, 1, 3, h * w)) # b * NNN * 3 * (h*w)
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr # [B, NNN, 3, H*W]
X = pcoords[:, :, 0] # [B, NNN, H*W]
Y = pcoords[:, :, 1]
Z = pcoords[:, :, 2].clamp(min=1e-3)
X_norm = 2 * (X / Z) / (w - 1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
Y_norm = 2 * (Y / Z) / (h - 1) - 1 # Idem [B, H*W]
if padding_mode == 'zeros':
X_mask = ((X_norm > 1) + (X_norm < -1)).detach()
        X_norm[X_mask] = 2 # make sure that no point in warped image is a combination of im and gray
Y_mask = ((Y_norm > 1) + (Y_norm < -1)).detach()
Y_norm[Y_mask] = 2
pixel_coords = torch.stack([X_norm, Y_norm], dim=3) # [B, NNN, H*W, 2]
return pixel_coords.view(b, -1, h, w, 2)
def cam2depth(cam_coords, proj_c2p_rot, proj_c2p_tr):
"""Transform coordinates in the camera frame to the pixel frame.
Args:
cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]
proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]
proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]
Returns:
depth -- [B, H, W]
"""
b, _, h, w = cam_coords.size()
cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]
if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.bmm(cam_coords_flat)
else:
pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]
z = pcoords[:, 2, :].contiguous()
return z.view(b, h, w)
def cam2depth_cost(cam_coords, proj_c2p_rot, proj_c2p_tr):
"""Transform coordinates in the camera frame to the pixel frame.
Args:
cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 3, H, W]
proj_c2p_rot: rotation matrix of cameras -- b * nnn* 3 * 3
proj_c2p_tr: translation vectors of cameras -- b * nnn* 3 * 1
Returns:
depth -- [B, nnn, H, W]
"""
b, _, h, w = cam_coords.size()
n = proj_c2p_rot.shape[1]
cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]
# if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.matmul(cam_coords_flat.resize(b, 1, 3, h * w)) # b, nnn, 3, h*w
# else:
# pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr # b, nnn, 3, h*w
z = pcoords[:, :, 2, :].contiguous()
return z.view(b, n, h, w)
def depth_warp(fdepth, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
"""
warp a target depth to the source image plane.
Args:
fdepth: the source depth (where to sample pixels) -- [B, H, W]
depth: depth map of the target image -- [B, H, W]
pose: 6DoF pose parameters from target to source -- [B, 6]
intrinsics: camera intrinsic matrix -- [B, 3, 3]
intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
Returns:
target depth warped to the source image plane
"""
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B34')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert (intrinsics_inv.size() == intrinsics.size())
batch_size, feat_height, feat_width = depth.size()
cam_coords = pixel2cam(depth, intrinsics_inv)
pose_mat = pose
pose_mat = pose_mat.cuda()
# Get projection matrix for tgt camera frame to source pixel frame
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:, :, :3], proj_cam_to_src_pixel[:, :, -1:],
padding_mode, rounded=True) # [B,H,W,2]
projected_depth = cam2depth(cam_coords, pose_mat[:, :, :3], pose_mat[:, :, -1:])
# projected_depth = projected_depth.clamp(min=-1e1, max=1e3)
fdepth_expand = fdepth.unsqueeze(1)
fdepth_expand = torch.nn.functional.upsample(fdepth_expand, [feat_height, feat_width], mode='bilinear')
warped_depth = torch.nn.functional.grid_sample(fdepth_expand, src_pixel_coords, mode="nearest",
padding_mode=padding_mode)
warped_depth = warped_depth.view(batch_size, feat_height, feat_width)
# [B, H, W]
projected_depth = projected_depth.clamp(min=1e-3, max=float(torch.max(warped_depth) + 10))
return projected_depth, warped_depth
def depth_warp_cost(fdepth, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
"""
warp a target depth to the source image plane.
Args:
fdepth: the source depth (where to sample pixels) -- [B, H, W]
depth: depth map of the target image -- [B, H, W]
pose: 6DoF pose parameters from target to source -- b * n * n * n * 3 * 4
intrinsics: camera intrinsic matrix -- [B, 3, 3]
intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
Returns:
target depth warped to the source image plane
"""
check_sizes(depth, 'depth', 'BHW')
# check_sizes(pose, 'pose', 'BNN34')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert (intrinsics_inv.size() == intrinsics.size())
batch_size, feat_height, feat_width = depth.size()
pose = pose.view(batch_size, -1, 3, 4) # [B,NNN, 3, 4]
cost_n = pose.shape[1]
cam_coords = pixel2cam(depth, intrinsics_inv)
pose_mat = pose
pose_mat = pose_mat.cuda()
# Get projection matrix for tgt camera frame to source pixel frame
intrinsics = intrinsics.resize(batch_size, 1, 3, 3)
proj_cam_to_src_pixel = intrinsics.matmul(pose_mat) # b * nnn * 3 * 4
src_pixel_coords = cam2pixel_cost(cam_coords, proj_cam_to_src_pixel[:, :, :, :3],
proj_cam_to_src_pixel[:, :, :, -1:],
padding_mode).view(-1, feat_height, feat_width, 2) # [B,nnn,H,W,2]
projected_depth = cam2depth_cost(cam_coords, pose_mat[:, :, :, :3], pose_mat[:, :, :, -1:]) # b nnn h w
fdepth_expand = fdepth.unsqueeze(1)
fdepth_expand = fdepth_expand.resize(batch_size, 1, feat_height, feat_width).repeat(
1, cost_n, 1, 1).view(-1, 1, feat_height, feat_width)
warped_depth = torch.nn.functional.grid_sample(fdepth_expand, src_pixel_coords, mode='nearest',
padding_mode=padding_mode)
warped_depth = warped_depth.view(-1, 1, feat_height, feat_width)
projected_depth = projected_depth.clamp(min=1e-3, max=float(torch.max(warped_depth) + 10))
return projected_depth.view(-1, 1, cost_n, feat_height, feat_width), \
warped_depth.view(-1, 1, cost_n, feat_height, feat_width) # b *nnn * 1 * h * w
def inverse_warp(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
"""
Inverse warp a source image to the target image plane.
Args:
feat: the source feature (where to sample pixels) -- [B, CH, H, W]
depth: depth map of the target image -- [B, H, W]
pose: 6DoF pose parameters from target to source -- [B, 6]
intrinsics: camera intrinsic matrix -- [B, 3, 3]
intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
Returns:
Source image warped to the target image plane
"""
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B34')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert (intrinsics_inv.size() == intrinsics.size())
batch_size, _, feat_height, feat_width = feat.size()
cam_coords = pixel2cam(depth, intrinsics_inv)
pose_mat = pose
pose_mat = pose_mat.cuda()
# Get projection matrix for tgt camera frame to source pixel frame
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:, :, :3], proj_cam_to_src_pixel[:, :, -1:],
padding_mode, rounded=True) # [B,H,W,2]
projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, mode='nearest', padding_mode=padding_mode)
return projected_feat
def inverse_warp_cost(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
"""
ref -> targets
Args:
feat: b * c * h * w
depth: b * h * w
pose: b * n (* n * n) * 3 * 4
intrinsics: [B, 3, 3]
intrinsics_inv: [B, 3, 3]
"""
check_sizes(depth, 'depth', 'BHW')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert (intrinsics_inv.size() == intrinsics.size())
batch_size, channal, feat_height, feat_width = feat.size()
cam_coords = pixel2cam(depth, intrinsics_inv) # [B, 3, H, W]
pose = pose.view(batch_size, -1, 3, 4) # [B,NNN, 3, 4]
cost_n = pose.shape[1]
pose_mat = pose
pose_mat = pose_mat.cuda()
# Get projection matrix for tgt camera frame to source pixel frame
intrinsics = intrinsics.view(batch_size, 1, 3, 3)
proj_cam_to_src_pixel = intrinsics.matmul(pose_mat) # b * NNN * 3 * 4
src_pixel_coords = cam2pixel_cost(cam_coords, proj_cam_to_src_pixel[:, :, :, :3],
proj_cam_to_src_pixel[:, :, :, -1:],
padding_mode) # [B,NNN,H,W,2]
feat = feat.view(batch_size, 1, channal, feat_height, feat_width).repeat(1, cost_n, 1, 1, 1).view(-1, channal,
feat_height,
feat_width)
projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords.view(-1, feat_height, feat_width, 2),
padding_mode=padding_mode)
return projected_feat # (bNNN) * c * h * w
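# Minimal usage sketch (not part of the original file; the shapes and the
# identity pose are invented for illustration). A CUDA device is required
# because pixel2cam() above moves the pixel grid onto the GPU unconditionally.
if __name__ == "__main__":
    if torch.cuda.is_available():
        B, C, H, W = 2, 8, 32, 48
        feat = torch.randn(B, C, H, W).cuda()
        depth = torch.rand(B, H, W).cuda() + 1.0
        # identity rotation and zero translation, i.e. a "no motion" pose
        pose = torch.cat([torch.eye(3), torch.zeros(3, 1)], dim=1)
        pose = pose.unsqueeze(0).repeat(B, 1, 1).cuda()
        intrinsics = torch.eye(3).unsqueeze(0).repeat(B, 1, 1).cuda()
        warped = inverse_warp(feat, depth, pose, intrinsics, intrinsics.inverse())
        print(warped.shape)  # torch.Size([2, 8, 32, 48])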
| 40.652568
| 116
| 0.627155
|
e299d0de9fb33ed4225d273a57592282ba871f95
| 2,845
|
py
|
Python
|
recipe_modules/context/examples/full.py
|
luci/recipes-py
|
13b77f0e1dc3e0f15f540679262ce966917c15a3
|
[
"Apache-2.0"
] | 23
|
2016-01-20T00:45:26.000Z
|
2022-02-26T04:25:30.000Z
|
recipe_modules/context/examples/full.py
|
luci/recipes-py
|
13b77f0e1dc3e0f15f540679262ce966917c15a3
|
[
"Apache-2.0"
] | 8
|
2016-01-15T19:00:38.000Z
|
2018-03-06T00:15:24.000Z
|
recipe_modules/context/examples/full.py
|
luci/recipes-py
|
13b77f0e1dc3e0f15f540679262ce966917c15a3
|
[
"Apache-2.0"
] | 13
|
2015-09-05T05:52:43.000Z
|
2019-07-08T17:34:27.000Z
|
# Copyright 2017 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine import recipe_api, config
from PB.go.chromium.org.luci.lucictx.sections import Deadline
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
DEPS = [
'context',
'path',
'raw_io',
'step',
'time',
]
def RunSteps(api):
api.step('default step', ['bash', '-c', 'echo default!'])
noop_context = {}
with api.context(**noop_context):
# nothing happens! this is exactly the same as above, but this optimization
# is helpful when recipes need to calculate contextual values.
api.step('default step', ['bash', '-c', 'echo default!'])
# can change cwd
api.step('mk subdir', ['mkdir', '-p', 'subdir'])
with api.context(cwd=api.path['start_dir'].join('subdir')):
api.step('subdir step', ['bash', '-c', 'pwd'])
api.step('other subdir step', ['bash', '-c', 'echo hi again!'])
# can set envvars, and path prefix.
pants = api.path['start_dir'].join('pants')
shirt = api.path['start_dir'].join('shirt')
with api.context(env={'FOO': 'bar'}):
api.step('env step', ['bash', '-c', 'echo $FOO'])
with api.context(env_prefixes={'FOO': [pants, shirt]}):
api.step('env step with prefix', ['bash', '-c', 'echo $FOO'])
# Path prefix won't append empty environment variables.
with api.context(env={'FOO': ''}, env_prefixes={'FOO': [pants, shirt]}):
result = api.step('env prefixes with empty value',
['bash', '-c', 'echo $FOO'])
# %-formats are errors (for now). Double-% escape them.
bad_examples = ['%format', '%s']
for example in bad_examples:
try:
with api.context(env={'BAD': example}):
assert False # pragma: no cover
except ValueError:
pass
# this is fine though:
with api.context(env={'FINE': '%%format'}):
pass
# Adjusting deadlines; default is "0" deadline in tests, 30s grace period.
# Tests display timeout==deadline.
# low-level method
now = api.time.time()
with api.context(deadline=Deadline(soft_deadline=now+20, grace_period=30)):
api.step('20 sec deadline', ['bash', '-c', 'echo default!'])
try:
with api.context(deadline=Deadline(soft_deadline=now+30, grace_period=30)):
assert False # pragma: no cover
except ValueError:
# cannot increase grace_period
pass
with api.context(deadline=Deadline(soft_deadline=now+20, grace_period=10)):
api.step('and 10 sec grace_period', ['bash', '-c', 'echo default!'])
try:
with api.context(deadline=Deadline(soft_deadline=now+20, grace_period=30)):
assert False # pragma: no cover
except ValueError:
# cannot increase grace_period
pass
def GenTests(api):
yield api.test('basic')
| 31.611111
| 83
| 0.642882
|
599237c48584ab10b2506ff1ba3651fcd8d2256c
| 2,547
|
py
|
Python
|
payments/stripe/__init__.py
|
jonejone/django-payments
|
5d595d38ef88b4a26d847f35974926bba4a3fcde
|
[
"BSD-3-Clause"
] | null | null | null |
payments/stripe/__init__.py
|
jonejone/django-payments
|
5d595d38ef88b4a26d847f35974926bba4a3fcde
|
[
"BSD-3-Clause"
] | null | null | null |
payments/stripe/__init__.py
|
jonejone/django-payments
|
5d595d38ef88b4a26d847f35974926bba4a3fcde
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from decimal import Decimal
import stripe
from .. import PaymentError
from .. import PaymentStatus
from .. import RedirectNeeded
from ..core import BasicProvider
from .forms import ModalPaymentForm
from .forms import PaymentForm
class StripeProvider(BasicProvider):
"""Provider backend using `Stripe <https://stripe.com/>`_.
This backend does not support fraud detection.
:param secret_key: Secret key assigned by Stripe.
:param public_key: Public key assigned by Stripe.
:param name: A friendly name for your store.
:param image: Your logo.
"""
form_class = ModalPaymentForm
def __init__(self, public_key, secret_key, image='', name='', **kwargs):
stripe.api_key = secret_key
self.secret_key = secret_key
self.public_key = public_key
self.image = image
self.name = name
super().__init__(**kwargs)
def get_form(self, payment, data=None):
if payment.status == PaymentStatus.WAITING:
payment.change_status(PaymentStatus.INPUT)
form = self.form_class(
data=data, payment=payment, provider=self)
if form.is_valid():
form.save()
raise RedirectNeeded(payment.get_success_url())
return form
def capture(self, payment, amount=None):
amount = int((amount or payment.total) * 100)
charge = stripe.Charge.retrieve(payment.transaction_id)
try:
charge.capture(amount=amount)
except stripe.InvalidRequestError:
payment.change_status(PaymentStatus.REFUNDED)
raise PaymentError('Payment already refunded')
payment.attrs.capture = json.dumps(charge)
return Decimal(amount) / 100
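    # Worked example for capture() above (hypothetical figures, not part of
    # the original module): a payment whose total is Decimal("10.99") results
    # in charge.capture(amount=1099) (Stripe amounts are integer cents) and
    # the method returns Decimal("10.99").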
def release(self, payment):
charge = stripe.Charge.retrieve(payment.transaction_id)
charge.refund()
payment.attrs.release = json.dumps(charge)
def refund(self, payment, amount=None):
amount = int((amount or payment.total) * 100)
charge = stripe.Charge.retrieve(payment.transaction_id)
charge.refund(amount=amount)
payment.attrs.refund = json.dumps(charge)
return Decimal(amount) / 100
class StripeCardProvider(StripeProvider):
"""Provider backend using `Stripe <https://stripe.com/>`_, form-based.
This backend implements payments using `Stripe <https://stripe.com/>`_ but
the credit card data is collected by your site.
Parameters are the same as :class:`~StripeProvider`.
"""
form_class = PaymentForm
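# Configuration sketch (hypothetical keys and values, not part of the original
# module): django-payments normally wires a provider up through its
# PAYMENT_VARIANTS setting, roughly as
#   PAYMENT_VARIANTS = {
#       'stripe': ('payments.stripe.StripeProvider',
#                  {'secret_key': 'sk_test_...', 'public_key': 'pk_test_...'}),
#   }
# matching the constructor parameters documented on StripeProvider above.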
| 32.240506
| 78
| 0.673341
|
ec06c525dc70c70811ab08c4d1c7483c47d62c26
| 1,508
|
py
|
Python
|
network/unmix/test/configuration/testconfiguration.py
|
splitstrument/training
|
03d1edd68f3079dc1fba890eebf16fa90eb900f0
|
[
"MIT"
] | 4
|
2019-04-10T22:20:12.000Z
|
2020-01-27T17:43:48.000Z
|
network/unmix/test/configuration/testconfiguration.py
|
splitstrument/training
|
03d1edd68f3079dc1fba890eebf16fa90eb900f0
|
[
"MIT"
] | 3
|
2019-03-09T12:03:58.000Z
|
2020-01-27T17:42:21.000Z
|
unmix/test/configuration/testconfiguration.py
|
unmix-io/unmix-net
|
873d99da42f80574543c096fcd5b7c8748d2cca0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf8
"""
Tests JSON(C) configuration.
"""
__author__ = 'David Flury, Andreas Kaufmann, Raphael Müller'
__email__ = "info@unmix.io"
import os
from unmix.source.configuration import Configuration
def test_configuration_initialize():
current_path = os.path.dirname(__file__)
config_file = os.path.join(os.path.dirname(__file__), 'test.jsonc')
Configuration.initialize(config_file, current_path, create_output=False)
assert Configuration.get("test") == "test-root"
assert Configuration.get("level1.level2.level3") == "test-level3"
assert Configuration.get("level1").level2.level3 == "test-level3"
assert type(Configuration.get("float_implicit")) == float
assert type(Configuration.get("float_explicit")) == float
assert type(Configuration.get("int_implicit")) == int
assert type(Configuration.get("int_explicit")) == int
assert type(Configuration.get("bool_implicit")) == bool
assert type(Configuration.get("bool_explicit")) == bool
test_path = Configuration.get_path("path")
assert os.path.isabs(test_path)
assert test_path.startswith(current_path)
assert Configuration.get("nonexistent", optional=True) is None
assert Configuration.get("nonexistent", optional=True, default=42) == 42
try:
Configuration.get("nonexistent", optional=False)
assert False
except:
assert True
if __name__ == "__main__":
test_configuration_initialize()
print("Test run successful.")
| 33.511111
| 76
| 0.720822
|
bf0d8fb2046a9f00ff8746b0c0eb245a80c2a2a2
| 798
|
py
|
Python
|
zadanie1.2.py
|
StPluto/Test11
|
5ba48d819fc3b0ca34daff14151810d72d7ad052
|
[
"MIT"
] | null | null | null |
zadanie1.2.py
|
StPluto/Test11
|
5ba48d819fc3b0ca34daff14151810d72d7ad052
|
[
"MIT"
] | null | null | null |
zadanie1.2.py
|
StPluto/Test11
|
5ba48d819fc3b0ca34daff14151810d72d7ad052
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from functools import lru_cache
import timeit
@lru_cache
def fib(n):
if n == 0 or n == 1:
return n
else:
return fib(n - 2) + fib(n - 1)
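# e.g. fib(10) == 55; lru_cache memoizes each n, so the naive double recursion
# above runs in linear rather than exponential time.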
@lru_cache
def factorial(n):
if n == 0:
return 1
elif n == 1:
return 1
else:
return n * factorial(n - 1)
if __name__ == '__main__':
r_fib = fib(30)
r_factorial = factorial(30)
    print(f'Result of the recursive Fibonacci function fib(30) = {r_fib}.')
    print(f'Execution time: {timeit.timeit("r_fib", setup="from __main__ import r_fib")} seconds.')
    print(f'Result of the recursive factorial function factorial(30) = {r_factorial}.')
    print(f'Execution time: {timeit.timeit("r_factorial", setup="from __main__ import r_factorial")} seconds.')
| 24.181818
| 108
| 0.642857
|
6a335f63c20e7dbca3e2b20c8d580898808bccaf
| 12,997
|
py
|
Python
|
fast_rnnt/python/tests/mutual_information_test.py
|
pkufool/fast_rnnt
|
a283cddfc9864d7d5ada3dea9afffbe53506c1a2
|
[
"Apache-2.0"
] | 52
|
2021-12-01T09:33:12.000Z
|
2022-03-31T07:55:53.000Z
|
fast_rnnt/python/tests/mutual_information_test.py
|
pkufool/fast_rnnt
|
a283cddfc9864d7d5ada3dea9afffbe53506c1a2
|
[
"Apache-2.0"
] | null | null | null |
fast_rnnt/python/tests/mutual_information_test.py
|
pkufool/fast_rnnt
|
a283cddfc9864d7d5ada3dea9afffbe53506c1a2
|
[
"Apache-2.0"
] | 6
|
2021-12-02T08:38:45.000Z
|
2022-01-19T02:22:15.000Z
|
#!/usr/bin/env python3
#
# Copyright 2021 Xiaomi Corporation (authors: Daniel Povey,
# Wei Kang)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R mutual_information_test_py
import random
import unittest
import fast_rnnt
import torch
# Caution: this will fail occasionally due to cutoffs not being quite large
# enough. As long as it passes most of the time, it's OK.
class TestMutualInformation(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.devices = [torch.device("cpu")]
if torch.cuda.is_available() and fast_rnnt.with_cuda():
cls.devices.append(torch.device("cuda", 0))
if torch.cuda.device_count() > 1:
torch.cuda.set_device(1)
cls.devices.append(torch.device("cuda", 1))
cls.dtypes = [torch.float32, torch.float64]
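    # A minimal sketch of the API under test (illustrative shapes only, not
    # part of the original file): for the unmodified topology px has shape
    # [B, S, T + 1] and py has shape [B, S + 1, T], e.g.
    #   px = torch.randn(2, 3, 8)   # B=2, S=3, T=7
    #   py = torch.randn(2, 4, 7)
    #   m = fast_rnnt.mutual_information_recursion(px, py)   # shape [2]
    # which is what the loops below exercise across dtypes and devices.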
def test_mutual_information_basic(self):
for _iter in range(10):
(B, S, T) = (
random.randint(1, 10),
random.randint(1, 16),
random.randint(1, 500),
)
random_px = random.random() < 0.2
random_py = random.random() < 0.2
random_boundary = random.random() < 0.7
big_px = random.random() < 0.2
big_py = random.random() < 0.2
modified = random.random() < 0.5
if modified and T < S:
T = S + random.randint(0, 30)
for dtype in self.dtypes:
for device in self.devices:
if random_boundary:
def get_boundary_row():
this_S = random.randint(
0, S
) # allow empty sequence
this_T = random.randint(
this_S if modified else 1, T
)
s_begin = random.randint(0, S - this_S)
t_begin = random.randint(0, T - this_T)
s_end = s_begin + this_S
t_end = t_begin + this_T
return [s_begin, t_begin, s_end, t_end]
if device == torch.device("cpu"):
boundary = torch.tensor(
[get_boundary_row() for _ in range(B)],
dtype=torch.int64,
device=device,
)
else:
boundary = boundary.to(device)
else:
# Use default boundary, but either specified directly
# or not.
if random.random() < 0.5:
boundary = (
torch.tensor([0, 0, S, T], dtype=torch.int64)
.unsqueeze(0)
.expand(B, 4)
.to(device)
)
else:
boundary = None
if device == torch.device("cpu"):
if random_px:
# log of an odds ratio
px = torch.randn(
B, S, T + (0 if modified else 1), dtype=dtype
).to(device)
if S > 1 and not random_boundary and not modified:
px[:, :, -1:] = float("-inf")
else:
# log of an odds ratio
px = torch.zeros(
B, S, T + (0 if modified else 1), dtype=dtype
).to(device)
# px and py get exponentiated, and then multiplied
# together up to 32 times (BLOCK_SIZE in the CUDA code),
# so 15 is actually a big number that could lead to
# overflow.
if big_px:
px += 15.0
if random_py:
# log of an odds ratio
py = torch.randn(B, S + 1, T, dtype=dtype).to(
device
)
else:
# log of an odds ratio
py = torch.zeros(B, S + 1, T, dtype=dtype).to(
device
)
if big_py:
py += 15.0
else:
px = px.to(device).detach()
py = py.to(device).detach()
px.requires_grad = True
py.requires_grad = True
m = fast_rnnt.mutual_information_recursion(px, py, boundary)
m2 = fast_rnnt.joint_mutual_information_recursion(
(px,), (py,), boundary
)
m3 = fast_rnnt.joint_mutual_information_recursion(
(px * 0.5, px * 0.5), (py * 0.5, py * 0.5), boundary
)
# it is supposed to be identical only after
# summing over dim 0, corresponding to the
# sequence dim
m3 = m3.sum(dim=0)
assert torch.allclose(m, m2)
assert torch.allclose(m, m3)
# the loop this is in checks that the CPU and CUDA versions
# give the same derivative;
# by randomizing which of m, m2 or m3 we backprop, we also
# ensure that the joint version of the code gives the same
# derivative as the regular version
scale = 3
if random.random() < 0.5:
(m.sum() * scale).backward()
elif random.random() < 0.5:
(m2.sum() * scale).backward()
else:
(m3.sum() * scale).backward()
if device == torch.device("cpu"):
expected_px_grad = px.grad
expected_py_grad = py.grad
expected_m = m
assert torch.allclose(
px.grad,
expected_px_grad.to(device),
atol=1.0e-02,
rtol=1.0e-02,
)
assert torch.allclose(
py.grad,
expected_py_grad.to(device),
atol=1.0e-02,
rtol=1.0e-02,
)
assert torch.allclose(
m, expected_m.to(device), atol=1.0e-02, rtol=1.0e-02
)
def test_mutual_information_deriv(self):
for _iter in range(10):
(B, S, T) = (
random.randint(1, 100),
random.randint(1, 200),
random.randint(1, 200),
)
random_px = random.random() < 0.2
random_py = random.random() < 0.2
random_boundary = random.random() < 0.7
big_px = random.random() < 0.2
big_py = random.random() < 0.2
modified = random.random() < 0.5
if modified and T < S:
T = S + random.randint(0, 30)
for dtype in self.dtypes:
for device in self.devices:
if random_boundary:
def get_boundary_row():
this_S = random.randint(1, S)
this_T = random.randint(
this_S if modified else 1, T
)
s_begin = random.randint(0, S - this_S)
t_begin = random.randint(0, T - this_T)
s_end = s_begin + this_S
t_end = t_begin + this_T
return [s_begin, t_begin, s_end, t_end]
if device == torch.device("cpu"):
boundary = torch.tensor(
[get_boundary_row() for _ in range(B)],
dtype=torch.int64,
device=device,
)
else:
boundary = boundary.to(device)
else:
# Use default boundary, but either specified directly
# or not.
if random.random() < 0.5:
boundary = (
torch.tensor([0, 0, S, T], dtype=torch.int64)
.unsqueeze(0)
.expand(B, 4)
.to(device)
)
else:
boundary = None
T1 = T + (0 if modified else 1)
if device == torch.device("cpu"):
if random_px:
# log of an odds ratio
px = torch.randn(B, S, T1, dtype=dtype).to(device)
else:
# log of an odds ratio
px = torch.zeros(B, S, T1, dtype=dtype).to(device)
# px and py get exponentiated, and then multiplied
# together up to 32 times (BLOCK_SIZE in the CUDA code),
# so 15 is actually a big number that could lead to
# overflow.
if big_px:
px += 15.0
if random_py:
# log of an odds ratio
py = torch.randn(B, S + 1, T, dtype=dtype).to(
device
)
else:
# log of an odds ratio
py = torch.zeros(B, S + 1, T, dtype=dtype).to(
device
)
if big_py:
py += 15.0
else:
px = px.to(device).detach()
py = py.to(device).detach()
px.requires_grad = True
py.requires_grad = True
m = fast_rnnt.mutual_information_recursion(px, py, boundary)
m_grad = torch.randn(B, dtype=dtype, device=device)
m.backward(gradient=m_grad)
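                    # Finite-difference check of the analytic gradient: perturb
                    # px by a small random delta and compare the observed change
                    # in the weighted objective,
                    #     sum(m_grad * (m(px + delta_px) - m(px))),
                    # with the first-order prediction from backprop,
                    #     sum(delta_px * px.grad).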
delta = 1.0e-04
delta_px = delta * torch.randn_like(px)
m2 = fast_rnnt.mutual_information_recursion(
px + delta_px, py, boundary
)
delta_m = m2 - m
observed_delta = (delta_m * m_grad).sum().to("cpu")
predicted_delta = (delta_px * px.grad).sum().to("cpu")
atol = 1.0e-02 if dtype == torch.float32 else 1.0e-04
rtol = 1.0e-02 if dtype == torch.float32 else 1.0e-04
assert torch.allclose(
observed_delta, predicted_delta, atol=atol, rtol=rtol
)
delta_py = delta * torch.randn_like(py)
m2 = fast_rnnt.mutual_information_recursion(
px, py + delta_py, boundary
)
delta_m = m2 - m
observed_delta = (delta_m * m_grad).sum().to("cpu")
predicted_delta = (delta_py * py.grad).sum().to("cpu")
assert torch.allclose(
observed_delta, predicted_delta, atol=atol, rtol=rtol
)
if __name__ == "__main__":
unittest.main()
| 41.790997
| 80
| 0.407171
|
0454e15e84ebd6d2e8a6c5fca70017f2498a3852
| 930
|
py
|
Python
|
sina/sina/utils/dateTimeUtils.py
|
judypol/pytonStudy
|
11a6982d36e39e9653eccdf5a2ad41cf0fba6c38
|
[
"MIT"
] | null | null | null |
sina/sina/utils/dateTimeUtils.py
|
judypol/pytonStudy
|
11a6982d36e39e9653eccdf5a2ad41cf0fba6c38
|
[
"MIT"
] | null | null | null |
sina/sina/utils/dateTimeUtils.py
|
judypol/pytonStudy
|
11a6982d36e39e9653eccdf5a2ad41cf0fba6c38
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import datetime
class DateTimeUtils:
@staticmethod
def getNow():
return datetime.datetime.now()
@staticmethod
def getStartTime():
nowTime = datetime.datetime.now()
return nowTime.strftime('%Y%m%d%H%M') + '00'
@staticmethod
def getContractList():
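        # Builds a list of 13 consecutive contract codes in 'YYMM' format,
        # starting with the current month and rolling the year over once the
        # month offset passes December.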
contractList = []
nowDate = datetime.datetime.today()
contractList.append(nowDate.strftime('%y%m'))
nowYear = nowDate.year
nowMonth = nowDate.month
for num in range(1, 13):
month = nowMonth + num
            if month > 12:
                month = month - 12
                year = nowYear + 1
            else:
                year = nowYear
tmpDate = datetime.datetime(year=year, month=month, day=1)
contractList.append(tmpDate.strftime('%y%m'))
return contractList
| 26.571429
| 70
| 0.550538
|
ce49d5a5990ecff97a3c279866da7abf56bbb1a5
| 725
|
py
|
Python
|
locale/pot/api/core/pointsets-6.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/core/pointsets-6.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/core/pointsets-6.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
# Load module and example file
import pyvista as pv
from pyvista import examples
# Load example beam file
grid = pv.UnstructuredGrid(examples.hexbeamfile)
# Create plotting class and add the unstructured grid
plotter = pv.Plotter()
plotter.add_mesh(grid, show_edges=True, color='tan')
# Add labels to points on the yz plane (where x == 0)
points = grid.points
mask = points[:, 0] == 0
plotter.add_point_labels(points[mask], points[mask].tolist())
plotter.camera_position = [
(-1.4643015810492384, 1.5603923627830638, 3.16318236536270),
(0.05268120500967251, 0.639442034364944, 1.204095304165153),
(0.2364061044392675, 0.9369426029156169, -0.25739213784721)]
plotter.show()
| 32.954545
| 76
| 0.718621
|
cc134d24f471e628e8d04282eb4822fa9f1f0dae
| 4,712
|
py
|
Python
|
sorting.py
|
JustGk/fds
|
3ea5017df8f51d7adf4af1ad07dcf5e4ea55fef9
|
[
"Apache-2.0"
] | null | null | null |
sorting.py
|
JustGk/fds
|
3ea5017df8f51d7adf4af1ad07dcf5e4ea55fef9
|
[
"Apache-2.0"
] | null | null | null |
sorting.py
|
JustGk/fds
|
3ea5017df8f51d7adf4af1ad07dcf5e4ea55fef9
|
[
"Apache-2.0"
] | null | null | null |
def bubble(arr):
print("SORTED USING BUBBLE SORT METHOD")
print("ORIGINAL ARRAY :", arr)
for i in range(len(arr)-1):
for j in range(0,len(arr)-1):
if arr[j]<arr[j+1]:
arr[j],arr[j+1]=arr[j+1],arr[j]
print("---------------------------------------------")
print("SORTED IN DESCENDING ORDER : ",end="")
print(arr)
for i in range(0,len(arr)):
for j in range(0,len(arr)-1):
if arr[j]>arr[j+1]:
arr[j],arr[j+1]=arr[j+1],arr[j]
print("---------------------------------------------")
print("SORTED IN ASCENDING ORDER : ",end="")
print(arr)
print("---------------------------------------------")
# bubble(b)
def selection(arr):
pos=0
print("SORTED USING SELECTION SORT METHOD")
print("ORIGINAL ARRAY :", arr)
    for i in range(len(arr)-1):
        # find the largest element in arr[i:]; pos starts at i so the swap is
        # a no-op when arr[i] is already the largest (the original code left
        # pos stale from the previous pass)
        mx=arr[i]
        pos=i
        for j in range(i+1,len(arr)):
            if arr[j]>mx:
                mx=arr[j]
                pos=j
        arr[i],arr[pos]=arr[pos],arr[i]
    print("---------------------------------------------")
    print("SORTED IN DESCENDING ORDER: ",end="")
    print(arr)
    for i in range(len(arr)-1):
        # same idea for ascending order: track the smallest remaining element
        mn=arr[i]
        pos=i
        for j in range(i+1,len(arr)):
            if arr[j]<mn:
                mn=arr[j]
                pos=j
        arr[i],arr[pos]=arr[pos],arr[i]
print("---------------------------------------------")
print("SORTED IN ASCENDING ORDER: ", end="")
print(arr)
print("---------------------------------------------")
def insertion(arr):
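    # Standard insertion sort: arr[i] is shifted left past every element that
    # should come after it. The first pass compares with '<' to produce
    # descending order, the second pass with '>' to produce ascending order.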
print("SORTED USING INSERTION SORT METHOD")
print("ORIGINAL ARRAY :", arr)
for i in range(1,len(arr)):
temp=arr[i]
j=i-1
while j>=0 and arr[j]<temp:
arr[j+1]=arr[j]
j-=1
arr[j+1]=temp
print("---------------------------------------------")
print("SORTED IN DESCENDING ORDER: ", end="")
print(arr)
for i in range(1,len(arr)):
temp=arr[i]
j=i-1
while j>=0 and arr[j]>temp:
arr[j+1]=arr[j]
j-=1
arr[j+1]=temp
print("---------------------------------------------")
print("SORTED IN ASCENDING ORDER: ", end="")
print(arr)
print("---------------------------------------------")
# Python program for implementation of Quicksort Sort
# This function takes last element as pivot, places
# the pivot element at its correct position in sorted
# array, and places all smaller (smaller than pivot)
# to left of pivot and all greater elements to right
# of pivot
def partition(arr, low, high):
i = (low-1) # index of smaller element
pivot = arr[high] # pivot
for j in range(low, high):
# If current element is smaller than or
# equal to pivot
if arr[j] <= pivot:
# increment index of smaller element
i = i+1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
return (i+1)
# The main function that implements QuickSort
# arr[] --> Array to be sorted,
# low --> Starting index,
# high --> Ending index
# Function to do Quick sort
def quickSort(arr, low, high):
if len(arr) == 1:
return arr
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(arr, low, high)
# Separately sort elements before
# partition and after partition
quickSort(arr, low, pi-1)
quickSort(arr, pi+1, high)
# This code is contributed by Mohit Kumra
#This code in improved by https://github.com/anushkrishnav
if __name__ == '__main__':
arr=[]
def creat():
global arr
arr=[]
n=int(input("ENTER SIZE OF ARRAY:"))
print("ENTER ELEMENTS IN ARRAY")
for i in range(n):
ele=int(input())
arr.append(ele)
while True:
print("--------------------OPITIONS--------------------")
print("FOR BUBBLE SORT TYPE 1")
print("FOR SELECTION SORT TYPE 2")
print("FOR INSERTION SORT TYPE 3")
print("FOR QUICK SORT TYPE 4")
choice=int(input("ENTER YOUR CHOICE: "))
if choice==1:
creat()
bubble(arr)
elif choice==2:
creat()
selection(arr)
elif choice==3:
creat()
insertion(arr)
elif choice==4:
creat()
quickSort(arr,0,len(arr)-1)
print("Sorted array is:")
for i in range(len(arr)):
print("%d" % arr[i],end=" ")
print("\n\n")
else:
break
| 29.08642
| 66
| 0.467105
|
43a07c00dddda83c4ad6b672555374622ffd9cc8
| 15,688
|
py
|
Python
|
numericalunits.py
|
sbyrnes321/numericalunits
|
b66726f681e519d07dea4babe182ba1d96260524
|
[
"MIT"
] | 86
|
2015-02-13T10:50:21.000Z
|
2022-03-10T05:56:01.000Z
|
numericalunits.py
|
sbyrnes321/numericalunits
|
b66726f681e519d07dea4babe182ba1d96260524
|
[
"MIT"
] | 15
|
2017-06-23T20:01:22.000Z
|
2022-01-30T19:23:17.000Z
|
numericalunits.py
|
sbyrnes321/numericalunits
|
b66726f681e519d07dea4babe182ba1d96260524
|
[
"MIT"
] | 14
|
2015-02-15T12:51:29.000Z
|
2021-12-22T09:15:36.000Z
|
# -*- coding: utf-8 -*-
"""
For information and usage see README, or http://pypi.python.org/pypi/numericalunits
"""
# Copyright (C) 2012-2020 Steven J. Byrnes
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from math import pi
__version__ = 1.25
########## Set all variables, to help introspection libraries ################
# This part is functionally pointless, it only helps IDE autocompletion
# / introspection libraries know that these variables exist. The actual
# values are set below, using the "global" keyword inside functions.
m = kg = s = C = K = 0.
cm = mm = um = nm = pm = fm = km = angstrom = Å = lightyear = \
astro_unit = pc = kpc = Mpc = Gpc = inch = foot = mile = thou = 0.
L = mL = uL = nL = pL = fL = aL = kL = ML = GL = 0.
ms = us = ns = ps = fs = minute = hour = day = week = year = 0.
Hz = mHz = kHz = MHz = GHz = THz = PHz = rtHz = rpm = 0.
Hz·2π = mHz·2π = kHz·2π = MHz·2π = GHz·2π = THz·2π = PHz·2π = rpm·2π = 0.
g = mg = ug = ng = pg = fg = tonne = amu = Da = kDa = lbm = 0.
J = mJ = uJ = nJ = pJ = fJ = kJ = MJ = GJ = erg = eV = meV = keV = MeV = GeV = \
TeV = btu = smallcal = kcal = Wh = kWh = 0.
NA = mol = mmol = umol = nmol = pmol = fmol = M = mM = uM = nM = pM = fM = 0.
N = mN = uN = nN = pN = fN = kN = MN = GN = dyn = lbf = 0.
Pa = hPa = kPa = MPa = GPa = bar = mbar = cbar = dbar = kbar = Mbar = atm = \
torr = mtorr = psi = 0.
W = mW = uW = nW = pW = kW = MW = GW = TW = horsepower_imperial = \
horsepower_metric = 0.
Gal = mGal = uGal = eotvos = 0.
degFinterval = degCinterval = mK = uK = nK = pK = 0.
mC = uC = nC = Ah = mAh = 0.
A = mA = uA = nA = pA = fA = 0.
V = mV = uV = nV = kV = MV = GV = TV = 0.
ohm = mohm = kohm = Mohm = Gohm = Ω = mΩ = kΩ = MΩ = GΩ = S = mS = uS = nS = 0.
T = mT = uT = nT = G = mG = uG = kG = Oe = Wb = 0.
F = uF = nF = pF = fF = aF = H = mH = uH = nH = 0.
c0 = mu0 = μ0 = eps0 = ε0 = Z0 = hPlanck = hbar = ħ = kB = GNewton = sigmaSB = \
σSB = alphaFS = αFS = 0.
Rgas = e = uBohr = uNuc = aBohr = me = mp = mn = Rinf = Ry = Hartree = \
ARichardson = Phi0 = KJos = RKlitz = 0.
REarth = g0 = Msolar = MEarth = 0.
########################### Main code #######################################
def reset_units(seed=None):
"""
Set all units to new, self-consistent, floating-point values. See package
documentation for detailed explanation and examples:
http://pypi.python.org/pypi/numericalunits
reset_units() --> units are randomized. This is the suggested use, and is
done automatically the first time the module is imported. So you don't need
to call this function explicitly; just do your calculation, display the
final answer, then repeat in a fresh Python session. If you get the same
answer both times, then your calculations are almost guaranteed to be free of
dimensional-analysis-violating errors.
reset_units('SI') --> Set units so that all values are given in standard SI
units (meters-kilograms-seconds) by default. In this mode, there is no way
to test for dimensional-analysis-violating errors.
reset_units(x) --> If you pass any other argument x, it's used as the seed
for the random number generator.
"""
import random
global m, kg, s, C, K
if seed == 'SI':
m = 1.
kg = 1.
s = 1.
C = 1.
K = 1.
else:
prior_random_state = random.getstate()
if seed is None:
random.seed()
else:
random.seed(seed)
m = 10 ** random.uniform(-2,2) # meter
kg = 10 ** random.uniform(-2,2) # kilogram
s = 10 ** random.uniform(-2,2) # second
C = 10 ** random.uniform(-2,2) # coulomb
K = 10 ** random.uniform(-2,2) # kelvin
# Leave the random generator like I found it, in case something else is
# using it.
random.setstate(prior_random_state)
set_derived_units_and_constants()
def set_derived_units_and_constants():
"""
Assuming that the base units (m, kg, s, C, K) have already been set as
floating-point values, this function sets all other units and constants
to the appropriate, self-consistent values.
"""
# Length
global cm, mm, um, nm, pm, fm, km, angstrom, Å, lightyear, \
astro_unit, pc, kpc, Mpc, Gpc, inch, foot, mile, thou
cm = 1e-2 * m
mm = 1e-3 * m
um = 1e-6 * m
nm = 1e-9 * m
pm = 1e-12 * m
fm = 1e-15 * m
km = 1e3 * m
angstrom = 1e-10 * m
Å = angstrom # easier-to-read alias (see https://sjbyrnes.com/unicode.html )
lightyear = 9460730472580800. * m
astro_unit = 149597870700. * m # astronomical unit
pc = (648000./pi) * astro_unit # parsec
kpc = 1e3 * pc
Mpc = 1e6 * pc
Gpc = 1e9 * pc
inch = 2.54 * cm
foot = 12. * inch
mile = 5280. * foot
thou = 1e-3 * inch # thousandth of an inch; also called mil
# Volume
global L, mL, uL, nL, pL, fL, aL, kL, ML, GL
L = 1e-3 * m**3 # liter
mL = 1e-3 * L
uL = 1e-6 * L
nL = 1e-9 * L
pL = 1e-12 * L
fL = 1e-15 * L
aL = 1e-18 * L
kL = 1e3 * L
ML = 1e6 * L
GL = 1e9 * L
# Time
global ms, us, ns, ps, fs, minute, hour, day, week, year
ms = 1e-3 * s
us = 1e-6 * s
ns = 1e-9 * s
ps = 1e-12 * s
fs = 1e-15 * s
minute = 60. * s
hour = 60. * minute
day = 24. * hour # solar day
week = 7. * day
year = 365.256363004 * day # sidereal year
# Frequency
global Hz, mHz, kHz, MHz, GHz, THz, PHz, rtHz, rpm
Hz = 1./s
mHz = 1e-3 * Hz
kHz = 1e3 * Hz
MHz = 1e6 * Hz
GHz = 1e9 * Hz
THz = 1e12 * Hz
PHz = 1e15 * Hz
rtHz = Hz**0.5 # "root Hertz"
rpm = 1/minute # revolutions per minute
# Angular frequency
# Example: ω = 3 * kHz·2π means that ω is the angular frequency
# corresponding to a rotation whose *ordinary* frequency is 3 kHz.
global Hz·2π, mHz·2π, kHz·2π, MHz·2π, GHz·2π, THz·2π, PHz·2π, rpm·2π
Hz·2π = Hz * 2*pi
mHz·2π = mHz * 2*pi
kHz·2π = kHz * 2*pi
MHz·2π = MHz * 2*pi
GHz·2π = GHz * 2*pi
THz·2π = THz * 2*pi
PHz·2π = PHz * 2*pi
rpm·2π = rpm * 2*pi
# Mass
global g, mg, ug, ng, pg, fg, tonne, amu, Da, kDa, lbm
g = 1e-3 * kg
mg = 1e-3 * g
ug = 1e-6 * g
ng = 1e-9 * g
pg = 1e-12 * g
fg = 1e-15 * g
tonne = 1e3 * kg
amu = 1.66053906660e-27 * kg # atomic mass unit
Da = amu # Dalton
kDa = 1e3 * Da
lbm = 0.45359237 * kg # pound mass (international avoirdupois pound)
# Energy
global J, mJ, uJ, nJ, pJ, fJ, kJ, MJ, GJ, erg, eV, meV, keV, MeV, GeV, \
TeV, btu, smallcal, kcal, Wh, kWh
J = (kg * m**2)/s**2
mJ = 1e-3 * J
uJ = 1e-6 * J
nJ = 1e-9 * J
pJ = 1e-12 * J
fJ = 1e-15 * J
kJ = 1e3 * J
MJ = 1e6 * J
GJ = 1e9 * J
erg = 1e-7 * J
eV = 1.602176634e-19 * J
meV = 1e-3 * eV
keV = 1e3 * eV
MeV = 1e6 * eV
GeV = 1e9 * eV
TeV = 1e12 * eV
btu = 1055.06 * J # British thermal unit
smallcal = 4.184 * J # small calorie ("gram calorie")
kcal = 4184. * J # kilocalorie ("large Calorie", "dietary Calorie")
Wh = 3600. * J # watt-hour
kWh = 1e3 * Wh # kilowatt-hour
# Moles, concentration / molarity
global NA, mol, mmol, umol, nmol, pmol, fmol, M, mM, uM, nM, pM, fM
NA = 6.02214076e23 # Avogadro's number
mol = NA #1 mole (see README)
mmol = 1e-3 * mol
umol = 1e-6 * mol
nmol = 1e-9 * mol
pmol = 1e-12 * mol
fmol = 1e-15 * mol
M = mol/L # molar
mM = 1e-3 * M
uM = 1e-6 * M
nM = 1e-9 * M
pM = 1e-12 * M
fM = 1e-15 * M
# Force
global N, mN, uN, nN, pN, fN, kN, MN, GN, dyn, lbf
N = (kg * m)/s**2 # newton
mN = 1e-3 * N
uN = 1e-6 * N
nN = 1e-9 * N
pN = 1e-12 * N
fN = 1e-15 * N
kN = 1e3 * N
MN = 1e6 * N
GN = 1e9 * N
dyn = 1e-5 * N # dyne
lbf = lbm * (9.80665 * m/s**2) # pound-force (international avoirdupois pound)
# Pressure
global Pa, hPa, kPa, MPa, GPa, bar, mbar, cbar, dbar, kbar, Mbar, atm, \
torr, mtorr, psi
Pa = N/m**2 # pascal
hPa = 1e2 * Pa # hectopascal
kPa = 1e3 * Pa
MPa = 1e6 * Pa
GPa = 1e9 * Pa
bar = 1e5 * Pa
mbar = 1e-3 * bar
cbar = 1e-2 * bar # centibar
dbar = 0.1 * bar # decibar
kbar = 1e3 * bar
Mbar = 1e6 * bar
atm = 101325. * Pa
torr = (1./760.) * atm
mtorr = 1e-3 * torr
psi = lbf / inch**2
# Power
global W, mW, uW, nW, pW, kW, MW, GW, TW, \
horsepower_imperial, horsepower_metric
W = J/s
mW = 1e-3 * W
uW = 1e-6 * W
nW = 1e-9 * W
pW = 1e-12 * W
kW = 1e3 * W
MW = 1e6 * W
GW = 1e9 * W
TW = 1e12 * W
horsepower_imperial = 33000 * foot * lbf / minute
horsepower_metric = (75 * kg) * (9.80665 * m/s**2) * (1 * m/s)
# Acceleration and related
global Gal, mGal, uGal, eotvos
Gal = 1*cm/s**2
mGal = 1e-3 * Gal
uGal = 1e-6 * Gal
eotvos = 1e-9 / s**2
# Temperature
global degFinterval, degCinterval, mK, uK, nK, pK
degFinterval = (5./9.) * K # A temperature difference in degrees Fahrenheit
degCinterval = K # A temperature difference in degrees Celsius
mK = 1e-3 * K
uK = 1e-6 * K
nK = 1e-9 * K
pK = 1e-12 * K
# Charge
global mC, uC, nC, Ah, mAh
mC = 1e-3 * C
uC = 1e-6 * C
nC = 1e-9 * C
Ah = 3600. * C # amp-hour
mAh = 1e-3 * Ah
# Current
global A, mA, uA, nA, pA, fA
A = C/s
mA = 1e-3 * A
uA = 1e-6 * A
nA = 1e-9 * A
pA = 1e-12 * A
fA = 1e-15 * A
# Voltage
global V, mV, uV, nV, kV, MV, GV, TV
V = J/C
mV = 1e-3 * V
uV = 1e-6 * V
nV = 1e-9 * V
kV = 1e3 * V
MV = 1e6 * V
GV = 1e9 * V
TV = 1e12 * V
# Resistance and conductivity
global ohm, mohm, kohm, Mohm, Gohm, Ω, mΩ, kΩ, MΩ, GΩ, S, mS, uS, nS
ohm = V / A
mohm = 1e-3 * ohm
kohm = 1e3 * ohm
Mohm = 1e6 * ohm
Gohm = 1e9 * ohm
Ω = ohm # easier-to-read alias (see https://sjbyrnes.com/unicode.html )
mΩ = mohm # easier-to-read alias
kΩ = kohm # easier-to-read alias
MΩ = Mohm # easier-to-read alias
GΩ = Gohm # easier-to-read alias
S = 1./ohm # siemens
mS = 1e-3 * S
uS = 1e-6 * S
nS = 1e-9 * S
# Magnetic fields and fluxes
global T, mT, uT, nT, G, mG, uG, kG, Oe, Wb
T = (V * s) / m**2 # tesla
mT = 1e-3 * T
uT = 1e-6 * T
nT = 1e-9 * T
G = 1e-4 * T # gauss
mG = 1e-3 * G
uG = 1e-6 * G
kG = 1e3 * G
Oe = (1000./(4.*pi)) * A/m # oersted
Wb = J/A # weber
# Capacitance and inductance
global F, uF, nF, pF, fF, aF, H, mH, uH, nH
F = C / V # farad
uF = 1e-6 * F
nF = 1e-9 * F
pF = 1e-12 * F
fF = 1e-15 * F
aF = 1e-18 * F
H = m**2 * kg / C**2 # henry
mH = 1e-3 * H
uH = 1e-6 * H
nH = 1e-9 * H
# Constants--general
global c0, mu0, μ0, eps0, ε0, Z0, hPlanck, hbar, ħ, kB, GNewton, sigmaSB, σSB, alphaFS, αFS
c0 = 299792458. * m/s # speed of light in vacuum
mu0 = 1.25663706212e-6 * N/A**2 # magnetic constant, permeability of vacuum
μ0 = mu0 # easier-to-read alias (see https://sjbyrnes.com/unicode.html )
eps0 = 1./(mu0 * c0**2) # electric constant, permittivity of vacuum
ε0 = eps0 # easier-to-read alias
Z0 = mu0 * c0 # vacuum impedance, 377 ohms
hPlanck = 6.62607015e-34 * J*s # planck constant
hbar = hPlanck / (2.*pi) # reduced planck constant
ħ = hbar # easier-to-read alias
kB = 1.380649e-23 * J/K # Boltzmann constant
GNewton = 6.67430e-11 * m**3 / (kg * s**2) # Gravitational constant
sigmaSB = (pi**2 / 60.) * kB**4 / (hbar**3 * c0**2) # Stefan-Boltzmann constant
σSB = sigmaSB # easier-to-read alias
alphaFS = 7.2973525693e-3 # fine-structure constant
αFS = alphaFS # easier-to-read alias
# Constants--chemistry, atomic physics, electrons
global Rgas, e, uBohr, uNuc, aBohr, me, mp, mn, Rinf, Ry, Hartree, \
ARichardson, Phi0, KJos, RKlitz
Rgas = kB # ideal gas constant (see README)
e = 1.602176634e-19 * C # charge of proton
uBohr = 9.2740100783e-24 * J/T # Bohr magneton
uNuc = 5.0507837461e-27 * J/T # nuclear magneton
aBohr = 5.29177210903e-11 * m # Bohr radius
me = 9.1093837015e-31 * kg # electron mass
mp = 1.67262192369e-27 * kg # proton mass
mn = 1.67492749804e-27 * kg # neutron mass
Rinf = 10973731.568160 / m # Rydberg constant
Ry = 2.1798723611035e-18 * J # Rydberg energy, approximately 13.6 eV
Hartree = 2*Ry # Hartree energy, approximately 27.2 eV
ARichardson = (4.*pi*e*me*kB**2) / hPlanck**3 # Richardson constant
Phi0 = hPlanck / (2*e) # magnetic flux quantum
KJos = (2*e) / hPlanck # Josephson constant
RKlitz = hPlanck / e**2 # von Klitzing constant
# Constants--astronomical and properties of earth
global REarth, g0, Msolar, MEarth
REarth = 6371. * km # radius of earth
g0 = 9.80665 * m / s**2 # standard earth gravitational acceleration
Msolar = 1.98847e30 * kg # mass of the sun
MEarth = 5.9722e24 * kg # mass of earth
# Set units randomly when this module is initialized. (Don't worry: If the
# module is imported many times from many places, this command will only
# execute during the first import.)
reset_units()
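# A minimal usage sketch (variable names below are illustrative, not part of
# this module):
#
#     import numericalunits as nu
#     distance = 5 * nu.km             # attach units on input
#     speed = distance / (10 * nu.s)
#     print(speed / (nu.m / nu.s))     # divide by the target unit on output -> 500.0
#
# Running the same calculation twice (each run picks different random unit
# values) should print the same number if the expression is dimensionally
# consistent.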
def nu_eval(expression):
"""
Evaluates a string expression in the context of this module, so that you
can make APIs that don't require their users to import numericalunits;
instead the API user can run a function like load_data(data, unit='km')
For example:
import numericalunits as nu
x = nu.nu_eval('kg * m / s**2')
...is exactly equivalent to...
import numericalunits as nu
x = nu.kg * nu.m / nu.s**2
Input strings are required to be of the form of stereotypical unit
expressions—e.g. addition and subtraction are banned—to catch user errors.
"""
# Based on https://stackoverflow.com/a/9558001
import ast
import operator as op
operators = {ast.Mult: op.mul, ast.Div: op.truediv, ast.Pow: op.pow, ast.USub: op.neg}
def _eval(node):
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return operators[type(node.op)](_eval(node.left), _eval(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return operators[type(node.op)](_eval(node.operand))
elif isinstance(node, ast.Name):
return globals()[node.id]
else:
raise TypeError(node)
return _eval(ast.parse(expression, mode='eval').body)
| 33.810345
| 462
| 0.575217
|
a3da33d2e48bf0fe4fc8d31d19bf9d3b3ef19271
| 2,944
|
py
|
Python
|
python/nano/test/test_models_onnx.py
|
EmiCareOfCell44/BigDL
|
6278ee8eed09b5072da53dab3a99530cf5f69ba2
|
[
"Apache-2.0"
] | null | null | null |
python/nano/test/test_models_onnx.py
|
EmiCareOfCell44/BigDL
|
6278ee8eed09b5072da53dab3a99530cf5f69ba2
|
[
"Apache-2.0"
] | null | null | null |
python/nano/test/test_models_onnx.py
|
EmiCareOfCell44/BigDL
|
6278ee8eed09b5072da53dab3a99530cf5f69ba2
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import os
from unittest import TestCase
import torch
from torch import nn
import numpy as np
from test._train_torch_lightning import create_data_loader, data_transform
from bigdl.nano.pytorch.trainer import Trainer
from bigdl.nano.pytorch.vision.models import vision
from test._train_torch_lightning import train_with_linear_top_layer
batch_size = 256
num_workers = 0
data_dir = os.path.join(os.path.dirname(__file__), "data")
class ResNet18(nn.Module):
def __init__(self, num_classes, pretrained=True, include_top=False, freeze=True):
super().__init__()
backbone = vision.resnet18(pretrained=pretrained, include_top=include_top, freeze=freeze)
output_size = backbone.get_output_size()
head = nn.Linear(output_size, num_classes)
self.model = nn.Sequential(backbone, head)
def forward(self, x):
return self.model(x)
class TestModelsVision(TestCase):
def test_trainer_compile_with_onnx(self):
model = ResNet18(10, pretrained=False, include_top=False, freeze=True)
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
trainer = Trainer(max_epochs=1)
pl_model = Trainer.compile(model, loss, optimizer, onnx=True)
train_loader = create_data_loader(data_dir, batch_size,\
num_workers, data_transform, subset=200)
trainer.fit(pl_model, train_loader)
assert pl_model._ortsess_up_to_date is False # ortsess is not up-to-date after training
for x, y in train_loader:
onnx_res = pl_model.inference(x.numpy(), file_path="/tmp/model.onnx") # onnxruntime
pytorch_res = pl_model.inference(x, backend=None).numpy() # native pytorch
assert pl_model._ortsess_up_to_date is True # ortsess is up-to-date while inferencing
np.testing.assert_almost_equal(onnx_res, pytorch_res, decimal=5) # same result
trainer.fit(pl_model, train_loader)
assert pl_model._ortsess_up_to_date is False # ortsess is not up-to-date after training
pl_model.update_ortsess() # update the ortsess with default settings
assert pl_model._ortsess_up_to_date is True # ortsess is up-to-date after updating
trainer.predict(pl_model, train_loader)
if __name__ == '__main__':
pytest.main([__file__])
| 37.74359
| 98
| 0.728601
|
8ff046c862c31fca5202813784435c6d2d7f33cc
| 805
|
py
|
Python
|
tests/analyzers/hashers/entropy.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | 27
|
2019-04-05T12:01:49.000Z
|
2022-02-08T02:26:25.000Z
|
tests/analyzers/hashers/entropy.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | null | null | null |
tests/analyzers/hashers/entropy.py
|
nflexfo/plaso
|
5da7aa51c39b593773687fdf20a93ba35fc492b4
|
[
"Apache-2.0"
] | 8
|
2019-11-28T08:06:34.000Z
|
2020-08-29T13:53:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Entropy hasher."""
from __future__ import unicode_literals
import unittest
from plaso.analyzers.hashers import entropy
from tests.analyzers.hashers import test_lib
class EntropyHasherTest(test_lib.HasherTestCase):
"""Tests the Entropy hasher."""
def testFileHashMatchesEmptyFile(self):
"""Tests that hasher matches the hash of an empty file."""
hasher = entropy.EntropyHasher()
self._AssertTestPathStringDigestMatch(hasher, ['empty_file'], '0.000000')
def testFileHashMatchesKnownFile(self):
"""Tests that hasher matches the hash of a known file."""
hasher = entropy.EntropyHasher()
self._AssertTestPathStringDigestMatch(hasher, ['syslog.zip'], '7.264319')
if __name__ == '__main__':
unittest.main()
| 26.833333
| 77
| 0.736646
|
09833d517bc092ad254a806bfd6283aeeb3a133b
| 929
|
py
|
Python
|
config.py
|
gabrielcoder247/News-Highlight-v2
|
595f4ee9739b173142d1012bdda63526818930e4
|
[
"Unlicense"
] | null | null | null |
config.py
|
gabrielcoder247/News-Highlight-v2
|
595f4ee9739b173142d1012bdda63526818930e4
|
[
"Unlicense"
] | null | null | null |
config.py
|
gabrielcoder247/News-Highlight-v2
|
595f4ee9739b173142d1012bdda63526818930e4
|
[
"Unlicense"
] | null | null | null |
import os  # Module that lets the application interact with operating-system-dependent functionality.
class Config:
'''
General configuration parent class
'''
NEWS_SOURCES_BASE_URL = 'https://newsapi.org/v1/sources?language=en&category={}'
NEWS_ARTICLES_BASE_URL = 'https://newsapi.org/v1/articles?source={}&apiKey={}'
NEWS_API_KEY = 'a463576900664bcb87cc440c2cb22785'
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
pass
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
DEBUG = True
config_options = {
    # A dictionary to help us access the different configuration option classes.
'development' : DevConfig,
'production' : ProdConfig
}
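# A minimal usage sketch (assumes a Flask application object named app created
# elsewhere; Flask itself is not imported in this module):
#
#     from config import config_options
#     app.config.from_object(config_options['development'])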
| 29.03125
| 106
| 0.711518
|
8a818629a734752cff130c637a3ecdea24e2d7bb
| 7,155
|
py
|
Python
|
pysteps/cascade/bandpass_filters.py
|
fox91/pysteps
|
dce3b8e7acffeffedcdddb7dd3cfaa497f758446
|
[
"BSD-3-Clause"
] | 1
|
2021-08-25T03:07:07.000Z
|
2021-08-25T03:07:07.000Z
|
pysteps/cascade/bandpass_filters.py
|
fox91/pysteps
|
dce3b8e7acffeffedcdddb7dd3cfaa497f758446
|
[
"BSD-3-Clause"
] | null | null | null |
pysteps/cascade/bandpass_filters.py
|
fox91/pysteps
|
dce3b8e7acffeffedcdddb7dd3cfaa497f758446
|
[
"BSD-3-Clause"
] | null | null | null |
"""
pysteps.cascade.bandpass_filters
================================
Bandpass filters for separating different spatial scales from two-dimensional
images in the frequency domain.
The methods in this module implement the following interface::
filter_xxx(shape, n, optional arguments)
where shape is the shape of the input field and n is the number
of frequency bands to use.
The output of each filter function is a dictionary containing the following
key-value pairs:
.. tabularcolumns:: |p{1.8cm}|L|
+-----------------+-----------------------------------------------------------+
| Key | Value |
+=================+===========================================================+
| weights_1d | 2d array of shape (n, r) containing 1d filter weights for |
| | each frequency band k=1,2,...,n |
+-----------------+-----------------------------------------------------------+
| weights_2d | 3d array of shape (n, M, int(N/2)+1) containing the 2d |
| | filter weights for each frequency band k=1,2,...,n |
+-----------------+-----------------------------------------------------------+
| central_freqs | 1d array of shape n containing the central frequencies of |
| | the filters |
+-----------------+-----------------------------------------------------------+
| shape | the shape of the input field in the spatial domain |
+-----------------+-----------------------------------------------------------+
where r = int(max(N, M)/2)+1
By default, the filter weights are normalized so that for any Fourier
wavenumber they sum to one.
Available filters
-----------------
.. autosummary::
:toctree: ../generated/
filter_uniform
filter_gaussian
"""
import numpy as np
def filter_uniform(shape, n):
"""A dummy filter with one frequency band covering the whole domain. The
weights are set to one.
Parameters
----------
shape : int or tuple
The dimensions (height, width) of the input field. If shape is an int,
the domain is assumed to have square shape.
n : int
Not used. Needed for compatibility with the filter interface.
"""
del n # Unused
result = {}
try:
height, width = shape
except TypeError:
height, width = (shape, shape)
r_max = int(max(width, height) / 2) + 1
result["weights_1d"] = np.ones((1, r_max))
result["weights_2d"] = np.ones((1, height, int(width / 2) + 1))
result["central_freqs"] = None
result["central_wavenumbers"] = None
result["shape"] = shape
return result
def filter_gaussian(shape, n, l_0=3, gauss_scale=0.5, gauss_scale_0=0.5, d=1.0,
normalize=True):
"""Implements a set of Gaussian bandpass filters in logarithmic frequency
scale.
Parameters
----------
shape : int or tuple
The dimensions (height, width) of the input field. If shape is an int,
the domain is assumed to have square shape.
n : int
The number of frequency bands to use. Must be greater than 2.
l_0 : int
Central frequency of the second band (the first band is always centered
at zero).
gauss_scale : float
        Optional scaling parameter. Proportional to the standard deviation of
the Gaussian weight functions.
gauss_scale_0 : float
Optional scaling parameter for the Gaussian function corresponding to
the first frequency band.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
normalize : bool
If True, normalize the weights so that for any given wavenumber
they sum to one.
Returns
-------
out : dict
A dictionary containing the bandpass filters corresponding to the
specified frequency bands.
References
----------
:cite:`PCH2018`
"""
if n < 3:
raise ValueError("n must be greater than 2")
try:
height, width = shape
except TypeError:
height, width = (shape, shape)
rx = np.s_[:int(width / 2) + 1]
if (height % 2) == 1:
ry = np.s_[-int(height / 2):int(height / 2) + 1]
else:
ry = np.s_[-int(height / 2):int(height / 2)]
y_grid, x_grid = np.ogrid[ry, rx]
dy = int(height / 2) if height % 2 == 0 else int(height / 2) + 1
r_2d = np.roll(np.sqrt(x_grid * x_grid + y_grid * y_grid), dy, axis=0)
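    # r_2d holds the radial wavenumber of each entry of the half-plane
    # spectrum (the rfft2 layout); the roll moves the zero wavenumber to the
    # first row to match numpy's FFT frequency ordering.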
max_length = max(width, height)
r_max = int(max_length / 2) + 1
r_1d = np.arange(r_max)
wfs, central_wavenumbers = _gaussweights_1d(max_length, n, l_0=l_0,
gauss_scale=gauss_scale,
gauss_scale_0=gauss_scale_0)
weights_1d = np.empty((n, r_max))
weights_2d = np.empty((n, height, int(width / 2) + 1))
for i, wf in enumerate(wfs):
weights_1d[i, :] = wf(r_1d)
weights_2d[i, :, :] = wf(r_2d)
if normalize:
weights_1d_sum = np.sum(weights_1d, axis=0)
weights_2d_sum = np.sum(weights_2d, axis=0)
for k in range(weights_2d.shape[0]):
weights_1d[k, :] /= weights_1d_sum
weights_2d[k, :, :] /= weights_2d_sum
result = {"weights_1d": weights_1d, "weights_2d": weights_2d}
result["shape"] = shape
central_wavenumbers = np.array(central_wavenumbers)
result["central_wavenumbers"] = central_wavenumbers
# Compute frequencies
central_freqs = 1.0 * central_wavenumbers / max_length
central_freqs[0] = 1.0 / max_length
central_freqs[-1] = 0.5 # Nyquist freq
central_freqs = 1.0 * d * central_freqs
result["central_freqs"] = central_freqs
return result
def _gaussweights_1d(l, n, l_0=3, gauss_scale=0.5, gauss_scale_0=0.5):
e = pow(0.5 * l / l_0, 1.0 / (n - 2))
r = [(l_0 * pow(e, k - 1), l_0 * pow(e, k)) for k in range(1, n - 1)]
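    # e is the logarithmic spacing factor between band centres: the n - 2
    # intermediate bands start at l_0, l_0*e, l_0*e**2, ..., and the final
    # band is centred at l/2 (the largest resolvable wavenumber).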
def log_e(x):
if len(np.shape(x)) > 0:
res = np.empty(x.shape)
res[x == 0] = 0.0
res[x > 0] = np.log(x[x > 0]) / np.log(e)
else:
if x == 0.0:
res = 0.0
else:
res = np.log(x) / np.log(e)
return res
class GaussFunc:
def __init__(self, c, s):
self.c = c
self.s = s
def __call__(self, x):
x = log_e(x) - self.c
return np.exp(-x ** 2.0 / (2.0 * self.s ** 2.0))
weight_funcs = []
central_wavenumbers = [0.0]
weight_funcs.append(GaussFunc(0.0, gauss_scale_0))
for i, ri in enumerate(r):
rc = log_e(ri[0])
weight_funcs.append(GaussFunc(rc, gauss_scale))
central_wavenumbers.append(ri[0])
gf = GaussFunc(log_e(l / 2), gauss_scale)
def g(x):
res = np.ones(x.shape)
mask = x <= l / 2
res[mask] = gf(x[mask])
return res
weight_funcs.append(g)
central_wavenumbers.append(l / 2)
return weight_funcs, central_wavenumbers
| 30.708155
| 79
| 0.542138
|
cf26e49f7e9a9e4c485d0a60ab11eeb59eeb1674
| 4,150
|
py
|
Python
|
convokit/model/utterance.py
|
rgangela99/Cornell-Conversational-Analysis-Toolkit
|
db0f839000da777e2485ba9d69f7407123746942
|
[
"MIT"
] | null | null | null |
convokit/model/utterance.py
|
rgangela99/Cornell-Conversational-Analysis-Toolkit
|
db0f839000da777e2485ba9d69f7407123746942
|
[
"MIT"
] | null | null | null |
convokit/model/utterance.py
|
rgangela99/Cornell-Conversational-Analysis-Toolkit
|
db0f839000da777e2485ba9d69f7407123746942
|
[
"MIT"
] | null | null | null |
from typing import Dict, Optional
from convokit.util import deprecation
from .corpusComponent import CorpusComponent
from .speaker import Speaker
class Utterance(CorpusComponent):
"""Represents a single utterance in the dataset.
:param id: the unique id of the utterance.
:param speaker: the speaker giving the utterance.
:param conversation_id: the id of the root utterance of the conversation.
:param reply_to: id of the utterance this was a reply to.
:param timestamp: timestamp of the utterance. Can be any
comparable type.
:param text: text of the utterance.
:ivar id: the unique id of the utterance.
:ivar speaker: the speaker giving the utterance.
:ivar conversation_id: the id of the root utterance of the conversation.
:ivar reply_to: id of the utterance this was a reply to.
:ivar timestamp: timestamp of the utterance.
:ivar text: text of the utterance.
:ivar meta: A dictionary-like view object providing read-write access to
utterance-level metadata.
"""
def __init__(self, owner=None, id: Optional[str] = None, speaker: Optional[Speaker] = None,
user: Optional[Speaker] = None, conversation_id: Optional[str] = None,
root: Optional[str] = None, reply_to: Optional[str] = None,
timestamp: Optional[int] = None, text: Optional[str] = None,
meta: Optional[Dict] = None):
super().__init__(obj_type="utterance", owner=owner, id=id, meta=meta)
speaker_ = speaker if speaker is not None else user
self.speaker = speaker_
        self.user = speaker_  # for backwards compatibility
self.conversation_id = conversation_id if conversation_id is not None else root
self._root = self.conversation_id
self.reply_to = reply_to
self.timestamp = timestamp # int(timestamp) if timestamp is not None else timestamp
self.text = text
def _get_root(self):
deprecation("utterance.root", "utterance.conversation_id")
return self.conversation_id
def _set_root(self, value: str):
deprecation("utterance.root", "utterance.conversation_id")
self.conversation_id = value
# self._update_uid()
root = property(_get_root, _set_root)
def get_conversation(self):
"""
Get the Conversation (identified by Utterance.conversation_id) this Utterance belongs to
:return: a Conversation object
"""
return self.owner.get_conversation(self.conversation_id)
def get_speaker(self):
"""
Get the Speaker that made this Utterance.
:return: a Speaker object
"""
return self.speaker
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
if not isinstance(other, Utterance):
return False
try:
return self.id == other.id and self.conversation_id == other.conversation_id and self.reply_to == other.reply_to and \
self.speaker == other.speaker and self.timestamp == other.timestamp and self.text == other.text
except AttributeError: # for backwards compatibility with wikiconv
return self.__dict__ == other.__dict__
def __str__(self):
return "Utterance(id: {}, conversation_id: {}, reply-to: {}, " \
"speaker: {}, timestamp: {}, text: {}, vectors: {}, meta: {})".format(repr(self.id),
self.conversation_id,
self.reply_to,
self.speaker,
self.timestamp,
repr(self.text),
self.vectors,
self.meta)
| 43.684211
| 130
| 0.570361
|
c555f1a3932d39a5b7b8fb7832101409dfdc1de2
| 2,913
|
py
|
Python
|
migrations/versions/65458f3846b3_.py
|
KIbet1998/Minute-Pitch
|
e841011330e5e88017b3d422f8812e4e32c910b8
|
[
"MIT"
] | null | null | null |
migrations/versions/65458f3846b3_.py
|
KIbet1998/Minute-Pitch
|
e841011330e5e88017b3d422f8812e4e32c910b8
|
[
"MIT"
] | null | null | null |
migrations/versions/65458f3846b3_.py
|
KIbet1998/Minute-Pitch
|
e841011330e5e88017b3d422f8812e4e32c910b8
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 65458f3846b3
Revises:
Create Date: 2021-06-17 10:20:20.336311
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '65458f3846b3'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('secure_password', sa.String(length=255), nullable=False),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('pitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=255), nullable=False),
sa.Column('post', sa.Text(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('time', sa.DateTime(), nullable=True),
sa.Column('category', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_pitches_category'), 'pitches', ['category'], unique=False)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comment', sa.Text(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('pitch_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('downvotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('upvotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('upvotes')
op.drop_table('downvotes')
op.drop_table('comments')
op.drop_index(op.f('ix_pitches_category'), table_name='pitches')
op.drop_table('pitches')
op.drop_table('users')
# ### end Alembic commands ###
| 36.4125
| 87
| 0.661861
|
500e742af64bf6473ad0fbbd33ab4087e53f2250
| 4,018
|
py
|
Python
|
scan.py
|
habx/attachment-scanner
|
4f7b1f8850272d4a4fea8c83380ddc6c637257e9
|
[
"Apache-2.0"
] | null | null | null |
scan.py
|
habx/attachment-scanner
|
4f7b1f8850272d4a4fea8c83380ddc6c637257e9
|
[
"Apache-2.0"
] | 103
|
2018-06-14T18:14:29.000Z
|
2022-03-16T20:53:44.000Z
|
scan.py
|
habx/attachment-scanner
|
4f7b1f8850272d4a4fea8c83380ddc6c637257e9
|
[
"Apache-2.0"
] | null | null | null |
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import clamav
import copy
import json
import metrics
import urllib
from common import *
from datetime import datetime
ENV = os.getenv("ENV", "")
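# Triggered by an S3 event notification; event_object() expects the standard
# shape, roughly:
#     {"Records": [{"s3": {"bucket": {"name": ...}, "object": {"key": ...}}}]}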
def event_object(event):
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))
if (not bucket) or (not key):
print("Unable to retrieve object from event.\n%s" % event)
raise Exception("Unable to retrieve object from event.")
return s3.Object(bucket, key)
def download_s3_object(s3_object, local_prefix):
local_path = "%s/%s/%s" % (local_prefix, s3_object.bucket_name, s3_object.key)
create_dir(os.path.dirname(local_path))
s3_object.download_file(local_path)
return local_path
def set_av_metadata(s3_object, result):
content_type = s3_object.content_type
metadata = s3_object.metadata
metadata[AV_STATUS_METADATA] = result
metadata[AV_TIMESTAMP_METADATA] = datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S UTC")
s3_object.copy(
{
'Bucket': s3_object.bucket_name,
'Key': s3_object.key
},
ExtraArgs={
"ContentType": content_type,
"Metadata": metadata,
"MetadataDirective": "REPLACE"
}
)
def set_av_tags(s3_object, result):
curr_tags = s3_client.get_object_tagging(Bucket=s3_object.bucket_name, Key=s3_object.key)["TagSet"]
new_tags = copy.copy(curr_tags)
for tag in curr_tags:
if tag["Key"] in [AV_STATUS_METADATA, AV_TIMESTAMP_METADATA]:
new_tags.remove(tag)
new_tags.append({"Key": AV_STATUS_METADATA, "Value": result})
new_tags.append({"Key": AV_TIMESTAMP_METADATA, "Value": datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S UTC")})
s3_client.put_object_tagging(
Bucket=s3_object.bucket_name,
Key=s3_object.key,
Tagging={"TagSet": new_tags}
)
def sns_scan_results(s3_object, result):
if AV_STATUS_SNS_ARN is None:
return
message = {
"bucket": s3_object.bucket_name,
"key": s3_object.key,
AV_STATUS_METADATA: result,
AV_TIMESTAMP_METADATA: datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S UTC")
}
sns_client = boto3.client("sns")
sns_client.publish(
TargetArn=AV_STATUS_SNS_ARN,
Message=json.dumps({'default': json.dumps(message)}),
MessageStructure="json"
)
def lambda_handler(event, context):
start_time = datetime.utcnow()
print("Script starting at %s\n" %
(start_time.strftime("%Y/%m/%d %H:%M:%S UTC")))
s3_object = event_object(event)
file_path = download_s3_object(s3_object, "/tmp")
clamav.update_defs_from_s3(AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX)
scan_result = clamav.scan_file(file_path)
print("Scan of s3://%s resulted in %s\n" % (os.path.join(s3_object.bucket_name, s3_object.key), scan_result))
if "AV_UPDATE_METADATA" in os.environ:
set_av_metadata(s3_object, scan_result)
set_av_tags(s3_object, scan_result)
sns_scan_results(s3_object, scan_result)
metrics.send(env=ENV, bucket=s3_object.bucket_name, key=s3_object.key, status=scan_result)
# Delete downloaded file to free up room on re-usable lambda function container
try:
os.remove(file_path)
except OSError:
pass
print("Script finished at %s\n" %
datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S UTC"))
| 35.245614
| 113
| 0.68218
|
b85e606d3a3cc44c8c149452b40f84d80ad7dada
| 4,689
|
py
|
Python
|
SciAnalysis/CurveAnalysis/Protocols.py
|
zhouzhouxpyf/CFN-softbio
|
21e4f4845e7a49c97f4ed2b0aa78a7eb831f6bcc
|
[
"BSD-3-Clause"
] | 13
|
2018-04-17T06:35:20.000Z
|
2021-09-22T08:49:47.000Z
|
SciAnalysis/CurveAnalysis/Protocols.py
|
zhouzhouxpyf/CFN-softbio
|
21e4f4845e7a49c97f4ed2b0aa78a7eb831f6bcc
|
[
"BSD-3-Clause"
] | 14
|
2018-04-18T01:05:57.000Z
|
2020-11-05T21:57:09.000Z
|
SciAnalysis/CurveAnalysis/Protocols.py
|
zhouzhouxpyf/CFN-softbio
|
21e4f4845e7a49c97f4ed2b0aa78a7eb831f6bcc
|
[
"BSD-3-Clause"
] | 9
|
2017-05-26T14:47:38.000Z
|
2021-03-24T02:44:59.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vi: ts=4 sw=4
'''
:mod:`SciAnalysis.CurveAnalysis.Protocols` - 1D curve analysis protocols
================================================
.. module:: SciAnalysis.CurveAnalysis.Protocols
:synopsis: Convenient protocols for data analysis.
.. moduleauthor:: Dr. Kevin G. Yager <kyager@bnl.gov>
Brookhaven National Laboratory
'''
################################################################################
# Data analysis protocols.
################################################################################
# Known Bugs:
# N/A
################################################################################
# TODO:
# Search for "TODO" below.
################################################################################
import os
from .Data import *
from ..tools import *
class ProcessorCurve(Processor):
def load(self, infile, **kwargs):
load_args = {
'skiplines' : 1,
'comment_char' : '#',
'xindex' : 0,
'yindex' : -1,
}
load_args.update(kwargs)
data = DataLine(infile, **load_args)
data.infile = infile
return data
class ProcessorCurveStructure(ProcessorCurve):
def load(self, infile, **kwargs):
load_args = {
'skiplines' : 1,
'comment_char' : '#',
'xindex' : 0,
'yindex' : -1,
}
load_args.update(kwargs)
data = DataLineStructured(infile, **load_args)
data.infile = infile
return data
class plot(Protocol):
def __init__(self, name='plot', **kwargs):
self.name = self.__class__.__name__ if name is None else name
self.default_ext = '.png'
self.run_args = {
'blur' : None,
}
self.run_args.update(kwargs)
@run_default
def run(self, data, output_dir, **run_args):
results = {}
if run_args['blur'] is not None:
data.smooth(run_args['blur'])
outfile = self.get_outfile(data.name, output_dir)
data.plot(outfile)
return results
class structure(Protocol):
def __init__(self, name='structure', **kwargs):
self.name = self.__class__.__name__ if name is None else name
self.default_ext = '.png'
self.run_args = {
'blur' : None,
}
self.run_args.update(kwargs)
@run_default
def run(self, data, output_dir, **run_args):
#output_dir = os.path.join(output_dir, data.name)
#make_dir(output_dir)
results = {}
if run_args['blur'] is not None:
data.smooth(run_args['blur'])
if run_args['verbosity']>=2:
plot = True
else:
plot = False
if True:
# Analyze the data by 'sorting' the curve's y-values
outfile = self.get_outfile('sort_{}'.format(data.name), output_dir)
new_results = data.stats(prepend='stats_')
results.update(new_results)
data_n = DataLineStructuredSort(x=data.x, y=data.y)
new_results = data_n.analyze(outfile, plot=plot, **run_args)
new_results = self.prepend_keys(new_results, 'sort_')
results.update(new_results)
new_results = data_n.stats(prepend='stats_normed_')
results.update(new_results)
if True:
# Analyze the variation in variance
outfile = self.get_outfile('std_{}'.format(data.name), output_dir)
data_n = DataLineStructuredStd(x=data.x, y=data.y)
new_results = data_n.analyze(outfile, plot=plot, **run_args)
new_results = self.prepend_keys(new_results, 'std_')
results.update(new_results)
if True:
# Analyze curve spectrally
outfile = self.get_outfile('fft_{}'.format(data.name), output_dir)
data_n = DataLineStructuredFFT(x=data.x, y=data.y)
new_results = data_n.analyze(outfile, plot=plot, **run_args)
new_results = self.prepend_keys(new_results, 'fft_')
results.update(new_results)
new_results = data_n.stats(prepend='fft_stats_')
results.update(new_results)
return results
| 28.077844
| 80
| 0.487524
|
604b215e12fffa882131fe1c6d327f3da76d23bb
| 692
|
py
|
Python
|
setup.py
|
roylanmartinez/Numlet
|
fb7f8ab294dbc4861f9f264d79da054f7c13ac19
|
[
"MIT"
] | 28
|
2020-02-07T01:02:40.000Z
|
2022-01-25T19:59:33.000Z
|
setup.py
|
roylanmartinez/Numeros-naturales-y-cero-a-letras
|
fb7f8ab294dbc4861f9f264d79da054f7c13ac19
|
[
"MIT"
] | null | null | null |
setup.py
|
roylanmartinez/Numeros-naturales-y-cero-a-letras
|
fb7f8ab294dbc4861f9f264d79da054f7c13ac19
|
[
"MIT"
] | 5
|
2020-02-07T05:41:50.000Z
|
2021-01-07T00:03:21.000Z
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="Paquete-nlt-roylanmartinez",
version="2.0.0",
author="Roylan Martinez Vargas",
author_email="roylanmartinez97@gmail.com",
description="Convierte a letras más de 10^600 números",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/roylanmartinez/Numlet",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 30.086957
| 59
| 0.677746
|
0fda70c253628146ffc32b5e386d879cc558d931
| 6,588
|
py
|
Python
|
dems/setsm/rema_strip_r1.1_2m_rgi.py
|
subond/ww_tvol_study
|
6fbcae251015a7cd49220abbb054914266b3b4a1
|
[
"MIT"
] | 20
|
2021-04-28T18:11:43.000Z
|
2022-03-09T13:15:56.000Z
|
dems/setsm/rema_strip_r1.1_2m_rgi.py
|
subond/ww_tvol_study
|
6fbcae251015a7cd49220abbb054914266b3b4a1
|
[
"MIT"
] | 4
|
2021-04-28T15:51:43.000Z
|
2022-01-02T19:10:25.000Z
|
dems/setsm/rema_strip_r1.1_2m_rgi.py
|
rhugonnet/ww_tvol_study
|
f29fc2fca358aa169f6b7cc790e6b6f9f8b55c6f
|
[
"MIT"
] | 9
|
2021-04-28T17:58:27.000Z
|
2021-12-19T05:51:56.000Z
|
"""
@author: hugonnet
download, extract, reproject and pairwise-coregister all REMA 2m strips intersecting glaciers
"""
import os, sys, shutil
from subprocess import Popen
sys.path.append('/home/echos/hugonnet/code/devel/rh_pygeotools')
import pandas as pd
import multiprocessing as mp
from demstripproducts import REMA_strip_r1_1
from shlib import merged_stderr_stdout, stdout_redirected
import matplotlib
import time
import traceback
matplotlib.use('Agg')
os.environ["OMP_NUM_THREADS"] = "8" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "8" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "8" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "8" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "8" # export NUMEXPR_NUM_THREADS=6
main_dir = '/data/icesat/travail_en_cours/romain/ww_tvol_study/worldwide'
tmp_dir = '/calcul/santo/hugonnet/setsm/'
final_dir = '/data/icesat/travail_en_cours/romain/data/dems/rema_2m/'
arcdem_directory_listing = '/data/icesat/travail_en_cours/romain/ww_tvol_study/worldwide/global/SETSM_directory_listing/REMA_v1-1_directory_listing_2m.csv'
rgi_naming_txt = '/data/icesat/travail_en_cours/romain/ww_tvol_study/worldwide/rgi_neighb_merged_naming_convention.txt'
nb_tasks = 8
# read csv list of tiles with more than 0km2 of ice for each RGI region
text_file = open(rgi_naming_txt, 'r')
rgi_list = text_file.readlines()
tile_list_csv = [os.path.join(main_dir, rgi[:-1].split('rgi60')[0] + 'rgi60', 'cov', 'list_glacierized_tiles_' + rgi[:-1].split('rgi60')[0] + 'rgi60' + '.csv') for rgi in rgi_list]
tiles_per_rgi = []
for list_csv in tile_list_csv:
df = pd.read_csv(list_csv)
tiles = df['Tile_name'].tolist()
tiles_per_rgi.append(tiles)
all_tiles = []
for tiles in tiles_per_rgi:
all_tiles = all_tiles + tiles
#list of REMA tiles: directory listing
with open(arcdem_directory_listing) as f:
list_arcdem_dir= f.readlines()
list_arc_tiles = [os.path.split(arcdem_dir)[-1][:-1].upper() for arcdem_dir in list_arcdem_dir]
list_common_tiles=[]
#loop over all tiles of RGI regions
for tile in all_tiles:
#if ArcticDEM tile is common with tile covering glacier
if tile in list_arc_tiles:
list_common_tiles.append(tile)
list_common_tiles = list(set(list_common_tiles)) # remove duplicates
print('Found '+str(len(list_common_tiles))+' REMA r1.1 2m tiles intersecting glaciers.')
list_mosaic_log = [os.path.join(tmp_dir,mosaic_log) for mosaic_log in os.listdir(tmp_dir) if 'mosaic' in mosaic_log and mosaic_log.endswith('log')]
list_remaining_tiles = list_common_tiles.copy()
list_tiles_to_move = []
for mosaic_log in list_mosaic_log:
log = open(mosaic_log,'r')
lines = log.readlines()
log.close()
if 'Fin' in lines[-1]:
tile_name = os.path.splitext(os.path.basename(mosaic_log))[0].split('_')[1]
list_remaining_tiles.remove(tile_name.upper())
list_tiles_to_move.append(tile_name.lower())
for tile_name in list_tiles_to_move:
processed_dir = os.path.join(tmp_dir,'2m','processed_'+tile_name)
if os.path.exists(processed_dir):
tgt_dir = os.path.join(final_dir,'processed_'+tile_name)
if os.path.exists(tgt_dir):
shutil.rmtree(tgt_dir)
shutil.copytree(processed_dir,tgt_dir)
print('Found '+str(len(list_remaining_tiles))+' REMA r1.1 2m tiles not yet processed.')
def get_process_tile(tmp_dir,tile):
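    # download one REMA tile directory with wget (with retries), then mosaic/coregister
    # the strips with REMA_strip_r1_1; per-tile wget and mosaic logs are written to tmp_dir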
def check_wget(log_file):
chk=0
with open(log_file) as s:
text = s.readlines()
for line in text:
# if 'error' in line or 'Error' in line or 'ERROR' in line:
if ('ERROR 404' not in line and 'robots.txt' not in line) and (
'error' in line or 'Error' in line or 'ERROR' in line or 'fail' in line or 'Fail' in line or 'FAIL' in line):
print(text.index(line))
print(line)
chk=1
return chk==0
#log files
wget_log_file = os.path.join(tmp_dir,'wget_'+tile+'.log')
mosaic_log_file = os.path.join(tmp_dir,'mosaic_'+tile+'.log')
wget_fail = os.path.join(tmp_dir,'wget_fails.log')
#get REMA tile naming
arc_tile_name = tile.lower()
print('Downloading '+arc_tile_name+'... writing to: '+wget_log_file)
check = True
u=0
while check and u<5:
u=u+1
#if download already failed, sleep 10min before trying again
if u>1:
time.sleep(600)
#download strip data
cmd = 'wget -r -N -nH -np -R index.html* --cut-dirs=6 http://data.pgc.umn.edu/elev/dem/setsm/REMA/geocell/v1.0/2m/'+arc_tile_name+'/ -P '+tmp_dir
log = open(wget_log_file,'w')
p = Popen(cmd,stdout=log, stderr=log,shell=True)
p.wait()
log.close()
check = not check_wget(wget_log_file)
#if download keeps failing for some reason, abandon and append to specific log of failed downloads
if u==5:
print('Downloading for '+arc_tile_name+' failed.')
with open(wget_fail,'a') as fail:
fail.write(arc_tile_name+'\n')
#otherwise, process
else:
print('Processing '+arc_tile_name+'... writing to: '+mosaic_log_file)
tmp_dir_out = os.path.join(tmp_dir,'2m','processed_'+arc_tile_name)
if not os.path.exists(tmp_dir_out):
os.mkdir(tmp_dir_out)
else:
shutil.rmtree(tmp_dir_out)
os.mkdir(tmp_dir_out)
with open(mosaic_log_file,'w') as mos:
with stdout_redirected(to=mos), merged_stderr_stdout():
try:
REMA_strip_r1_1(os.path.join(tmp_dir,'2m'),arc_tile_name,tmp_dir_out,mosaic_coreg_segm=True,tgt_EPSG=None,tgt_res=[30,-30],nodata_out=-9999,interp_method='bilinear',geoid=False,rm_tar=True,downsample=True)
except Exception:
print(traceback.format_exc())
def batch_wrapper(arg_dict):
return get_process_tile(**arg_dict)
if nb_tasks==1:
for t in list_remaining_tiles:
get_process_tile(tmp_dir,t)
else:
pool = mp.Pool(nb_tasks)
arg_dict = {'tmp_dir': tmp_dir}
u_args = [{'tile': t} for t in list_remaining_tiles]
for t in u_args:
t.update(arg_dict)
pool.map(batch_wrapper,u_args)
pool.close()
#final sorting
# for rgi in rgi_list:
# if tile in tiles_per_rgi[rgi_list.index(rgi)]:
# dir_out = os.path.join(main_dir,(str(rgi)).zfill(2) + '_rgi60','tile_process',tile,'corr_dem_arcDEM')
# shutil.copytree(tmp_dir_out,dir_out)
#
# shutil.rmtree(tmp_dir_out)
| 38.302326
| 225
| 0.687007
|
7013f081b22a20748d8b78ff2917c84bc6626270
| 14,226
|
py
|
Python
|
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/dns.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 10
|
2020-02-07T18:57:44.000Z
|
2021-09-11T10:29:34.000Z
|
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/dns.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 1
|
2021-01-14T12:01:55.000Z
|
2021-01-14T12:01:55.000Z
|
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/dns.py
|
albailey/config
|
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
|
[
"Apache-2.0"
] | 10
|
2020-10-13T08:37:46.000Z
|
2022-02-09T00:21:25.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
import jsonpatch
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from oslo_log import log
from sysinv._i18n import _
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
from netaddr import IPAddress
from netaddr import AddrFormatError
LOG = log.getLogger(__name__)
class DNSPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class DNS(base.APIBase):
"""API representation of DNS configuration.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
    a dns.
"""
uuid = types.uuid
"Unique UUID for this dns"
nameservers = wtypes.text
"Represent the nameservers of the idns. csv list."
action = wtypes.text
"Represent the action on the idns."
forisystemid = int
"The isystemid that this idns belongs to"
isystem_uuid = types.uuid
"The UUID of the system this dns belongs to"
links = [link.Link]
"A list containing a self link and associated dns links"
created_at = wtypes.datetime.datetime
updated_at = wtypes.datetime.datetime
def __init__(self, **kwargs):
self.fields = list(objects.dns.fields.keys())
for k in self.fields:
setattr(self, k, kwargs.get(k))
# 'action' is not part of objects.idns.fields
# (it's an API-only attribute)
self.fields.append('action')
setattr(self, 'action', kwargs.get('action', None))
@classmethod
def convert_with_links(cls, rpc_dns, expand=True):
# fields = ['uuid', 'address'] if not expand else None
# dns = idns.from_rpc_object(rpc_dns, fields)
dns = DNS(**rpc_dns.as_dict())
if not expand:
dns.unset_fields_except(['uuid',
'nameservers',
'isystem_uuid',
'created_at',
'updated_at'])
# never expose the isystem_id attribute
dns.isystem_id = wtypes.Unset
# never expose the isystem_id attribute, allow exposure for now
# dns.forisystemid = wtypes.Unset
dns.links = [link.Link.make_link('self', pecan.request.host_url,
'idnss', dns.uuid),
link.Link.make_link('bookmark',
pecan.request.host_url,
'idnss', dns.uuid,
bookmark=True)
]
return dns
class DNSCollection(collection.Collection):
"""API representation of a collection of dnss."""
idnss = [DNS]
"A list containing dns objects"
def __init__(self, **kwargs):
self._type = 'idnss'
@classmethod
def convert_with_links(cls, rpc_dnss, limit, url=None,
expand=False, **kwargs):
collection = DNSCollection()
collection.idnss = [DNS.convert_with_links(p, expand)
for p in rpc_dnss]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
##############
# UTILS
##############
def _check_dns_data(dns, ip_family):
# Get data
nameservers = dns['nameservers']
idns_nameservers_list = []
dns_nameservers = ""
MAX_S = 3
if 'forisystemid' in dns.keys():
ntp_list = pecan.request.dbapi.intp_get_by_isystem(dns['forisystemid'])
else:
ntp_list = pecan.request.dbapi.intp_get_by_isystem(dns['isystem_uuid'])
if nameservers:
for nameserver in [n.strip() for n in nameservers.split(',')]:
# Semantic check each server as IP
try:
idns_nameservers_list.append(str(IPAddress(nameserver)))
if ip_family and IPAddress(nameserver).version != ip_family:
raise wsme.exc.ClientSideError(_(
"IP version mismatch: was expecting "
"IPv%d, IPv%d received") % (ip_family,
IPAddress(nameserver).version))
except (AddrFormatError, ValueError):
if nameserver == 'NC':
idns_nameservers_list.append(str(""))
break
raise wsme.exc.ClientSideError(_(
"Invalid DNS nameserver target address %s "
"Please configure a valid DNS "
"address.") % (nameserver))
if len(idns_nameservers_list) == 0 or idns_nameservers_list == [""]:
if ntp_list:
if hasattr(ntp_list[0], 'ntpservers'):
if ntp_list[0].ntpservers:
for ntpserver in [n.strip() for n in
ntp_list[0].ntpservers.split(',')]:
try:
str(IPAddress(ntpserver))
except (AddrFormatError, ValueError):
if utils.is_valid_hostname(ntpserver):
raise wsme.exc.ClientSideError(_(
"At least one DNS server must be used "
"when any NTP server address is using "
"FQDN. Alternatively, use IPv4 or IPv6 for"
"NTP server address and then delete DNS "
"servers."))
if len(idns_nameservers_list) > MAX_S:
raise wsme.exc.ClientSideError(_(
"Maximum DNS nameservers supported: %s but provided: %s. "
"Please configure a valid list of DNS nameservers."
% (MAX_S, len(idns_nameservers_list))))
dns_nameservers = ",".join(idns_nameservers_list)
dns['nameservers'] = dns_nameservers
return dns
LOCK_NAME = 'DNSController'
class DNSController(rest.RestController):
"""REST controller for idnss."""
_custom_actions = {
'detail': ['GET'],
}
def __init__(self, from_isystems=False):
self._from_isystems = from_isystems
def _get_dnss_collection(self, isystem_uuid, marker, limit, sort_key,
sort_dir, expand=False, resource_url=None):
if self._from_isystems and not isystem_uuid:
raise exception.InvalidParameterValue(_(
"System id not specified."))
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.dns.get_by_uuid(pecan.request.context,
marker)
if isystem_uuid:
dnss = pecan.request.dbapi.idns_get_by_isystem(
isystem_uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
dnss = pecan.request.dbapi.idns_get_list(limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return DNSCollection.convert_with_links(dnss, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(DNSCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def get_all(self, isystem_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of dnss. Only one per system"""
return self._get_dnss_collection(isystem_uuid, marker, limit,
sort_key, sort_dir)
@wsme_pecan.wsexpose(DNSCollection, types.uuid, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, isystem_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of dnss with detail."""
        # NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "idnss":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['dnss', 'detail'])
return self._get_dnss_collection(isystem_uuid,
marker, limit,
sort_key, sort_dir,
expand, resource_url)
@wsme_pecan.wsexpose(DNS, types.uuid)
def get_one(self, dns_uuid):
"""Retrieve information about the given dns."""
if self._from_isystems:
raise exception.OperationNotPermitted
rpc_dns = objects.dns.get_by_uuid(pecan.request.context, dns_uuid)
return DNS.convert_with_links(rpc_dns)
@wsme_pecan.wsexpose(DNS, body=DNS)
def post(self, dns):
"""Create a new dns."""
raise exception.OperationNotPermitted
@cutils.synchronized(LOCK_NAME)
@wsme.validate(types.uuid, [DNSPatchType])
@wsme_pecan.wsexpose(DNS, types.uuid,
body=[DNSPatchType])
def patch(self, dns_uuid, patch):
"""Update the current DNS configuration."""
if self._from_isystems:
raise exception.OperationNotPermitted
rpc_dns = objects.dns.get_by_uuid(pecan.request.context, dns_uuid)
action = None
for p in patch:
if '/action' in p['path']:
value = p['value']
patch.remove(p)
if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
action = value
break
# replace isystem_uuid and idns_uuid with corresponding
patch_obj = jsonpatch.JsonPatch(patch)
state_rel_path = ['/uuid', '/id', '/forisystemid',
'/isystem_uuid']
if any(p['path'] in state_rel_path for p in patch_obj):
raise wsme.exc.ClientSideError(_("The following fields can not be "
"modified: %s" %
state_rel_path))
for p in patch_obj:
if p['path'] == '/isystem_uuid':
isystem = objects.system.get_by_uuid(pecan.request.context,
p['value'])
p['path'] = '/forisystemid'
p['value'] = isystem.id
try:
# Keep an original copy of the dns data
dns_orig = rpc_dns.as_dict()
dns = DNS(**jsonpatch.apply_patch(rpc_dns.as_dict(),
patch_obj))
except utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Since dns requests on the controller go over the oam network,
# check the ip version of the oam address pool in the database
oam_network = pecan.request.dbapi.network_get_by_type(
constants.NETWORK_TYPE_OAM)
oam_address_pool = pecan.request.dbapi.address_pool_get(
oam_network.pool_uuid)
ip_family = oam_address_pool.family
LOG.info("dns %s; ip_family: ipv%d" % (dns.as_dict(), ip_family))
dns = _check_dns_data(dns.as_dict(), ip_family)
try:
# Update only the fields that have changed
for field in objects.dns.fields:
if rpc_dns[field] != dns[field]:
rpc_dns[field] = dns[field]
delta = rpc_dns.obj_what_changed()
if delta:
rpc_dns.save()
if action == constants.APPLY_ACTION:
# perform rpc to conductor to perform config apply
pecan.request.rpcapi.update_dns_config(
pecan.request.context)
else:
LOG.info("No DNS config changes")
return DNS.convert_with_links(rpc_dns)
except Exception as e:
# rollback database changes
for field in dns_orig:
if rpc_dns[field] != dns_orig[field]:
rpc_dns[field] = dns_orig[field]
rpc_dns.save()
msg = _("Failed to update the DNS configuration")
if e == exception.HTTPNotFound:
msg = _("DNS update failed: system %s dns %s : patch %s"
% (isystem['systemname'], dns, patch))
raise wsme.exc.ClientSideError(msg)
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, dns_uuid):
"""Delete a dns."""
raise exception.OperationNotPermitted
| 36.383632
| 79
| 0.557219
|
43e694b5bdd869a05f095be932647618d63248f0
| 5,285
|
py
|
Python
|
scripts/external_libs/scapy-2.3.1/python3/scapy/arch/unix.py
|
klement/trex-core
|
b98e2e6d2b8c6caeb233ce36fcbc131ffc45e35e
|
[
"Apache-2.0"
] | 7
|
2017-12-30T20:57:43.000Z
|
2020-03-22T00:15:26.000Z
|
scripts/external_libs/scapy-2.3.1/python3/scapy/arch/unix.py
|
klement/trex-core
|
b98e2e6d2b8c6caeb233ce36fcbc131ffc45e35e
|
[
"Apache-2.0"
] | 13
|
2019-10-11T12:33:43.000Z
|
2020-02-10T08:28:34.000Z
|
scripts/external_libs/scapy-2.3.1/python3/scapy/arch/unix.py
|
klement/trex-core
|
b98e2e6d2b8c6caeb233ce36fcbc131ffc45e35e
|
[
"Apache-2.0"
] | 3
|
2017-09-14T13:18:05.000Z
|
2020-02-10T15:56:54.000Z
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Common customizations for all Unix-like operating systems other than Linux
"""
import sys,os,struct,socket,time
from subprocess import check_output
from fcntl import ioctl
from scapy.error import warning
import scapy.config
import scapy.utils
import scapy.utils6
import scapy.arch
scapy.config.conf.use_winpcapy = True
scapy.config.conf.use_netifaces = True
scapy.config.conf.use_dnet = True
from .pcapdnet import *
##################
## Routes stuff ##
##################
def read_routes():
if scapy.arch.SOLARIS:
f=check_output(["netstat", "-rvn"], universal_newlines = True) # -f inet
elif scapy.arch.FREEBSD:
f=check_output(["netstat", "-rnW"], universal_newlines = True) # -W to handle long interface names
else:
f=check_output(["netstat", "-rn"], universal_newlines = True) # -f inet
ok = False
routes = []
pending_if = []
for l in f.split('\n'):
l = l.strip()
if l.find("----") >= 0: # a separation line
continue
if not ok:
if_index = [ l.split().index(i) for i in ['Iface', 'Netif', 'Interface', 'Device'] if i in l.split()]
if if_index:
ok = True
if_index = if_index[0]
continue
if not l:
break
if scapy.arch.SOLARIS:
lspl = l.split()
if len(lspl) == 10:
dest,mask,gw,netif,mxfrg,rtt,ref,flg = lspl[:8]
else: # missing interface
dest,mask,gw,mxfrg,rtt,ref,flg = lspl[:7]
netif=None
else:
rt = l.split()
dest,gw,flg = rt[:3]
netif = rt[if_index]
if flg.find("Lc") >= 0:
continue
if dest == "default":
dest = 0
netmask = 0
else:
if scapy.arch.SOLARIS:
netmask = scapy.utils.atol(mask)
elif "/" in dest:
dest,netmask = dest.split("/")
netmask = scapy.utils.itom(int(netmask))
else:
netmask = scapy.utils.itom((dest.count(".") + 1) * 8)
dest += ".0"*(3-dest.count("."))
dest = scapy.utils.atol(dest)
if not "G" in flg:
gw = '0.0.0.0'
if netif is not None:
ifaddr = scapy.arch.get_if_addr(netif)
routes.append((dest,netmask,gw,netif,ifaddr))
else:
pending_if.append((dest,netmask,gw))
# On Solaris, netstat does not provide output interfaces for some routes
# We need to parse completely the routing table to route their gw and
# know their output interface
for dest,netmask,gw in pending_if:
gw_l = scapy.utils.atol(gw)
max_rtmask,gw_if,gw_if_addr, = 0,None,None
for rtdst,rtmask,_,rtif,rtaddr in routes[:]:
if gw_l & rtmask == rtdst:
if rtmask >= max_rtmask:
max_rtmask = rtmask
gw_if = rtif
gw_if_addr = rtaddr
if gw_if:
routes.append((dest,netmask,gw,gw_if,gw_if_addr))
else:
warning("Did not find output interface to reach gateway %s" % gw)
return routes
############
### IPv6 ###
############
def read_routes6():
f = os.popen("netstat -rn -f inet6")
ok = False
mtu_present = False
prio_present = False
routes = []
lifaddr = in6_getifaddr()
for l in f.readlines():
if not l:
break
l = l.strip()
if not ok:
if l.find("Destination") >= 0:
ok = 1
mtu_present = l.find("Mtu") >= 0
prio_present = l.find("Prio") >= 0
continue
# gv 12/12/06: under debugging
if scapy.arch.NETBSD or scapy.arch.OPENBSD:
lspl = l.split()
d,nh,fl = lspl[:3]
dev = lspl[5+mtu_present+prio_present]
expire = None
else: # FREEBSD or DARWIN
d,nh,fl,dev = l.split()[:4]
if [ x for x in lifaddr if x[2] == dev] == []:
continue
if 'L' in fl: # drop MAC addresses
continue
if 'link' in nh:
nh = '::'
cset = [] # candidate set (possible source addresses)
dp = 128
if d == 'default':
d = '::'
dp = 0
if '/' in d:
d,dp = d.split("/")
dp = int(dp)
if '%' in d:
d,dev = d.split('%')
if '%' in nh:
nh,dev = nh.split('%')
if scapy.arch.LOOPBACK_NAME in dev:
if d == '::' and dp == 96: #Do not use ::/96 deprecated IPV4 mapping address
continue
cset = ['::1']
nh = '::'
else:
devaddrs = [ x for x in lifaddr if x[2] == dev ]
cset = scapy.utils6.construct_source_candidate_set(d, dp, devaddrs, scapy.arch.LOOPBACK_NAME)
if len(cset) != 0:
routes.append((d, dp, nh, dev, cset))
f.close()
return routes
| 31.272189
| 113
| 0.512772
|
be86dad8e341d45926c1167e867e6296fec7c80c
| 7,794
|
py
|
Python
|
docs/conf.py
|
andrefarina/mlops-test
|
70b9414449e7adddc4de5bb19e901a2b9bc84a3c
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
andrefarina/mlops-test
|
70b9414449e7adddc4de5bb19e901a2b9bc84a3c
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
andrefarina/mlops-test
|
70b9414449e7adddc4de5bb19e901a2b9bc84a3c
|
[
"FTL"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# mlops-deploy documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mlops-deploy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mlops-deploydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'mlops-deploy.tex',
u'mlops-deploy Documentation',
u"andre.farina", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mlops-deploy', u'mlops-deploy Documentation',
[u"andre.farina"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mlops-deploy', u'mlops-deploy Documentation',
u"andre.farina", 'mlops-deploy',
'curso mlops deploy alura', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.812245
| 80
| 0.706569
|
4fbe1d052a4f90720631b84a181218c247ab2e17
| 4,465
|
py
|
Python
|
tests/fixtures/results/numpydoc.test1.py
|
chadrik/doc484
|
597b421a398f5afcc5feb7abae376820fcc25876
|
[
"MIT"
] | 22
|
2017-07-24T22:12:01.000Z
|
2021-10-17T15:52:48.000Z
|
tests/fixtures/results/numpydoc.test1.py
|
chadrik/doc484
|
597b421a398f5afcc5feb7abae376820fcc25876
|
[
"MIT"
] | 1
|
2019-11-07T03:55:34.000Z
|
2019-11-07T04:08:09.000Z
|
tests/fixtures/results/numpydoc.test1.py
|
chadrik/doc484
|
597b421a398f5afcc5feb7abae376820fcc25876
|
[
"MIT"
] | 2
|
2018-09-25T22:48:16.000Z
|
2020-04-17T11:41:57.000Z
|
"""
Module-level docs
"""
from __future__ import absolute_import, print_function
def basic(one, two, three, four, _five, six_):
# type: (Union[str, int], str, Any, Any, bool, int) -> bool
"""
Parameters
----------
one : Union[str, int]
description of one
two : str
description of two
that spans multiple lines
four
omitted type
_five : bool
description
with
a line break
six_ : int
Returns
-------
bool
True if successful, False otherwise
"""
def star_args(one, *two, **three):
# type: (Union[str, int], *str, **Any) -> bool
"""
Parameters
----------
one : Union[str, int]
two : str
Returns
-------
bool
"""
def star_args2(one, *two, **three):
# type: (Union[str, int], *str, **Any) -> bool
"""
Parameters
----------
one : Union[str, int]
two : *str
Returns
-------
bool
"""
def skiptype(one, two, three):
# notype
"""
Use # notype comment to skip type comment generation
Parameters
----------
one : Union[str, int]
two : str
Returns
-------
bool
"""
def any_doc_types(one, *two, **three):
# type: (Any, *Any, **Any) -> Any
"""
Docstring with explicit Any types triggers comment generation
Parameters
----------
one : Any
two : Any
Returns
-------
Any
"""
def existing_type_comment(one, two, three):
# type: (Union[str, int], str, Any) -> bool
"""
Existing type comments should be overwritten
Parameters
----------
one : Union[str, int]
two : str
Returns
-------
bool
"""
def existing_type_comment_any(one, two, three):
# type: (Any, Any, Any) -> Any
"""
Existing type comments should be overwritten, even with Any types
Parameters
----------
one : Any
two : Any
Returns
-------
Any
"""
def existing_comment(one, two, three):
# type: (Union[str, int], str, Any) -> bool
# this comment should be preserved
"""
Parameters
----------
one : Union[str, int]
two : str
Returns
-------
bool
"""
def default_return_type(one):
# type: (str) -> Any
"""
When no return type is specified, the default type can be globally
configured.
Parameters
----------
one : str
"""
def returns_tuple():
# type: () -> Tuple[Union[str, int], str, four, bool, int]
"""
Verbose tuple return documentation
Returns
-------
one : Union[str, int]
description of one
two : str
description of two
that spans multiple lines
four
omitted type
_five : bool
description
with
a line break
six_ : int
"""
def yields():
# type: () -> Iterator[str]
"""
Yields
------
str
"""
def yields_tuple():
# type: () -> Iterator[Tuple[Union[str, int], str, four, bool, int]]
"""
Verbose tuple return documentation
Yields
------
one : Union[str, int]
description of one
two : str
description of two
that spans multiple lines
four
omitted type
_five : bool
description
with
a line break
six_ : int
"""
class BasicClass:
def foo(self, one, two, three):
# type: (Union[str, int], str, Any) -> bool
"""
Parameters
----------
one : Union[str, int]
two : str
Returns
-------
bool
"""
def function_self(self, one, two, three):
# type: (Any, Union[str, int], str, Any) -> bool
"""
A function with a first argument named self should document self
Parameters
----------
one : Union[str, int]
two : str
Returns
-------
bool
"""
class InitDocsAtClassLevel:
"""
Argument documentation at the class-level should be applied to __init__
Parameters
----------
one : Union[str, int]
two : str
"""
def __init__(self, one, two, three):
# type: (Union[str, int], str, Any) -> None
pass
def no_valid_doc_types(foo, bar):
# type: (Any, Any) -> Any
"""
Docstring doc types which do not match the argument names
Type comment should be generated
Parameters
----------
one : Union[str, int]
two : str
"""
| 16.849057
| 75
| 0.515342
|
9d346471abc8f0ea5632b40efd17e312d3889672
| 278
|
py
|
Python
|
python/setup_exe.py
|
netchira/netchira.github.io
|
bed7b1425fe0ec206887be9cf48a571afbded9e8
|
[
"CC0-1.0"
] | 6
|
2019-09-25T06:43:01.000Z
|
2022-03-11T02:54:47.000Z
|
python/setup_exe.py
|
netchira/netchira.github.io
|
bed7b1425fe0ec206887be9cf48a571afbded9e8
|
[
"CC0-1.0"
] | 6
|
2019-01-06T07:35:10.000Z
|
2022-02-26T03:46:28.000Z
|
python/setup_exe.py
|
netchira/netchira.github.io
|
bed7b1425fe0ec206887be9cf48a571afbded9e8
|
[
"CC0-1.0"
] | 7
|
2021-05-14T07:04:36.000Z
|
2022-03-20T18:23:28.000Z
|
from distutils.core import setup
import py2exe
script = 'CompareTxt.py'
option = {
'compressed': 1,
'optimize': 2,
'bundle_files': 3,
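    # note: py2exe semantics assumed here - 'optimize': 2 is roughly "python -OO",
    # and 'bundle_files': 3 keeps dependencies as separate files next to the exe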
}
setup(
options = {
'py2exe': option,
},
console = [
{'script': script }
],
zipfile = None,
)
| 14.631579
| 32
| 0.543165
|
b2604195d2d10eda311a6fd23b98c8e917ca4245
| 107
|
py
|
Python
|
lab_assignment/lab_9/linux_mac/os.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
lab_assignment/lab_9/linux_mac/os.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
lab_assignment/lab_9/linux_mac/os.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
import os
os.mkdir("log")
if not os.path.isdir("log"):
print("there isn't ", dir)
os.mkdir("log")
| 15.285714
| 30
| 0.598131
|
a3dbfa15b57c59cc10a4134ab441212e02f7e728
| 799
|
py
|
Python
|
src/core/sly/docparse.py
|
mehanalavimajd/hascal
|
5ad81bb673ab40ae2b27540e56eae496582b4b2b
|
[
"BSL-1.0"
] | 1
|
2021-05-12T09:33:24.000Z
|
2021-05-12T09:33:24.000Z
|
src/core/sly/docparse.py
|
mehanalavimajd/hascal
|
5ad81bb673ab40ae2b27540e56eae496582b4b2b
|
[
"BSL-1.0"
] | null | null | null |
src/core/sly/docparse.py
|
mehanalavimajd/hascal
|
5ad81bb673ab40ae2b27540e56eae496582b4b2b
|
[
"BSL-1.0"
] | null | null | null |
__all__ = [ 'DocParseMeta' ]
class DocParseMeta(type):
@staticmethod
def __new__(meta, clsname, bases, clsdict):
if '__doc__' in clsdict:
lexer = meta.lexer()
parser = meta.parser()
lexer.cls_name = parser.cls_name = clsname
lexer.cls_qualname = parser.cls_qualname = clsdict['__qualname__']
lexer.cls_module = parser.cls_module = clsdict['__module__']
parsedict = parser.parse(lexer.tokenize(clsdict['__doc__']))
assert isinstance(parsedict, dict), 'Parser must return a dictionary'
clsdict.update(parsedict)
return super().__new__(meta, clsname, bases, clsdict)
@classmethod
def __init_subclass__(cls):
assert hasattr(cls, 'parser') and hasattr(cls, 'lexer')
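# A minimal usage sketch (not from the original module; names below are hypothetical):
# subclass DocParseMeta with `lexer` and `parser` class attributes, then use that
# subclass as a metaclass. The parser must return a dict built from the docstring,
# which gets merged into the class namespace.
#
#   class KeyValueDocParse(DocParseMeta):
#       lexer = KeyValueLexer      # hypothetical sly Lexer subclass
#       parser = KeyValueParser    # hypothetical sly Parser returning a dict
#
#   class Config(metaclass=KeyValueDocParse):
#       '''
#       host = localhost
#       port = 8080
#       '''
#   # Config.host / Config.port are now set by whatever dict the parser produced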
| 39.95
| 81
| 0.637046
|
fa3d4abaab581f30fe9ebb0e5cf3e3a2db5d985d
| 57,300
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/one_vm.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/one_vm.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/one_vm.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
(c) 2017, Milan Ilic <milani@nordeus.com>
(c) 2019, Jan Meerkamp <meerkamp@dvv.de>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: one_vm
short_description: Creates or terminates OpenNebula instances
description:
- Manages OpenNebula instances
requirements:
- pyone
options:
api_url:
description:
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- If not set then the value of the C(ONE_URL) environment variable is used.
type: str
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the C(ONE_USERNAME) environment variable is used.
type: str
api_password:
description:
- Password of the user to login into OpenNebula RPC server. If not set
- then the value of the C(ONE_PASSWORD) environment variable is used.
      - if both I(api_username) and I(api_password) are not set, then it will try to
- authenticate with ONE auth file. Default path is "~/.one/one_auth".
- Set environment variable C(ONE_AUTH) to override this path.
type: str
template_name:
description:
      - Name of VM template to use to create a new instance
type: str
template_id:
description:
- ID of a VM template to use to create a new instance
type: int
vm_start_on_hold:
description:
- Set to true to put vm on hold while creating
default: False
type: bool
instance_ids:
description:
- A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
aliases: ['ids']
type: list
elements: int
state:
description:
- C(present) - create instances from a template specified with C(template_id)/C(template_name).
- C(running) - run instances
- C(poweredoff) - power-off instances
- C(rebooted) - reboot instances
- C(absent) - terminate instances
choices: ["present", "absent", "running", "rebooted", "poweredoff"]
default: present
type: str
hard:
description:
- Reboot, power-off or terminate instances C(hard)
default: no
type: bool
wait:
description:
- Wait for the instance to reach its desired state before returning. Keep
      - in mind that if you are waiting for the instance to be in the running state it
      - doesn't mean that you will be able to SSH to that machine, only that the
      - boot process has started on that instance; see the 'wait_for' example for
- details.
default: yes
type: bool
wait_timeout:
description:
- How long before wait gives up, in seconds
default: 300
type: int
attributes:
description:
- A dictionary of key/value attributes to add to new instances, or for
- setting C(state) of instances with these attributes.
- Keys are case insensitive and OpenNebula automatically converts them to upper case.
- Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
- C(#) character(s) can be appended to the C(NAME) and the module will automatically add
- indexes to the names of VMs.
- For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
- When used with C(count_attributes) and C(exact_count) the module will
- match the base name without the index part.
default: {}
type: dict
labels:
description:
- A list of labels to associate with new instances, or for setting
- C(state) of instances with these labels.
default: []
type: list
elements: str
count_attributes:
description:
- A dictionary of key/value attributes that can only be used with
- C(exact_count) to determine how many nodes based on a specific
- attributes criteria should be deployed. This can be expressed in
- multiple ways and is shown in the EXAMPLES section.
type: dict
count_labels:
description:
- A list of labels that can only be used with C(exact_count) to determine
- how many nodes based on a specific labels criteria should be deployed.
- This can be expressed in multiple ways and is shown in the EXAMPLES
- section.
type: list
elements: str
count:
description:
- Number of instances to launch
default: 1
type: int
exact_count:
description:
- Indicates how many instances that match C(count_attributes) and
- C(count_labels) parameters should be deployed. Instances are either
- created or terminated based on this value.
      - NOTE':' Instances with the lowest IDs will be terminated first.
type: int
mode:
description:
      - Set permission mode of the instance in octal format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
type: str
owner_id:
description:
- ID of the user which will be set as the owner of the instance
type: int
group_id:
description:
- ID of the group which will be set as the group of the instance
type: int
memory:
description:
- The size of the memory for new instances (in MB, GB, ...)
type: str
disk_size:
description:
- The size of the disk created for new instances (in MB, GB, TB,...).
      - NOTE':' If the template has multiple disks, the order of the sizes is
- matched against the order specified in C(template_id)/C(template_name).
type: list
elements: str
cpu:
description:
- Percentage of CPU divided by 100 required for the new instance. Half a
- processor is written 0.5.
type: float
vcpu:
description:
- Number of CPUs (cores) new VM will have.
type: int
networks:
description:
- A list of dictionaries with network parameters. See examples for more details.
default: []
type: list
elements: dict
disk_saveas:
description:
- Creates an image from a VM disk.
- It is a dictionary where you have to specify C(name) of the new image.
- Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
- I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
- and the VM has to be in the C(poweredoff) state.
- Also this operation will fail if an image with specified C(name) already exists.
type: dict
persistent:
description:
- Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
default: NO
type: bool
version_added: '0.2.0'
datastore_id:
description:
      - ID of the datastore to use to create a new instance
version_added: '0.2.0'
type: int
datastore_name:
description:
      - Name of the datastore to use to create a new instance
version_added: '0.2.0'
type: str
author:
- "Milan Ilic (@ilicmilan)"
- "Jan Meerkamp (@meerkampdvv)"
'''
EXAMPLES = '''
- name: Create a new instance
community.general.one_vm:
template_id: 90
register: result
- name: Print VM properties
ansible.builtin.debug:
msg: result
- name: Deploy a new VM on hold
community.general.one_vm:
template_name: 'app1_template'
vm_start_on_hold: 'True'
- name: Deploy a new VM and set its name to 'foo'
community.general.one_vm:
template_name: 'app1_template'
attributes:
name: foo
- name: Deploy a new VM and set its group_id and mode
community.general.one_vm:
template_id: 90
group_id: 16
mode: 660
- name: Deploy a new VM as persistent
community.general.one_vm:
template_id: 90
persistent: yes
- name: Change VM's permissions to 640
community.general.one_vm:
instance_ids: 5
mode: 640
- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
community.general.one_vm:
template_id: 15
disk_size: 35.2 GB
memory: 4 GB
vcpu: 4
count: 2
networks:
- NETWORK_ID: 27
- NETWORK: "default-network"
NETWORK_UNAME: "app-user"
SECURITY_GROUPS: "120,124"
- NETWORK_ID: 27
SECURITY_GROUPS: "10"
- name: Deploy a new instance which uses a Template with two Disks
community.general.one_vm:
template_id: 42
disk_size:
- 35.2 GB
- 50 GB
memory: 4 GB
vcpu: 4
count: 1
networks:
- NETWORK_ID: 27
- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'"
community.general.one_vm:
template_id: 53
attributes:
name: foo
bar: bar1
- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
community.general.one_vm:
template_id: 53
attributes:
foo1: app1
foo2: app2
exact_count: 2
count_attributes:
foo1: app1
foo2: app2
- name: Enforce that 4 instances with an attribute 'bar' are deployed
community.general.one_vm:
template_id: 53
attributes:
name: app
bar: bar2
exact_count: 4
count_attributes:
bar:
# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
# Names will be: fooapp-00 and fooapp-01
- name: Deploy 2 new instances
community.general.one_vm:
template_id: 53
attributes:
name: fooapp-##
foo: bar
labels:
- app1
- app2
count: 2
# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
# Names will be: fooapp-002 and fooapp-003
- name: Deploy 2 new instances
community.general.one_vm:
template_id: 53
attributes:
name: fooapp-###
app: app1
count: 2
# Reboot all instances with name in format 'fooapp-#'
# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
- name: Reboot all instances with names in a certain format
community.general.one_vm:
attributes:
name: fooapp-#
state: rebooted
# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
# The task will delete oldest instances, so only the 'fooapp-003' will remain
- name: Enforce that only 1 instance with name in a certain format is deployed
community.general.one_vm:
template_id: 53
exact_count: 1
count_attributes:
name: fooapp-#
- name: Deploy a new instance with a network
community.general.one_vm:
template_id: 53
networks:
- NETWORK_ID: 27
register: vm
- name: Wait for SSH to come up
ansible.builtin.wait_for_connection:
delegate_to: '{{ vm.instances[0].networks[0].ip }}'
- name: Terminate VMs by ids
community.general.one_vm:
instance_ids:
- 153
- 160
state: absent
- name: Reboot all VMs that have labels 'foo' and 'app1'
community.general.one_vm:
labels:
- foo
- app1
state: rebooted
- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
community.general.one_vm:
attributes:
name: foo
app: bar
register: results
- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
community.general.one_vm:
template_name: app_template
labels:
- foo1
- foo2
count: 2
- name: Enforce that only 1 instance with label 'foo1' will be running
community.general.one_vm:
template_name: app_template
labels:
- foo1
exact_count: 1
count_labels:
- foo1
- name: Terminate all instances that have attribute foo
community.general.one_vm:
template_id: 53
exact_count: 0
count_attributes:
foo:
- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
community.general.one_vm:
instance_ids: 351
state: poweredoff
disk_saveas:
name: foo-image
- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
community.general.one_vm:
instance_ids: 351
disk_saveas:
name: bar-image
disk_id: 1
'''
RETURN = '''
instances_ids:
description: a list of instances ids whose state is changed or which are fetched with C(instance_ids) option.
type: list
returned: success
sample: [ 1234, 1235 ]
instances:
description: a list of instances info whose state is changed or which are fetched with C(instance_ids) option.
type: complex
returned: success
contains:
vm_id:
description: vm id
type: int
sample: 153
vm_name:
description: vm name
type: str
sample: foo
template_id:
description: vm's template id
type: int
sample: 153
group_id:
description: vm's group id
type: int
sample: 1
group_name:
description: vm's group name
type: str
sample: one-users
owner_id:
description: vm's owner id
type: int
sample: 143
owner_name:
description: vm's owner name
type: str
sample: app-user
mode:
description: vm's mode
type: str
returned: success
sample: 660
state:
description: state of an instance
type: str
sample: ACTIVE
lcm_state:
description: lcm state of an instance that is only relevant when the state is ACTIVE
type: str
sample: RUNNING
cpu:
description: Percentage of CPU divided by 100
type: float
sample: 0.2
vcpu:
description: Number of CPUs (cores)
type: int
sample: 2
memory:
description: The size of the memory in MB
type: str
sample: 4096 MB
disk_size:
description: The size of the disk in MB
type: str
sample: 20480 MB
networks:
description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
type: list
sample: [
{
"ip": "10.120.5.33",
"mac": "02:00:0a:78:05:21",
"name": "default-test-private",
"security_groups": "0,10"
},
{
"ip": "10.120.5.34",
"mac": "02:00:0a:78:05:22",
"name": "default-test-private",
"security_groups": "0"
}
]
uptime_h:
description: Uptime of the instance in hours
type: int
sample: 35
labels:
description: A list of string labels that are associated with the instance
type: list
sample: [
"foo",
"spec-label"
]
attributes:
description: A dictionary of key/values attributes that are associated with the instance
type: dict
sample: {
"HYPERVISOR": "kvm",
"LOGO": "images/logos/centos.png",
"TE_GALAXY": "bar",
"USER_INPUTS": null
}
tagged_instances:
description:
- A list of instances info based on a specific attributes and/or
- labels that are specified with C(count_attributes) and C(count_labels)
- options.
type: complex
returned: success
contains:
vm_id:
description: vm id
type: int
sample: 153
vm_name:
description: vm name
type: str
sample: foo
template_id:
description: vm's template id
type: int
sample: 153
group_id:
description: vm's group id
type: int
sample: 1
group_name:
description: vm's group name
type: str
sample: one-users
owner_id:
description: vm's user id
type: int
sample: 143
owner_name:
description: vm's user name
type: str
sample: app-user
mode:
description: vm's mode
type: str
returned: success
sample: 660
state:
description: state of an instance
type: str
sample: ACTIVE
lcm_state:
description: lcm state of an instance that is only relevant when the state is ACTIVE
type: str
sample: RUNNING
cpu:
description: Percentage of CPU divided by 100
type: float
sample: 0.2
vcpu:
description: Number of CPUs (cores)
type: int
sample: 2
memory:
description: The size of the memory in MB
type: str
sample: 4096 MB
disk_size:
description: The size of the disk in MB
type: list
sample: [
"20480 MB",
"10240 MB"
]
networks:
description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
type: list
sample: [
{
"ip": "10.120.5.33",
"mac": "02:00:0a:78:05:21",
"name": "default-test-private",
"security_groups": "0,10"
},
{
"ip": "10.120.5.34",
"mac": "02:00:0a:78:05:22",
"name": "default-test-private",
"security_groups": "0"
}
]
uptime_h:
description: Uptime of the instance in hours
type: int
sample: 35
labels:
description: A list of string labels that are associated with the instance
type: list
sample: [
"foo",
"spec-label"
]
attributes:
description: A dictionary of key/values attributes that are associated with the instance
type: dict
sample: {
"HYPERVISOR": "kvm",
"LOGO": "images/logos/centos.png",
"TE_GALAXY": "bar",
"USER_INPUTS": null
}
'''
try:
import pyone
HAS_PYONE = True
except ImportError:
HAS_PYONE = False
from ansible.module_utils.basic import AnsibleModule
import os
def get_template(module, client, predicate):
pool = client.templatepool.info(-2, -1, -1, -1)
# Filter -2 means fetch all templates user can Use
found = 0
found_template = None
template_name = ''
for template in pool.VMTEMPLATE:
if predicate(template):
found = found + 1
found_template = template
template_name = template.NAME
if found == 0:
return None
elif found > 1:
module.fail_json(msg='There are more templates with name: ' + template_name)
return found_template
def get_template_by_name(module, client, template_name):
return get_template(module, client, lambda template: (template.NAME == template_name))
def get_template_by_id(module, client, template_id):
return get_template(module, client, lambda template: (template.ID == template_id))
def get_template_id(module, client, requested_id, requested_name):
template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
if template:
return template.ID
else:
return None
def get_datastore(module, client, predicate):
pool = client.datastorepool.info()
found = 0
found_datastore = None
datastore_name = ''
for datastore in pool.DATASTORE:
if predicate(datastore):
found = found + 1
found_datastore = datastore
datastore_name = datastore.NAME
if found == 0:
return None
elif found > 1:
module.fail_json(msg='There are more datastores with name: ' + datastore_name)
return found_datastore
def get_datastore_by_name(module, client, datastore_name):
return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
def get_datastore_by_id(module, client, datastore_id):
return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
def get_datastore_id(module, client, requested_id, requested_name):
datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
if datastore:
return datastore.ID
else:
return None
def get_vm_by_id(client, vm_id):
try:
vm = client.vm.info(int(vm_id))
except BaseException:
return None
return vm
def get_vms_by_ids(module, client, state, ids):
vms = []
for vm_id in ids:
vm = get_vm_by_id(client, vm_id)
if vm is None and state != 'absent':
module.fail_json(msg='There is no VM with id=' + str(vm_id))
vms.append(vm)
return vms
def get_vm_info(client, vm):
vm = client.vm.info(vm.ID)
networks_info = []
disk_size = []
if 'DISK' in vm.TEMPLATE:
if isinstance(vm.TEMPLATE['DISK'], list):
for disk in vm.TEMPLATE['DISK']:
disk_size.append(disk['SIZE'] + ' MB')
else:
disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
if 'NIC' in vm.TEMPLATE:
if isinstance(vm.TEMPLATE['NIC'], list):
for nic in vm.TEMPLATE['NIC']:
networks_info.append({
'ip': nic.get('IP', ''),
'mac': nic.get('MAC', ''),
'name': nic.get('NETWORK', ''),
'security_groups': nic.get('SECURITY_GROUPS', '')
})
else:
networks_info.append({
'ip': vm.TEMPLATE['NIC'].get('IP', ''),
'mac': vm.TEMPLATE['NIC'].get('MAC', ''),
'name': vm.TEMPLATE['NIC'].get('NETWORK', ''),
'security_groups':
vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '')
})
import time
current_time = time.localtime()
vm_start_time = time.localtime(vm.STIME)
vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
vm_uptime /= (60 * 60)
permissions_str = parse_vm_permissions(client, vm)
# LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
vm_lcm_state = None
if vm.STATE == VM_STATES.index('ACTIVE'):
vm_lcm_state = LCM_STATES[vm.LCM_STATE]
vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
info = {
'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
'vm_id': vm.ID,
'vm_name': vm.NAME,
'state': VM_STATES[vm.STATE],
'lcm_state': vm_lcm_state,
'owner_name': vm.UNAME,
'owner_id': vm.UID,
'networks': networks_info,
'disk_size': disk_size,
'memory': vm.TEMPLATE['MEMORY'] + ' MB',
'vcpu': vm.TEMPLATE['VCPU'],
'cpu': vm.TEMPLATE['CPU'],
'group_name': vm.GNAME,
'group_id': vm.GID,
'uptime_h': int(vm_uptime),
'attributes': vm_attributes,
'mode': permissions_str,
'labels': vm_labels
}
return info
def parse_vm_permissions(client, vm):
vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
permissions = str(owner_octal) + str(group_octal) + str(other_octal)
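    # e.g. owner USE+MANAGE, group USE, others nothing -> "640"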
return permissions
def set_vm_permissions(module, client, vms, permissions):
changed = False
for vm in vms:
vm = client.vm.info(vm.ID)
old_permissions = parse_vm_permissions(client, vm)
changed = changed or old_permissions != permissions
if not module.check_mode and old_permissions != permissions:
permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
mode_bits = [int(d) for d in permissions_str]
try:
client.vm.chmod(
vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
except pyone.OneAuthorizationException:
module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
return changed
def set_vm_ownership(module, client, vms, owner_id, group_id):
changed = False
for vm in vms:
vm = client.vm.info(vm.ID)
if owner_id is None:
owner_id = vm.UID
if group_id is None:
group_id = vm.GID
changed = changed or owner_id != vm.UID or group_id != vm.GID
if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
try:
client.vm.chown(vm.ID, owner_id, group_id)
except pyone.OneAuthorizationException:
module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
return changed
def get_size_in_MB(module, size_str):
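    # e.g. "4096 MB" -> 4096.0 and "35.2 GB" -> ~36044.8 (binary prefixes, result in MB)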
SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
s = size_str
init = size_str
num = ""
while s and s[0:1].isdigit() or s[0:1] == '.':
num += s[0]
s = s[1:]
num = float(num)
symbol = s.strip()
if symbol not in SYMBOLS:
module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
prefix = {'B': 1}
for i, s in enumerate(SYMBOLS[1:]):
prefix[s] = 1 << (i + 1) * 10
size_in_bytes = int(num * prefix[symbol])
size_in_MB = size_in_bytes / (1024 * 1024)
return size_in_MB
def create_disk_str(module, client, template_id, disk_size_list):
if not disk_size_list:
return ''
template = client.template.info(template_id)
if isinstance(template.TEMPLATE['DISK'], list):
# check if the number of disks is correct
if len(template.TEMPLATE['DISK']) != len(disk_size_list):
module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
result = ''
index = 0
for DISKS in template.TEMPLATE['DISK']:
disk = {}
diskresult = ''
# Get all info about existed disk e.g. IMAGE_ID,...
for key, value in DISKS.items():
disk[key] = value
# copy disk attributes if it is not the size attribute
diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
# Set the Disk Size
diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
result += diskresult
index += 1
else:
if len(disk_size_list) > 1:
module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
disk = {}
# Get all info about existed disk e.g. IMAGE_ID,...
for key, value in template.TEMPLATE['DISK'].items():
disk[key] = value
# copy disk attributes if it is not the size attribute
result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
# Set the Disk Size
result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
return result
def create_attributes_str(attributes_dict, labels_list):
attributes_str = ''
if labels_list:
attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n'
if attributes_dict:
attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n'
return attributes_str
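# Illustrative sketch (kept as comments): with labels_list=['db', 'prod'] and
# attributes_dict={'name': 'vm1'}, create_attributes_str() would return
# 'LABELS="db,prod"\nNAME="vm1"\n' -- one LABELS line plus one line per attribute,
# with keys upper-cased, in OpenNebula template syntax.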
def create_nics_str(network_attrs_list):
nics_str = ''
for network in network_attrs_list:
# Packing key-value dict in string with format key="value", key="value"
network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items())
nics_str = nics_str + 'NIC = [' + network_str + ']\n'
return nics_str
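# Illustrative sketch (kept as comments): for a hypothetical
# network_attrs_list=[{'NETWORK_ID': 27, 'NETWORK': 'default'}], create_nics_str()
# yields 'NIC = [NETWORK_ID="27",NETWORK="default"]\n'; each dict in the list
# becomes one NIC block of the template string.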
def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
if attributes_dict:
vm_name = attributes_dict.get('NAME', '')
disk_str = create_disk_str(module, client, template_id, disk_size)
vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
try:
vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
except pyone.OneException as e:
module.fail_json(msg=str(e))
vm = get_vm_by_id(client, vm_id)
return get_vm_info(client, vm)
def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
counter = 0
cnt_str = str(counter).zfill(num_sign_cnt)
while cnt_str in vm_filled_indexes_list:
counter = counter + 1
cnt_str = str(counter).zfill(num_sign_cnt)
return cnt_str
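# Illustrative sketch (kept as comments):
# generate_next_index(['000', '001', '003'], 3) returns '002' -- the lowest
# zero-padded index of the requested width that is not already taken, which is how
# instance names declared as e.g. 'web###' get their next free suffix.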
def get_vm_labels_and_attributes_dict(client, vm_id):
vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
attrs_dict = {}
labels_list = []
for key, value in vm_USER_TEMPLATE.items():
if key != 'LABELS':
attrs_dict[key] = value
else:
if key is not None:
labels_list = value.split(',')
return labels_list, attrs_dict
def get_all_vms_by_attributes(client, attributes_dict, labels_list):
pool = client.vmpool.info(-2, -1, -1, -1).VM
vm_list = []
name = ''
if attributes_dict:
name = attributes_dict.pop('NAME', '')
if name != '':
base_name = name[:len(name) - name.count('#')]
# Check does the name have indexed format
with_hash = name.endswith('#')
for vm in pool:
if vm.NAME.startswith(base_name):
if with_hash and vm.NAME[len(base_name):].isdigit():
# If the name has indexed format and after base_name it has only digits it'll be matched
vm_list.append(vm)
elif not with_hash and vm.NAME == name:
# If the name is not indexed it has to be same
vm_list.append(vm)
pool = vm_list
import copy
vm_list = copy.copy(pool)
for vm in pool:
remove_list = []
vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
if attributes_dict and len(attributes_dict) > 0:
for key, val in attributes_dict.items():
if key in vm_attributes_dict:
if val and vm_attributes_dict[key] != val:
remove_list.append(vm)
break
else:
remove_list.append(vm)
break
vm_list = list(set(vm_list).difference(set(remove_list)))
remove_list = []
if labels_list and len(labels_list) > 0:
for label in labels_list:
if label not in vm_labels_list:
remove_list.append(vm)
break
vm_list = list(set(vm_list).difference(set(remove_list)))
return vm_list
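# Illustrative sketch (kept as comments): a NAME attribute ending in '#' acts as a
# prefix-plus-index pattern, so NAME='aero#' selects VMs named 'aero0', 'aero1',
# 'aero12', while a plain NAME='aero' only matches a VM whose name is exactly
# 'aero'. Any remaining attributes and labels then filter that candidate list.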
def create_count_of_vms(
module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
new_vms_list = []
vm_name = ''
if attributes_dict:
vm_name = attributes_dict.get('NAME', '')
if module.check_mode:
return True, [], []
# Create list of used indexes
vm_filled_indexes_list = None
num_sign_cnt = vm_name.count('#')
if vm_name != '' and num_sign_cnt > 0:
vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None)
base_name = vm_name[:len(vm_name) - num_sign_cnt]
vm_name = base_name
# Make list which contains used indexes in format ['000', '001',...]
vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
while count > 0:
new_vm_name = vm_name
# Create indexed name
if vm_filled_indexes_list is not None:
next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt)
vm_filled_indexes_list.append(next_index)
new_vm_name += next_index
# Update NAME value in the attributes in case there is index
attributes_dict['NAME'] = new_vm_name
new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
new_vm_id = new_vm_dict.get('vm_id')
new_vm = get_vm_by_id(client, new_vm_id)
new_vms_list.append(new_vm)
count -= 1
if vm_start_on_hold:
if wait:
for vm in new_vms_list:
wait_for_hold(module, client, vm, wait_timeout)
else:
if wait:
for vm in new_vms_list:
wait_for_running(module, client, vm, wait_timeout)
return True, new_vms_list, []
def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
vm_count_diff = exact_count - len(vm_list)
changed = vm_count_diff != 0
new_vms_list = []
instances_list = []
tagged_instances_list = vm_list
if module.check_mode:
return changed, instances_list, tagged_instances_list
if vm_count_diff > 0:
# Add more VMs
changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
labels_list, disk_size, network_attrs_list, wait, wait_timeout,
vm_start_on_hold, vm_persistent)
tagged_instances_list += instances_list
elif vm_count_diff < 0:
# Delete surplus VMs
old_vms_list = []
while vm_count_diff < 0:
old_vm = vm_list.pop(0)
old_vms_list.append(old_vm)
terminate_vm(module, client, old_vm, hard)
vm_count_diff += 1
if wait:
for vm in old_vms_list:
wait_for_done(module, client, vm, wait_timeout)
instances_list = old_vms_list
# store only the remaining instances
old_vms_set = set(old_vms_list)
tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
return changed, instances_list, tagged_instances_list
VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME',
'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF',
'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC',
              'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPLUG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
def wait_for_state(module, client, vm, wait_timeout, state_predicate):
import time
start_time = time.time()
while (time.time() - start_time) < wait_timeout:
vm = client.vm.info(vm.ID)
state = vm.STATE
lcm_state = vm.LCM_STATE
if state_predicate(state, lcm_state):
return vm
elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state])
time.sleep(1)
module.fail_json(msg="Wait timeout has expired!")
def wait_for_running(module, client, vm, wait_timeout):
return wait_for_state(module, client, vm, wait_timeout, lambda state,
lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
def wait_for_done(module, client, vm, wait_timeout):
return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
def wait_for_hold(module, client, vm, wait_timeout):
return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
def wait_for_poweroff(module, client, vm, wait_timeout):
return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
def terminate_vm(module, client, vm, hard=False):
changed = False
if not vm:
return changed
changed = True
if not module.check_mode:
if hard:
client.vm.action('terminate-hard', vm.ID)
else:
client.vm.action('terminate', vm.ID)
return changed
def terminate_vms(module, client, vms, hard):
changed = False
for vm in vms:
changed = terminate_vm(module, client, vm, hard) or changed
return changed
def poweroff_vm(module, client, vm, hard):
vm = client.vm.info(vm.ID)
changed = False
lcm_state = vm.LCM_STATE
state = vm.STATE
if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
changed = True
if changed and not module.check_mode:
if not hard:
client.vm.action('poweroff', vm.ID)
else:
client.vm.action('poweroff-hard', vm.ID)
return changed
def poweroff_vms(module, client, vms, hard):
changed = False
for vm in vms:
changed = poweroff_vm(module, client, vm, hard) or changed
return changed
def reboot_vms(module, client, vms, wait_timeout, hard):
if not module.check_mode:
# Firstly, power-off all instances
for vm in vms:
vm = client.vm.info(vm.ID)
lcm_state = vm.LCM_STATE
state = vm.STATE
if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
poweroff_vm(module, client, vm, hard)
# Wait for all to be power-off
for vm in vms:
wait_for_poweroff(module, client, vm, wait_timeout)
for vm in vms:
resume_vm(module, client, vm)
return True
def resume_vm(module, client, vm):
vm = client.vm.info(vm.ID)
changed = False
lcm_state = vm.LCM_STATE
if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
"for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
if lcm_state not in [LCM_STATES.index('RUNNING')]:
changed = True
if changed and not module.check_mode:
client.vm.action('resume', vm.ID)
return changed
def resume_vms(module, client, vms):
changed = False
for vm in vms:
changed = resume_vm(module, client, vm) or changed
return changed
def check_name_attribute(module, attributes):
if attributes.get("NAME"):
import re
if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
"' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
"CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
"TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
def check_attributes(module, attributes):
for key in attributes.keys():
if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
# Check the format of the name attribute
check_name_attribute(module, attributes)
def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
if not disk_saveas.get('name'):
module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
image_name = disk_saveas.get('name')
disk_id = disk_saveas.get('disk_id', 0)
if not module.check_mode:
if vm.STATE != VM_STATES.index('POWEROFF'):
module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
try:
client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
except pyone.OneException as e:
module.fail_json(msg=str(e))
wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
def get_connection_info(module):
url = module.params.get('api_url')
username = module.params.get('api_username')
password = module.params.get('api_password')
if not url:
url = os.environ.get('ONE_URL')
if not username:
username = os.environ.get('ONE_USERNAME')
if not password:
password = os.environ.get('ONE_PASSWORD')
if not username:
if not password:
authfile = os.environ.get('ONE_AUTH')
if authfile is None:
authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
try:
with open(authfile, "r") as fp:
authstring = fp.read().rstrip()
username = authstring.split(":")[0]
password = authstring.split(":")[1]
except (OSError, IOError):
module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
except Exception:
module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
if not url:
module.fail_json(msg="Opennebula API url (api_url) is not specified")
from collections import namedtuple
auth_params = namedtuple('auth', ('url', 'username', 'password'))
return auth_params(url=url, username=username, password=password)
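# Illustrative sketch (kept as comments): credentials are resolved from the module
# parameters first, then the ONE_URL / ONE_USERNAME / ONE_PASSWORD environment
# variables; when neither a username nor a password was found, the file named by
# ONE_AUTH (default ~/.one/one_auth) is read and is expected to hold a single
# 'username:password' line, e.g.
#
#     oneadmin:opennebula
#
# The API url itself has no file fallback and must come from api_url or ONE_URL.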
def main():
fields = {
"api_url": {"required": False, "type": "str"},
"api_username": {"required": False, "type": "str"},
"api_password": {"required": False, "type": "str", "no_log": True},
"instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"},
"template_name": {"required": False, "type": "str"},
"template_id": {"required": False, "type": "int"},
"vm_start_on_hold": {"default": False, "type": "bool"},
"state": {
"default": "present",
"choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
"type": "str"
},
"mode": {"required": False, "type": "str"},
"owner_id": {"required": False, "type": "int"},
"group_id": {"required": False, "type": "int"},
"wait": {"default": True, "type": "bool"},
"wait_timeout": {"default": 300, "type": "int"},
"hard": {"default": False, "type": "bool"},
"memory": {"required": False, "type": "str"},
"cpu": {"required": False, "type": "float"},
"vcpu": {"required": False, "type": "int"},
"disk_size": {"required": False, "type": "list", "elements": "str"},
"datastore_name": {"required": False, "type": "str"},
"datastore_id": {"required": False, "type": "int"},
"networks": {"default": [], "type": "list", "elements": "dict"},
"count": {"default": 1, "type": "int"},
"exact_count": {"required": False, "type": "int"},
"attributes": {"default": {}, "type": "dict"},
"count_attributes": {"required": False, "type": "dict"},
"labels": {"default": [], "type": "list", "elements": "str"},
"count_labels": {"required": False, "type": "list", "elements": "str"},
"disk_saveas": {"type": "dict"},
"persistent": {"default": False, "type": "bool"}
}
module = AnsibleModule(argument_spec=fields,
mutually_exclusive=[
['template_id', 'template_name', 'instance_ids'],
['template_id', 'template_name', 'disk_saveas'],
['instance_ids', 'count_attributes', 'count'],
['instance_ids', 'count_labels', 'count'],
['instance_ids', 'exact_count'],
['instance_ids', 'attributes'],
['instance_ids', 'labels'],
['disk_saveas', 'attributes'],
['disk_saveas', 'labels'],
['exact_count', 'count'],
['count', 'hard'],
['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
['instance_ids', 'networks'],
['persistent', 'disk_size']
],
supports_check_mode=True)
if not HAS_PYONE:
module.fail_json(msg='This module requires pyone to work!')
auth = get_connection_info(module)
params = module.params
instance_ids = params.get('instance_ids')
requested_template_name = params.get('template_name')
requested_template_id = params.get('template_id')
put_vm_on_hold = params.get('vm_start_on_hold')
state = params.get('state')
permissions = params.get('mode')
owner_id = params.get('owner_id')
group_id = params.get('group_id')
wait = params.get('wait')
wait_timeout = params.get('wait_timeout')
hard = params.get('hard')
memory = params.get('memory')
cpu = params.get('cpu')
vcpu = params.get('vcpu')
disk_size = params.get('disk_size')
requested_datastore_id = params.get('datastore_id')
requested_datastore_name = params.get('datastore_name')
networks = params.get('networks')
count = params.get('count')
exact_count = params.get('exact_count')
attributes = params.get('attributes')
count_attributes = params.get('count_attributes')
labels = params.get('labels')
count_labels = params.get('count_labels')
disk_saveas = params.get('disk_saveas')
persistent = params.get('persistent')
if not (auth.username and auth.password):
module.warn("Credentials missing")
else:
one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
if attributes:
attributes = dict((key.upper(), value) for key, value in attributes.items())
check_attributes(module, attributes)
if count_attributes:
count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
if not attributes:
import copy
            module.warn('When you pass `count_attributes` without the `attributes` option when deploying, `attributes` will implicitly take the same values.')
attributes = copy.copy(count_attributes)
check_attributes(module, count_attributes)
if count_labels and not labels:
        module.warn('When you pass `count_labels` without the `labels` option when deploying, `labels` will implicitly take the same values.')
labels = count_labels
# Fetch template
template_id = None
if requested_template_id is not None or requested_template_name:
template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
if template_id is None:
if requested_template_id is not None:
module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
elif requested_template_name:
module.fail_json(msg="There is no template with name: " + requested_template_name)
# Fetch datastore
datastore_id = None
if requested_datastore_id or requested_datastore_name:
datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
if datastore_id is None:
if requested_datastore_id:
module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
elif requested_datastore_name:
module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
else:
attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
if exact_count and template_id is None:
module.fail_json(msg='Option `exact_count` needs template_id or template_name')
if exact_count is not None and not (count_attributes or count_labels):
module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
if (count_attributes or count_labels) and exact_count is None:
module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
if template_id is not None and state != 'present':
module.fail_json(msg="Only state 'present' is valid for the template")
if memory:
attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
if cpu:
attributes['CPU'] = str(cpu)
if vcpu:
attributes['VCPU'] = str(vcpu)
if exact_count is not None and state != 'present':
module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
if exact_count is not None and exact_count < 0:
module.fail_json(msg='`exact_count` cannot be less than 0')
if count <= 0:
module.fail_json(msg='`count` has to be greater than 0')
if permissions is not None:
import re
if re.match("^[0-7]{3}$", permissions) is None:
module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
if exact_count is not None:
# Deploy an exact count of VMs
changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
count_attributes, labels, count_labels, disk_size,
networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
vms = tagged_instances_list
elif template_id is not None and state == 'present':
# Deploy count VMs
changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
attributes, labels, disk_size, networks, wait, wait_timeout,
put_vm_on_hold, persistent)
# instances_list - new instances
# tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
vms = instances_list
else:
# Fetch data of instances, or change their state
if not (instance_ids or attributes or labels):
module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")
if memory or cpu or vcpu or disk_size or networks:
module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")
if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
vms = []
tagged = False
changed = False
if instance_ids:
vms = get_vms_by_ids(module, one_client, state, instance_ids)
else:
tagged = True
vms = get_all_vms_by_attributes(one_client, attributes, labels)
if len(vms) == 0 and state != 'absent' and state != 'present':
module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
if len(vms) == 0 and state == 'present' and not tagged:
module.fail_json(msg='There are no instances with specified `instance_ids`.')
if tagged and state == 'absent':
module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
if state == 'absent':
changed = terminate_vms(module, one_client, vms, hard)
elif state == 'rebooted':
changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
elif state == 'poweredoff':
changed = poweroff_vms(module, one_client, vms, hard)
elif state == 'running':
changed = resume_vms(module, one_client, vms)
instances_list = vms
tagged_instances_list = []
if permissions is not None:
changed = set_vm_permissions(module, one_client, vms, permissions) or changed
if owner_id is not None or group_id is not None:
changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
if wait and not module.check_mode and state != 'present':
wait_for = {
'absent': wait_for_done,
'rebooted': wait_for_running,
'poweredoff': wait_for_poweroff,
'running': wait_for_running
}
for vm in vms:
if vm is not None:
wait_for[state](module, one_client, vm, wait_timeout)
if disk_saveas is not None:
if len(vms) == 0:
module.fail_json(msg="There is no VM whose disk will be saved.")
disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
changed = True
# instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
instances_ids = list(vm.ID for vm in instances_list if vm is not None)
# tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels)
tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
module.exit_json(**result)
if __name__ == '__main__':
main()
| 35.501859
| 158
| 0.610401
|
f1e1aec6684082d56cbe4f0de3a166b3b290cfb6
| 547
|
py
|
Python
|
tests/test_translate.py
|
wasdee/pythainlp
|
9e97321aebc104cb260f801e3b983c937f31ae01
|
[
"Apache-2.0"
] | 1
|
2021-01-13T17:59:55.000Z
|
2021-01-13T17:59:55.000Z
|
tests/test_translate.py
|
prrssr/pythainlp
|
19ff3510a73dd93515fcc1b4485326a8b7172026
|
[
"Apache-2.0"
] | null | null | null |
tests/test_translate.py
|
prrssr/pythainlp
|
19ff3510a73dd93515fcc1b4485326a8b7172026
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from pythainlp.translate import EnThTranslator, ThEnTranslator
class TestTranslatePackage(unittest.TestCase):
def test_translate(self):
self.th_en_translator = ThEnTranslator()
self.assertIsNotNone(
self.th_en_translator.translate(
"แมวกินปลา",
)
)
self.en_th_translator = EnThTranslator()
self.assertIsNotNone(
self.en_th_translator.translate(
"the cat eats fish.",
)
)
| 24.863636
| 62
| 0.599634
|
569879f9fe2ea4ec1eb1e5ca87ad697765effedc
| 8,825
|
py
|
Python
|
BatchExport.py
|
mitchelldmccollum/Blender2.8BatchExport
|
09d99833d1c436e3e004966080ed31117cca039a
|
[
"MIT"
] | null | null | null |
BatchExport.py
|
mitchelldmccollum/Blender2.8BatchExport
|
09d99833d1c436e3e004966080ed31117cca039a
|
[
"MIT"
] | null | null | null |
BatchExport.py
|
mitchelldmccollum/Blender2.8BatchExport
|
09d99833d1c436e3e004966080ed31117cca039a
|
[
"MIT"
] | 1
|
2019-09-05T05:37:20.000Z
|
2019-09-05T05:37:20.000Z
|
bl_info = {
"name": "Batch Exporter",
"description": "",
"author": "Mitch McCollum",
"version": (1, 8, 17),
"blender": (2, 80, 0),
"location": "3D View > Tools",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Development"
}
import bpy
import os
import platform
import subprocess
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
EnumProperty,
PointerProperty,
)
from bpy.types import (Panel,
Menu,
Operator,
PropertyGroup,
)
# ------------------------------------------------------------------------
# Scene Properties
# ------------------------------------------------------------------------
class Utilities():
FilePath = ""
def make_path_absolute(self,key):
#""" Prevent Blender's relative paths of doom """
# This can be a collection property or addon preferences
props = bpy.context.scene.my_tool
sane_path = lambda p: os.path.abspath(bpy.path.abspath(p))
if key in props and props[key].startswith('//'):
props[key] = sane_path(props[key])
u = Utilities()
class MyProperties(PropertyGroup):
batchRenameBool: BoolProperty(
name="Batch Rename",
description="Batch Rename",
default = False
)
batchApplyBool: BoolProperty(
name="Apply Transform",
description="Apply Position",
default = False
)
BulkRename: StringProperty(
name="Name",
description=":",
default="",
maxlen=1024,
)
FilePath: StringProperty(
name="File Path",
description=":",
default="",
maxlen=1024,
subtype='DIR_PATH',
update = lambda s,c: u.make_path_absolute('FilePath'),
)
my_enum: EnumProperty(
name="FileType:",
description="What File Format",
items=[ ('F', "FBX", ""),
('O', "OBJ", ""),
]
)
Engine: EnumProperty(
name="Engine",
description="What Engine",
items=[ ('None', "None", ""),
('Unity', "Unity", ""),
('Unreal', "Unreal", ""),
]
)
# ------------------------------------------------------------------------
# Operators
# ------------------------------------------------------------------------
class WM_OT_BatchExport(Operator):
bl_idname = "wm.batch_export"
bl_label = "Batch Export"
def execute(self,context):
#store selection
objs = bpy.context.selected_objects
bpy.ops.object.select_all(action='DESELECT')
scene = context.scene
mytool = scene.my_tool
index = 0
for ob in objs:
index += 1
if mytool.batchRenameBool == True:
ob.name = mytool.BulkRename + str(index)
ob.select_set(state=True)
bpy.context.view_layer.objects.active = ob
#store object location then zero it out
location = ob.location.copy()
if mytool.batchApplyBool == True:
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
if mytool.Engine == "Unity":
                    self.FixRotationForUnity3D()
bpy.ops.object.location_clear()
bpy.ops.object.select_grouped(type='CHILDREN_RECURSIVE')
#export fbx
if mytool.FilePath != "":
if mytool.my_enum == "F":
u.FilePath = mytool.FilePath + ob.name + '.fbx'
else:
u.FilePath = mytool.FilePath + ob.name + '.obj'
else:
if mytool.my_enum == "F":
u.FilePath = bpy.path.abspath("//") + ob.name + '.fbx'
else:
                    u.FilePath = bpy.path.abspath("//") + ob.name + '.obj'
print("Wrote to: " + u.FilePath)
if mytool.my_enum == 'F':
if mytool.Engine == "Unreal":
bpy.ops.export_scene.fbx(filepath=u.FilePath, use_selection=True, global_scale = 100)
bpy.ops.export_scene.fbx(filepath=u.FilePath, use_selection=True)
else:
bpy.ops.export_scene.obj(filepath=u.FilePath, use_selection=True)
#restore location
ob.location = location
##FBX
#reselect originally selected objects
for ob in objs:
ob.select_set(state=True)
return { 'FINISHED' }
def FixRotationForUnity3D(self):
bpy.ops.object.transform_apply(rotation = True)
bpy.ops.transform.rotate(value = -1.5708, axis = (1, 0, 0), constraint_axis = (True, False, False), constraint_orientation = 'GLOBAL')
bpy.ops.transform.rotate(value = -3.1416, axis = (0, 1, 0), constraint_axis = (False, True, False), constraint_orientation = 'GLOBAL')
bpy.ops.object.transform_apply(rotation = True)
bpy.ops.transform.rotate(value = 1.5708, axis = (1, 0, 0), constraint_axis = (True, False, False), constraint_orientation = 'GLOBAL')
bpy.ops.transform.rotate(value = 3.1416, axis = (0, 0, 1), constraint_axis = (False, False, True), constraint_orientation = 'GLOBAL')
class WM_OT_OpenFileLocation(Operator):
bl_idname = "wm.open_file_location"
bl_label = "Open File Location"
def execute(self,context):
        # Fall back to the .blend file's directory when no export path was set
        if u.FilePath == "":
            u.FilePath = bpy.path.abspath("//")
        if platform.system() == "Windows":
            os.startfile(u.FilePath)
        elif platform.system() == "Darwin":
            subprocess.Popen(["open", u.FilePath])
        else:
            subprocess.Popen(["xdg-open", u.FilePath])
return { 'FINISHED' }
# ------------------------------------------------------------------------
# Menus
# ------------------------------------------------------------------------
class OBJECT_MT_CustomMenu(bpy.types.Menu):
bl_idname = "object.custom_menu"
bl_label = "Select"
def draw(self, context):
layout = self.layout
# Built-in operators
layout.operator("object.select_all", text="Select/Deselect All").action = 'TOGGLE'
layout.operator("object.select_all", text="Inverse").action = 'INVERT'
layout.operator("object.select_random", text="Random")
# ------------------------------------------------------------------------
# Panel in Object Mode
# ------------------------------------------------------------------------
class OBJECT_PT_CustomPanel(Panel):
bl_idname = "object.custom_panel"
bl_label = "Batch Exporter"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "BatchExport"
bl_context = "objectmode"
@classmethod
def poll(self,context):
return context.object is not None
def draw(self, context):
layout = self.layout
scene = context.scene
mytool = scene.my_tool
layout.label(text="Batch Rename:")
layout.prop(mytool, "batchRenameBool")
layout.prop(mytool, "BulkRename")
layout.label(text=" ")
layout.label(text="Export Options:")
layout.prop(mytool, "batchApplyBool")
layout.prop(mytool, "Engine")
layout.label(text=" ")
layout.label(text="Export Location:")
layout.prop(mytool, "FilePath")
layout.prop(mytool, "my_enum")
layout.operator("wm.batch_export")
layout.operator("wm.open_file_location")
#layout.menu(OBJECT_MT_CustomMenu.bl_idname, text="Presets", icon="SCENE")
layout.separator()
# ------------------------------------------------------------------------
# Registration
# ------------------------------------------------------------------------
classes = (
MyProperties,
WM_OT_BatchExport,
OBJECT_MT_CustomMenu,
OBJECT_PT_CustomPanel,
WM_OT_OpenFileLocation
)
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
bpy.types.Scene.my_tool = PointerProperty(type=MyProperties)
def unregister():
from bpy.utils import unregister_class
for cls in reversed(classes):
unregister_class(cls)
del bpy.types.Scene.my_tool
if __name__ == "__main__":
register()
| 31.630824
| 142
| 0.511955
|
1e0a1b7b1609a4ab97a16fcbe91e0c04cfcf3972
| 7,496
|
py
|
Python
|
tods/tests/common/test_train_score_split.py
|
ZhuangweiKang/tods
|
fe3f55f8ccb306dd292c668e0f1154f1afdfa556
|
[
"Apache-2.0"
] | 544
|
2020-09-21T06:02:33.000Z
|
2022-03-27T07:16:32.000Z
|
tods/tests/common/test_train_score_split.py
|
ZhuangweiKang/tods
|
fe3f55f8ccb306dd292c668e0f1154f1afdfa556
|
[
"Apache-2.0"
] | 35
|
2020-09-21T06:33:13.000Z
|
2022-03-11T14:20:21.000Z
|
tods/tests/common/test_train_score_split.py
|
ZhuangweiKang/tods
|
fe3f55f8ccb306dd292c668e0f1154f1afdfa556
|
[
"Apache-2.0"
] | 86
|
2020-09-21T16:44:33.000Z
|
2022-03-11T18:20:22.000Z
|
import os
import pickle
import unittest
from d3m import container
from d3m.metadata import base as metadata_base
from tods.common import TrainScoreSplit
class TrainScoreDatasetSplitPrimitiveTestCase(unittest.TestCase):
def _get_yahoo_dataset(self):
dataset_doc_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..', '..', '..', 'datasets', 'anomaly','yahoo_sub_5','TRAIN','dataset_TRAIN', 'datasetDoc.json'))
dataset = container.Dataset.load('file://{dataset_doc_path}'.format(dataset_doc_path=dataset_doc_path))
return dataset
def test_produce_train(self):
dataset = self._get_yahoo_dataset()
# We set semantic types like runtime would.
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 0), 'https://metadata.datadrivendiscovery.org/types/Index')
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 7), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 2), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 3), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 4), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 5), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 6), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 7), 'https://metadata.datadrivendiscovery.org/types/Attribute')
hyperparams_class = TrainScoreSplit.TrainScoreDatasetSplitPrimitive.metadata.get_hyperparams()
primitive = TrainScoreSplit.TrainScoreDatasetSplitPrimitive(hyperparams=hyperparams_class.defaults().replace({
'shuffle': True,
}))
primitive.set_training_data(dataset=dataset)
primitive.fit()
# To test that pickling works.
pickle.dumps(primitive)
results = primitive.produce(inputs=container.List([0], generate_metadata=True)).value
self.assertEqual(len(results), 1)
for dataset in results:
self.assertEqual(len(dataset), 1)
self.assertEqual(results[0]['learningData'].shape[0], 945)
column_names = ['d3mIndex', 'timestamp', 'value_0', 'value_1', 'value_2', 'value_3', 'value_4','ground_truth']
for i in range(8):
self.assertEqual(results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, i))['name'],
column_names[i])
self.assertEqual(results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, 0))['semantic_types'], (
'http://schema.org/Integer', 'https://metadata.datadrivendiscovery.org/types/PrimaryKey', 'https://metadata.datadrivendiscovery.org/types/Index'
))
for i in range(2, 6):
self.assertEqual(
results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, i))['semantic_types'], ('http://schema.org/Float',)
)
self.assertEqual(results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, 7))['semantic_types'],(
'http://schema.org/Integer',
'https://metadata.datadrivendiscovery.org/types/SuggestedTarget',
'https://metadata.datadrivendiscovery.org/types/TrueTarget',
))
def test_produce_score(self):
dataset = self._get_yahoo_dataset()
# We set semantic types like runtime would.
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 0), 'https://metadata.datadrivendiscovery.org/types/Index')
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 7), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 2), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 3), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 4), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 5), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 6), 'https://metadata.datadrivendiscovery.org/types/Attribute')
dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 7), 'https://metadata.datadrivendiscovery.org/types/Attribute')
hyperparams_class = TrainScoreSplit.TrainScoreDatasetSplitPrimitive.metadata.get_hyperparams()
primitive = TrainScoreSplit.TrainScoreDatasetSplitPrimitive(hyperparams=hyperparams_class.defaults().replace({
'shuffle': True,
}))
primitive.set_training_data(dataset=dataset)
primitive.fit()
results = primitive.produce_score_data(inputs=container.List([0], generate_metadata=True)).value
self.assertEqual(len(results), 1)
for dataset in results:
self.assertEqual(len(dataset), 1)
self.assertEqual(results[0]['learningData'].shape[0], 315)
#TODO check data type
self.assertEqual(results.metadata.query((0, 'learningData'))['dimension']['length'], 315)
column_names = ['d3mIndex', 'timestamp', 'value_0', 'value_1', 'value_2', 'value_3', 'value_4','ground_truth']
for i in range(8):
self.assertEqual(results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, i))['name'],
column_names[i])
self.assertEqual(results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, 0))['semantic_types'], (
'http://schema.org/Integer', 'https://metadata.datadrivendiscovery.org/types/PrimaryKey', 'https://metadata.datadrivendiscovery.org/types/Index'
))
for i in range(2, 6):
self.assertEqual(
results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, i))['semantic_types'], ('http://schema.org/Float',)
)
print(results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, 7))['semantic_types'])
self.assertEqual(results.metadata.query((0, 'learningData', metadata_base.ALL_ELEMENTS, 7))['semantic_types'], (
'http://schema.org/Integer',
'https://metadata.datadrivendiscovery.org/types/SuggestedTarget',
'https://metadata.datadrivendiscovery.org/types/TrueTarget',
))
if __name__ == '__main__':
unittest.main()
| 59.023622
| 180
| 0.709312
|
ab299fe5ed93c0ee5bebc3da5c18f17c3b452bea
| 14,991
|
py
|
Python
|
Experiments/BostonHousing/utils.py
|
Neronjust2017/Bayesian-neural-networks
|
9d7f781f5c2dfa8fadf26300b4b5b64366c939cd
|
[
"MIT"
] | 4
|
2020-07-07T12:29:03.000Z
|
2021-11-17T07:20:17.000Z
|
Experiments/BostonHousing/utils.py
|
Neronjust2017/Bayesian-neural-networks
|
9d7f781f5c2dfa8fadf26300b4b5b64366c939cd
|
[
"MIT"
] | null | null | null |
Experiments/BostonHousing/utils.py
|
Neronjust2017/Bayesian-neural-networks
|
9d7f781f5c2dfa8fadf26300b4b5b64366c939cd
|
[
"MIT"
] | 1
|
2020-05-29T08:07:48.000Z
|
2020-05-29T08:07:48.000Z
|
from sklearn.datasets import load_boston
from pandas import Series,DataFrame
import torch
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def _get_index_train_test_path(split_num, train = True):
    """
    Method to generate the path containing the training/test split for the given
    split number (generally from 1 to 20).
    @param split_num Split number for which the data has to be generated
    @param train Is true if the data is training data. Else false.
    @return path Path of the file containing the required data
    """
    _DATA_DIRECTORY_PATH = './data/'
if train:
return _DATA_DIRECTORY_PATH + "index_train_" + str(split_num) + ".txt"
else:
return _DATA_DIRECTORY_PATH + "index_test_" + str(split_num) + ".txt"
def load_data():
boston = load_boston()
X = boston.data
Y = boston.target
return X, Y
def get_data_splited(split, X, Y):
# We load the indexes of the training and test sets
print('Loading file: ' + _get_index_train_test_path(split, train=True))
print('Loading file: ' + _get_index_train_test_path(split, train=False))
index_train = np.loadtxt(_get_index_train_test_path(split, train=True))
index_test = np.loadtxt(_get_index_train_test_path(split, train=False))
X_train = X[[int(i) for i in index_train.tolist()]]
y_train = Y[[int(i) for i in index_train.tolist()]]
X_test = X[[int(i) for i in index_test.tolist()]]
y_test = Y[[int(i) for i in index_test.tolist()]]
y_train = y_train.reshape([y_train.shape[0], 1])
y_test = y_test.reshape([y_test.shape[0], 1])
num_training_examples = int(0.8 * X_train.shape[0])
X_val = X_train[num_training_examples:, :]
y_val = y_train[num_training_examples:, :]
X_train = X_train[0:num_training_examples, :]
y_train = y_train[0:num_training_examples, :]
x_means, x_stds = X_train.mean(axis=0), X_train.var(axis=0) ** 0.5
y_means, y_stds = y_train.mean(axis=0), y_train.var(axis=0) ** 0.5
X_train = (X_train - x_means) / x_stds
y_train = (y_train - y_means) / y_stds
X_val = (X_val - x_means) / x_stds
y_val = (y_val - y_means) / y_stds
X_test = (X_test - x_means) / x_stds
y_test = (y_test - y_means) / y_stds
return X_train, y_train, X_val, y_val, X_test, y_test, y_stds
def get_dataset(X_train, y_train, X_val, y_val, X_test, y_test):
x_train = torch.from_numpy(X_train).float()
y_train = torch.from_numpy(y_train).float()
print(x_train.size(), y_train.size())
trainset = torch.utils.data.TensorDataset(x_train, y_train)
x_val = torch.from_numpy(X_val).float()
y_val = torch.from_numpy(y_val).float()
print(x_val.size(), y_val.size())
valset = torch.utils.data.TensorDataset(x_val, y_val)
x_test = torch.from_numpy(X_test).float()
y_test = torch.from_numpy(y_test).float()
print(x_test.size(), y_test.size())
testset = torch.utils.data.TensorDataset(x_test, y_test)
return trainset, valset, testset
def get_dataloader(trainset, valset, testset, use_cuda, batch_size, worker=True):
if worker:
if use_cuda:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, pin_memory=True,
num_workers=3)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False, pin_memory=True,
num_workers=3)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, pin_memory=True,
num_workers=3)
else:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, pin_memory=False,
num_workers=3)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False, pin_memory=False,
num_workers=3)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, pin_memory=False,
num_workers=3)
else:
if use_cuda:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False)
else:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False)
return trainloader, valloader, testloader
def get_dataloader_sample(trainset, valset, testset, use_cuda, batch_size, sampler, worker=True):
if worker:
if use_cuda:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=False, pin_memory=True,
num_workers=3, sampler=sampler)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False, pin_memory=True,
num_workers=3)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, pin_memory=True,
num_workers=3)
else:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=False, pin_memory=False,
num_workers=3, sampler=sampler)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False, pin_memory=False,
num_workers=3)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, pin_memory=False,
num_workers=3)
else:
if use_cuda:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=False, sampler=sampler)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False)
else:
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=False, sampler=sampler)
valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,
shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False)
return trainloader, valloader, testloader
def store_results(file, results):
with open(file, "a") as myfile:
for str in results:
myfile.write(str)
def plot_pred_cost(pred_cost_train, nb_epochs, nb_its_dev, cost_dev, results_dir_split):
textsize = 15
marker = 5
plt.figure(dpi=100)
fig, ax1 = plt.subplots()
ax1.plot(pred_cost_train, 'r--')
ax1.plot(range(0, nb_epochs, nb_its_dev), cost_dev[::nb_its_dev], 'b-')
ax1.set_ylabel('MSE loss')
plt.xlabel('epoch')
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='--')
lgd = plt.legend(['train cost', 'val cost'], markerscale=marker, prop={'size': textsize, 'weight': 'normal'})
ax = plt.gca()
plt.title('Regression costs')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(textsize)
item.set_weight('normal')
plt.savefig(results_dir_split + '/pred_cost.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
def plot_kl_cost(kl_cost_train, results_dir_split):
textsize = 15
marker = 5
plt.figure()
fig, ax1 = plt.subplots()
ax1.plot(kl_cost_train, 'r')
ax1.set_ylabel('nats?')
plt.xlabel('epoch')
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='--')
lgd = plt.legend(['train cost', 'val cost'], markerscale=marker, prop={'size': textsize, 'weight': 'normal'})
ax = plt.gca()
plt.title('DKL (per sample)')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(textsize)
item.set_weight('normal')
plt.savefig(results_dir_split + '/KL_cost.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
def plot_rmse(nb_epochs, nb_its_dev, rmse_train, rmse_dev, results_dir_split):
textsize = 15
marker = 5
plt.figure(dpi=100)
fig2, ax2 = plt.subplots()
ax2.set_ylabel('% rmse')
ax2.semilogy(range(0, nb_epochs, nb_its_dev), 100 * rmse_dev[::nb_its_dev], 'b-')
if rmse_train is not None:
ax2.semilogy(100 * rmse_train, 'r--')
plt.xlabel('epoch')
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='--')
ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.ScalarFormatter())
ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
lgd = plt.legend(['val rmse', 'train rmse'], markerscale=marker, prop={'size': textsize, 'weight': 'normal'})
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(textsize)
item.set_weight('normal')
    plt.savefig(results_dir_split + '/rmse.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
def plot_uncertainty_noise(means, noise, total_unc, y_test, results_dir_split):
textsize = 15
marker = 5
means = means.reshape((means.shape[0],))
noise = noise.reshape((noise.shape[0],))
total_unc_1 = total_unc[0].reshape((total_unc[0].shape[0],))
total_unc_2 = total_unc[1].reshape((total_unc[1].shape[0],))
total_unc_3 = total_unc[2].reshape((total_unc[2].shape[0],))
c = ['#1f77b4', '#ff7f0e']
ind = np.arange(0, len(y_test))
plt.figure()
fig, ax1 = plt.subplots()
plt.scatter(ind, y_test, color='black', alpha=0.5)
ax1.plot(ind, means, 'r')
plt.fill_between(ind, means - total_unc_3, means + total_unc_3,
alpha=0.25, label='99.7% Confidence')
plt.fill_between(ind, means - total_unc_2, means + total_unc_2,
alpha=0.25, label='95% Confidence')
plt.fill_between(ind, means - total_unc_1, means + total_unc_1,
alpha=0.25, label='68% Confidence')
plt.fill_between(ind, means - noise, means + noise,
alpha=0.25, label='Noise')
ax1.set_ylabel('prediction')
plt.xlabel('test points')
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='--')
lgd = plt.legend(['prediction mean'], markerscale=marker, prop={'size': textsize, 'weight': 'normal'})
ax = plt.gca()
plt.title('Uncertainty')
plt.savefig(results_dir_split + '/uncertainty.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
def plot_uncertainty(means, stds, y_test, results_dir_split):
textsize = 15
marker = 5
means = means.reshape((means.shape[0],))
stds = stds.reshape((stds.shape[0],))
c = ['#1f77b4', '#ff7f0e']
ind = np.arange(0, len(y_test))
plt.figure()
fig, ax1 = plt.subplots()
plt.scatter(ind, y_test, color='black', alpha=0.5)
ax1.plot(ind, means, 'r')
plt.fill_between(ind, means - 3 * stds, means + 3 * stds,
alpha=0.25, label='99.7% Confidence')
plt.fill_between(ind, means - 2 * stds, means + 2 * stds,
alpha=0.25, label='95% Confidence')
plt.fill_between(ind, means - stds, means + stds,
alpha=0.25, label='68% Confidence')
ax1.set_ylabel('prediction')
plt.xlabel('test points')
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='--')
lgd = plt.legend(['prediction mean'], markerscale=marker, prop={'size': textsize, 'weight': 'normal'})
ax = plt.gca()
plt.title('Uncertainty')
plt.savefig(results_dir_split + '/uncertainty.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
def store_all_results(results, path):
results_order_rmse = sorted(results.items(), key=lambda x: x[1][0], reverse=False)
for i in range(len(results_order_rmse)):
with open(path+'/results_rmse.txt', 'a') as f:
f.write(str(results_order_rmse[i][0]) + ' RMSE: %f +- %f (stddev) +- %f (std error) PICP %f MPIW %f'
% (results_order_rmse[i][1][0], results_order_rmse[i][1][1], results_order_rmse[i][1][2], results_order_rmse[i][1][3],results_order_rmse[i][1][4]))
f.write('\n')
results_order_picp = sorted(results.items(), key=lambda x: x[1][3], reverse=True)
for i in range(len(results_order_picp)):
with open(path+'/results_picp.txt', 'a') as f:
f.write(str(results_order_picp[i][0]) + ' RMSE: %f +- %f (stddev) +- %f (std error) PICP %f MPIW %f'
% (results_order_picp[i][1][0], results_order_picp[i][1][1], results_order_picp[i][1][2], results_order_picp[i][1][3],results_order_picp[i][1][4]))
f.write('\n')
| 47.439873
| 167
| 0.581416
|
62bbde4bcad67ddadc2ae4832cf4d041bb113f87
| 2,469
|
py
|
Python
|
vtam/utils/SequenceClusterer.py
|
RaphaelHebert/vtam
|
6cbc7e241f9aa4245f5fd000769b9765333d41c2
|
[
"MIT"
] | null | null | null |
vtam/utils/SequenceClusterer.py
|
RaphaelHebert/vtam
|
6cbc7e241f9aa4245f5fd000769b9765333d41c2
|
[
"MIT"
] | null | null | null |
vtam/utils/SequenceClusterer.py
|
RaphaelHebert/vtam
|
6cbc7e241f9aa4245f5fd000769b9765333d41c2
|
[
"MIT"
] | null | null | null |
import os
import shlex
import subprocess
import sys
import pandas
from vtam.utils.PathManager import PathManager
class SequenceClusterer(object):
def __init__(self, variant_info_df, cluster_identity):
"""Takes as input df with at least these columns: variant_id, read_cout, sequence"""
self.variant_info_df = variant_info_df
self.cluster_identity = cluster_identity
def compute_clusters(self):
tempcluster_dir = PathManager.instance().get_tempdir()
i_fas = os.path.join(tempcluster_dir, 'cluster_input.fas')
with open(i_fas, 'w') as fout:
for idx, row in self.variant_info_df.iterrows():
valdict = {}
valdict['variant_id'] = row.variant_id
valdict['read_count'] = row.read_count
valdict['sequence'] = row.sequence
fout.write(
">{variant_id};size={read_count}\n{sequence}\n".format(
**valdict))
cmd = "vsearch --cluster_size cluster_input.fas --id {} --otutabout otutabout.txt --clusters test".format(self.cluster_identity)
if sys.platform.startswith("win"):
args = cmd
else:
args = shlex.split(cmd)
subprocess.run(args=args, cwd=tempcluster_dir)
otutabout_path = os.path.join(tempcluster_dir, "otutabout.txt")
otutabout_df = pandas.read_csv(otutabout_path, sep="\t")
otutabout_df.rename({'#OTU ID': 'centroid'}, axis=1, inplace=True)
otutabout_long_df = pandas.melt(otutabout_df, id_vars=['centroid'],
var_name='variant_id',
value_name='read_count')
otutabout_long_df.rename({'centroid': 'clusterid'}, axis=1,
inplace=True)
otutabout_long_df = otutabout_long_df.loc[
otutabout_long_df.read_count > 0]
otutabout_long_df.variant_id = otutabout_long_df.variant_id.astype(
'int')
cluster_count_df = otutabout_long_df[
['clusterid', 'variant_id']].groupby('clusterid').count()
cluster_count_df.rename({'variant_id': 'clustersize'}, axis=1,
inplace=True)
cluster_count_df = otutabout_long_df[
['clusterid', 'variant_id']].merge(cluster_count_df,
on='clusterid')
return cluster_count_df
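# Illustrative usage sketch (kept as comments), assuming the vsearch binary is on
# PATH and the column values below are hypothetical:
#
#     import pandas
#     variant_info_df = pandas.DataFrame({
#         'variant_id': [1, 2],
#         'read_count': [120, 35],
#         'sequence': ['ACGT' * 10, 'ACGA' * 10],
#     })
#     clusterer = SequenceClusterer(variant_info_df, cluster_identity=0.97)
#     cluster_count_df = clusterer.compute_clusters()
#     # -> DataFrame with columns: clusterid, variant_id, clustersize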
| 39.190476
| 136
| 0.597003
|
e6c868a0ae2a61a7e54e88d1e73daf3803c1d68a
| 1,589
|
py
|
Python
|
redisbox/unittest.py
|
ziadsawalha/redisbox
|
5dda344a67e2f9e5fba66f1150ec4a8556e22262
|
[
"Apache-2.0"
] | 1
|
2016-12-28T09:47:16.000Z
|
2016-12-28T09:47:16.000Z
|
redisbox/unittest.py
|
ziadsawalha/redisbox
|
5dda344a67e2f9e5fba66f1150ec4a8556e22262
|
[
"Apache-2.0"
] | null | null | null |
redisbox/unittest.py
|
ziadsawalha/redisbox
|
5dda344a67e2f9e5fba66f1150ec4a8556e22262
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from unittest import TestCase
import os
try:
import redis
except ImportError:
raise ImportError('`redis` is required for RedisTestCase')
class RedisTestCase(TestCase):
"""A base for Redis DB driven test cases. Provides
:class:`redis.StrictRedis` instance in :attribute:`redis_client`
and has a :method:`flushall` helper method for database cleanup.
It is expected that tests are run from `nose` with `--with-redisbox` flag
that brings up a sandboxed instance of Redis.
"""
__redis_client = None
@property
def redis_client(self):
"""Returns an instance of :class:`redis.StrictRedis` connected
to RedisBox database instance.
"""
if not self.__redis_client:
try:
port = int(os.getenv('REDISBOX_PORT'))
self.__redis_client = redis.StrictRedis(port=port)
except (TypeError, redis.ConnectionError):
raise RuntimeError(
'Seems that RedisBox is not running. ' +
'Do you run nosetests with --with-redisbox flag?')
return self.__redis_client
def flushall(self, drop=True):
"""Delete all data.
A typical use is call this method in :func:`unittest.TestCase.tearDown`
to have a clean database for every test case method.
.. code-block:: python
def tearDown(self):
super(self, MyTestCase).tearDown()
self.flushall()
"""
self.redis_client.flushall()
| 30.557692
| 79
| 0.630585
|
10b613c89a5adbfdf73b8f171ba5791d8a1e6519
| 12,126
|
py
|
Python
|
visualdl/server/lib.py
|
ziyuli/VisualDL
|
a58477fdbaa47ac1c76793ebdfc0c700e87c3f09
|
[
"Apache-2.0"
] | null | null | null |
visualdl/server/lib.py
|
ziyuli/VisualDL
|
a58477fdbaa47ac1c76793ebdfc0c700e87c3f09
|
[
"Apache-2.0"
] | null | null | null |
visualdl/server/lib.py
|
ziyuli/VisualDL
|
a58477fdbaa47ac1c76793ebdfc0c700e87c3f09
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
from __future__ import absolute_import
import re
import sys
import time
from tempfile import NamedTemporaryFile
import numpy as np
from PIL import Image
from .log import logger
import wave
try:
from urllib.parse import urlencode
except Exception:
from urllib import urlencode
def get_modes(storage):
return storage.modes()
def get_tags(storage, component):
result = {}
for mode in storage.modes():
with storage.mode(mode) as reader:
tags = reader.tags(component)
if tags:
result[mode] = {}
for tag in tags:
result[mode][tag] = {
'displayName': tag,
'description': "",
}
return result
def get_scalar_tags(storage):
return get_tags(storage, 'scalar')
def get_scalar(storage, mode, tag, num_records=300):
assert num_records > 1
with storage.mode(mode) as reader:
scalar = reader.scalar(tag)
records = scalar.records()
ids = scalar.ids()
timestamps = scalar.timestamps()
data = list(zip(timestamps, ids, records))
data_size = len(data)
if data_size <= num_records:
return data
span = float(data_size) / (num_records - 1)
span_offset = 0
data_idx = int(span_offset * span)
sampled_data = []
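        # Down-sample to roughly num_records points: walk backwards from the
        # newest record with an even stride, always keep the oldest record,
        # then reverse back into chronological order.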
while data_idx < data_size:
sampled_data.append(data[data_size - data_idx - 1])
span_offset += 1
data_idx = int(span_offset * span)
sampled_data.append(data[0])
res = sampled_data[::-1]
# TODO(Superjomn) some bug here, sometimes there are zero here.
if res[-1] == 0.:
res = res[:-1]
return res
def get_image_tags(storage):
result = {}
for mode in storage.modes():
with storage.mode(mode) as reader:
tags = reader.tags('image')
if tags:
result[mode] = {}
for tag in tags:
image = reader.image(tag)
for i in range(max(1, image.num_samples())):
caption = tag if image.num_samples(
) <= 1 else '%s/%d' % (tag, i)
result[mode][caption] = {
'displayName': caption,
'description': "",
'samples': 1,
}
return result
def get_image_tag_steps(storage, mode, tag):
# remove suffix '/x'
res = re.search(r".*/([0-9]+$)", tag)
sample_index = 0
origin_tag = tag
if res:
tag = tag[:tag.rfind('/')]
sample_index = int(res.groups()[0])
with storage.mode(mode) as reader:
image = reader.image(tag)
res = []
for step_index in range(image.num_records()):
record = image.record(step_index, sample_index)
shape = record.shape()
# TODO(ChunweiYan) remove this trick, some shape will be empty
if not shape:
continue
try:
query = urlencode({
'sample': 0,
'index': step_index,
'tag': origin_tag,
'run': mode,
})
res.append({
'height': shape[0],
'width': shape[1],
'step': record.step_id(),
'wall_time': image.timestamp(step_index),
'query': query,
})
except Exception:
logger.error("image sample out of range")
return res
def get_invididual_image(storage, mode, tag, step_index, max_size=80):
with storage.mode(mode) as reader:
res = re.search(r".*/([0-9]+$)", tag)
# remove suffix '/x'
offset = 0
if res:
offset = int(res.groups()[0])
tag = tag[:tag.rfind('/')]
image = reader.image(tag)
record = image.record(step_index, offset)
shape = record.shape()
if shape[2] == 1:
shape = [shape[0], shape[1]]
data = np.array(record.data(), dtype='uint8').reshape(shape)
tempfile = NamedTemporaryFile(mode='w+b', suffix='.png')
with Image.fromarray(data) as im:
im.save(tempfile)
tempfile.seek(0, 0)
return tempfile
def get_audio_tags(storage):
result = {}
for mode in storage.modes():
with storage.mode(mode) as reader:
tags = reader.tags('audio')
if tags:
result[mode] = {}
for tag in tags:
audio = reader.audio(tag)
for i in range(max(1, audio.num_samples())):
caption = tag if audio.num_samples(
) <= 1 else '%s/%d' % (tag, i)
result[mode][caption] = {
'displayName': caption,
'description': "",
'samples': 1,
}
return result
def get_audio_tag_steps(storage, mode, tag):
# remove suffix '/x'
res = re.search(r".*/([0-9]+$)", tag)
sample_index = 0
origin_tag = tag
if res:
tag = tag[:tag.rfind('/')]
sample_index = int(res.groups()[0])
with storage.mode(mode) as reader:
audio = reader.audio(tag)
res = []
for step_index in range(audio.num_records()):
record = audio.record(step_index, sample_index)
query = urlencode({
'sample': 0,
'index': step_index,
'tag': origin_tag,
'run': mode,
})
res.append({
'step': record.step_id(),
'wall_time': audio.timestamp(step_index),
'query': query,
})
return res
def get_individual_audio(storage, mode, tag, step_index, max_size=80):
with storage.mode(mode) as reader:
res = re.search(r".*/([0-9]+$)", tag)
# remove suffix '/x'
offset = 0
if res:
offset = int(res.groups()[0])
tag = tag[:tag.rfind('/')]
audio = reader.audio(tag)
record = audio.record(step_index, offset)
shape = record.shape()
sample_rate = shape[0]
sample_width = shape[1]
num_channels = shape[2]
# sending a temp file to front end
tempfile = NamedTemporaryFile(mode='w+b', suffix='.wav')
# write audio file to that tempfile
wavfile = wave.open(tempfile, 'wb')
wavfile.setframerate(sample_rate)
wavfile.setnchannels(num_channels)
wavfile.setsampwidth(sample_width)
# convert to binary string to write to wav file
data = np.array(record.data(), dtype='uint8')
wavfile.writeframes(data.tostring())
# make sure the marker is at the start of file
tempfile.seek(0, 0)
return tempfile
def get_histogram_tags(storage):
return get_tags(storage, 'histogram')
def get_texts_tags(storage):
return get_tags(storage, 'text')
def get_texts(storage, mode, tag, num_records=100):
with storage.mode(mode) as reader:
texts = reader.text(tag)
records = texts.records()
ids = texts.ids()
timestamps = texts.timestamps()
data = list(zip(timestamps, ids, records))
data_size = len(data)
if data_size <= num_records:
return data
span = float(data_size) / (num_records - 1)
span_offset = 0
data_idx = int(span_offset * span)
sampled_data = []
while data_idx < data_size:
sampled_data.append(data[data_size - data_idx - 1])
span_offset += 1
data_idx = int(span_offset * span)
sampled_data.append(data[0])
res = sampled_data[::-1]
# TODO(Superjomn) some bug here, sometimes there are zero here.
if res[-1] == 0.:
res = res[:-1]
return res
def get_embeddings(storage, mode, reduction, dimension=2, num_records=5000):
with storage.mode(mode) as reader:
embedding = reader.embedding()
labels = embedding.get_all_labels()
high_dimensional_vectors = np.array(embedding.get_all_embeddings())
if reduction == 'tsne':
import visualdl.server.tsne as tsne
low_dim_embs = tsne.tsne(
high_dimensional_vectors,
dimension,
initial_dims=50,
perplexity=30.0)
elif reduction == 'pca':
low_dim_embs = simple_pca(high_dimensional_vectors, dimension)
low_dim_embs = np.real(low_dim_embs)
return {"embedding": low_dim_embs.tolist(), "labels": labels}
def get_histogram(storage, mode, tag):
with storage.mode(mode) as reader:
histogram = reader.histogram(tag)
res = []
for i in range(histogram.num_records()):
try:
# some bug with protobuf, some times may overflow
record = histogram.record(i)
except Exception:
continue
res.append([])
py_record = res[-1]
py_record.append(record.timestamp())
py_record.append(record.step())
py_record.append([])
data = py_record[-1]
for j in range(record.num_instances()):
instance = record.instance(j)
data.append(
[instance.left(), instance.right(), instance.frequency()])
# num_samples: We will only return 100 samples.
num_samples = 100
if len(res) < num_samples:
return res
# sample some steps
span = float(len(res)) / (num_samples - 1)
span_offset = 0
data_idx = 0
sampled_data = []
data_size = len(res)
while data_idx < data_size:
sampled_data.append(res[data_size - data_idx - 1])
span_offset += 1
data_idx = int(span_offset * span)
sampled_data.append(res[0])
return sampled_data[::-1]
def retry(ntimes, function, time2sleep, *args, **kwargs):
'''
    Try to execute `function` up to `ntimes` times; if an exception is caught,
    the thread sleeps `time2sleep` seconds before retrying.
'''
for i in range(ntimes):
try:
return function(*args, **kwargs)
except Exception:
error_info = '\n'.join(map(str, sys.exc_info()))
logger.error("Unexpected error: %s" % error_info)
time.sleep(time2sleep)
def cache_get(cache):
def _handler(key, func, *args, **kwargs):
data = cache.get(key)
if data is None:
logger.warning('update cache %s' % key)
data = func(*args, **kwargs)
cache.set(key, data)
return data
return data
return _handler
def simple_pca(x, dimension):
"""
A simple PCA implementation to do the dimension reduction.
"""
# Center the data.
x -= np.mean(x, axis=0)
# Computing the Covariance Matrix
cov = np.cov(x, rowvar=False)
# Get eigenvectors and eigenvalues from the covariance matrix
eigvals, eigvecs = np.linalg.eig(cov)
# Sort the eigvals from high to low
order = np.argsort(eigvals)[::-1]
# Drop the eigenvectors with low eigenvalues
eigvecs = eigvecs[:, order[:dimension]]
return np.dot(x, eigvecs)
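# --- Editor's note: minimal sanity-check sketch, not part of the original file. ---
# simple_pca() centers the data, takes the covariance eigenvectors and projects
# onto the top `dimension` components, e.g.:
#
#     rng = np.random.RandomState(0)
#     x = rng.randn(100, 10)           # 100 samples, 10 features
#     low = np.real(simple_pca(x, 2))  # np.real() as used in get_embeddings()
#     assert low.shape == (100, 2)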
| 28.871429
| 78
| 0.549975
|
1aebf9a308c1541d3d48e307dcb5a0559b36534f
| 687
|
py
|
Python
|
froide/campaign/migrations/0002_auto_20190309_1241.py
|
manonthemat/froide
|
698c49935eaf2e922f3c9f6a46af0fd545ccbbbb
|
[
"MIT"
] | null | null | null |
froide/campaign/migrations/0002_auto_20190309_1241.py
|
manonthemat/froide
|
698c49935eaf2e922f3c9f6a46af0fd545ccbbbb
|
[
"MIT"
] | null | null | null |
froide/campaign/migrations/0002_auto_20190309_1241.py
|
manonthemat/froide
|
698c49935eaf2e922f3c9f6a46af0fd545ccbbbb
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-09 11:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('campaign', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='campaign',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='campaign',
name='start_date',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='campaign',
name='url',
field=models.URLField(blank=True),
),
]
| 23.689655
| 50
| 0.556041
|
556d24cd1e65b4c1a2dcac5ca9bc2aed220bce59
| 1,275
|
py
|
Python
|
pipeline/defects_generator.py
|
junronglau/product-defects-extraction
|
98358100ee2efd5907dd8ae7c42d7f217410ce4f
|
[
"Apache-2.0"
] | null | null | null |
pipeline/defects_generator.py
|
junronglau/product-defects-extraction
|
98358100ee2efd5907dd8ae7c42d7f217410ce4f
|
[
"Apache-2.0"
] | 1
|
2021-04-14T06:49:51.000Z
|
2021-04-21T18:30:38.000Z
|
pipeline/defects_generator.py
|
junronglau/product-defects-extraction
|
98358100ee2efd5907dd8ae7c42d7f217410ce4f
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
import sys
path = str(Path(Path(__file__).parent.absolute()).parent.absolute())
sys.path.insert(0, path)
from dataloader.dc_data_loader import DataLoader
from preprocess.twostep_preprocessor import TwoStepPreprocessor
from models.svm_model import SvmModel
from trainers.svm_trainer import SvmTrainer
from utils.utils import get_args
from utils.config import process_config
def defects_classifier():
try:
args = get_args()
config = process_config(args.config)
except ValueError:
print("Missing or invalid arguments")
exit(0)
print('Creating the data loader...')
data_loader = DataLoader(config)
train_data, test_data = data_loader.get_data()
print('Creating the Preprocessor...')
preprocessor = TwoStepPreprocessor(train_data, test_data)
preprocessor.prepare_data(evaluate=True)
test_data = preprocessor.get_all_data()
print('Loading and evaluating the Model...')
model = SvmModel(config, load=True)
trainer = SvmTrainer(model, preprocessor)
predictions = trainer.generate_predictions(**test_data)
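    # Keep only the reviews the classifier scores above 0.5 (i.e. predicted as
    # defect-related) and persist them for the next pipeline stage.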
train_data.iloc[predictions > 0.5].to_csv(config.defects_classifier.paths.save_data_path, index=False)
if __name__ == '__main__':
defects_classifier()
| 30.357143
| 106
| 0.74902
|
978498db28e9dbf427c684970b329c4706261052
| 12,123
|
py
|
Python
|
services/group.py
|
wenruiq/amqp-msg-service
|
7bf89815b8650282af45508ef36e895a4781a63a
|
[
"MIT"
] | null | null | null |
services/group.py
|
wenruiq/amqp-msg-service
|
7bf89815b8650282af45508ef36e895a4781a63a
|
[
"MIT"
] | null | null | null |
services/group.py
|
wenruiq/amqp-msg-service
|
7bf89815b8650282af45508ef36e895a4781a63a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from datetime import datetime
import json
import pika
import requests
# This version of order.py uses a mysql DB via flask-sqlalchemy, instead of JSON files, as the data store.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://admin:EatSomeDick@esdos.cml2qcg6djxv.ap-southeast-1.rds.amazonaws.com:3306/grp'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
CORS(app)
from models import Group, GroupMembers, User
# Get group for specified grpID
@app.route("/group/<string:grpID>", methods=['GET'])
def get_group(grpID):
group = db.session.query(Group).filter_by(grpID=grpID).first()
if group:
return group.json(), 200
else:
return {'message': 'group not found for id ' + str(grpID)}, 404
# Get list of group members for specified grpID
@app.route("/group/<string:grpID>/members", methods=['GET'])
def get_groupmembers(grpID):
groupMembers = db.session.query(GroupMembers).filter_by(grpID=grpID)
if groupMembers:
return {'members': [member.userID for member in groupMembers]}, 200
else:
return {'message': 'No members found'}, 404
# Get list of all groups: [ [grpID1, grpname1], [grpID2, grpname2], ... ]
@app.route("/group/get-all-groups", methods=['GET'])
def get_all_groups():
all_groups = db.session.query(Group).all()
groups_list = []
for group in all_groups:
groups_list.append([group.grpID, group.grpname])
if len(groups_list) > 0:
return {'groups_list': groups_list}, 200
else:
return {'groups_list': []}, 404
# Get list of groups user is in: [grpID1, grpID2, grpID3, ... ]
@app.route("/group/user-groups/<int:userID>", methods=['GET'])
def get_user_groups(userID):
groups = db.session.query(GroupMembers).filter_by(userID=userID)
groupID_list = [group.grpID for group in groups]
if len(groupID_list) > 0:
return {'groups:': groupID_list}, 200
else:
return {'message': 'User does not exist or user is not in any groups'}, 404
# Update grpname of group
@app.route("/group/change-grpname", methods=['PUT'])
def change_grpname():
#status in 2xx indicates success
status = 201
result = {}
grpID = request.json.get('grpID', None)
grpname = request.json.get('grpname', None)
if grpID != None and grpname != None:
group = db.session.query(Group).filter_by(grpID=grpID).first()
group.grpname = grpname
status = 201
else:
status = 400
result = {"status": status, "message": "Invalid grpID provided"}
if status == 201:
try:
db.session.add(group)
db.session.commit()
except Exception as e:
status = 500
result = {"status": status, "message": "An error occurred when updating the group's groupname in DB.", "error": str(e)}
if status == 201:
result = {"status": "success" , "message": "Groupname has been updated successfully"}
return str(result), status
# Add user to existing group
@app.route("/group/add-user", methods=['POST'])
def add_user():
status = 201
result = {}
userID = request.json.get('userID', None)
grpID = request.json.get('grpID', None)
if userID != None and grpID != None:
GroupMember = GroupMembers(grpID=grpID, userID=userID)
status = 201
else:
status = 400
result = {"status": status, "message": "Invalid grpID or userID"}
if status == 201:
try:
db.session.add(GroupMember)
addContact(userID, grpID)
db.session.commit()
except Exception as e:
status = 500
result = {"status": status, "message": "An error occured when adding member into group in DB", "error": str(e)}
if status == 201:
result = {"status": "success" , "message": "Group member added successfully"}
return str(result), status
# Delete user from existing group
@app.route("/group/delete-user", methods=['DELETE'])
def delete_user():
status = 201
result = {}
userID = request.json.get('userID', None)
grpID = request.json.get('grpID', None)
if userID != None and grpID != None:
GroupMember = db.session.query(GroupMembers).filter_by(userID=userID, grpID=grpID).first()
status = 201
else:
status = 400
result = {"status": status, "message": "Invalid grpID or userID"}
if status == 201:
try:
db.session.delete(GroupMember)
db.session.commit()
except Exception as e:
status = 500
result = {"status": status, "message": "An error occured when deleting member from group in DB", "error": str(e)}
if status == 201:
result = {"status": "success" , "message": "Group member deleted successfully"}
return str(result), status
@app.route("/group/create", methods=['POST'])
def create_group():
#status in 2xx indicates success
status = 201
result = {}
# groupID auto incremented
grpname = request.json.get('grpname', None)
user_id_list = request.json.get('user_id_list', None)
if grpname != None and user_id_list != None:
grpCount = len(Group.query.all()) + 1
grpID = "G" + str(grpCount)
groupObj = Group(grpID=grpID, grpname=grpname)
status = 201
else:
status = 400
result = {"status": status, "message": "Invalid 'groupname' or no users selected"}
if status == 201:
try:
db.session.add(groupObj)
db.session.commit()
except Exception as e:
status = 500
result = {"status": status, "message": "An error occurred when creating the group in DB.", "error": str(e)}
# if successfully created group in group db, go on to create groupMembers in groupMembers db
if status == 201:
try:
grpID = groupObj.grpID
for user in user_id_list:
GroupMember = GroupMembers(grpID=grpID, userID=user)
db.session.add(GroupMember)
addContact(user, grpID)
db.session.commit()
except Exception as e:
status = 500
result = {"status": status, "message": "An error occured when adding members into the group in DB, but the group was successfully created in DB", "error": str(e)}
if status == 201:
result = {"status": "success" , "message": "Group with group members has been created successfully"}
return str(result), status
def addContact(userID, grpID):
url = 'http://localhost:9004/contact/create/group/' + str(userID)
response = requests.post(url, json={'grpID': str(grpID)})
if __name__ == '__main__':
app.run(host="0.0.0.0", port=9002, debug=True)
# @app.route("/order", methods=['GET'])
# def get_all():
# return {'orders': [order.json() for order in Order.query.all()]}
# @app.route("/order/<string:order_id>", methods=['GET'])
# def find_by_order_id(order_id):
# order = Order.query.filter_by(order_id=order_id).first()
# if order:
# return order.json()
# return {'message': 'Order not found for id ' + str(order_id)}, 404
# @app.route("/order/", methods=['POST'])
# def create_order():
# # status in 2xx indicates success
# status = 201
# result = {}
# # retrieve information about order and order items from the request
# customer_id = request.json.get('customer_id', None)
# order = Order(customer_id = customer_id)
# cart_item = request.json.get('cart_item')
# for index, ci in enumerate(cart_item):
# if 'book_id' in cart_item[index] and 'quantity' in cart_item[index]:
# order.order_item.append(Order_Item(book_id = cart_item[index]['book_id'], quantity = cart_item[index]['quantity']))
# else:
# status = 400
# result = {"status": status, "message": "Invalid 'book_id' or 'quantity'."}
# break
# if status==201 and len(order.order_item)<1:
# status = 404
# result = {"status": status, "message": "Empty order."}
# if status==201:
# try:
# db.session.add(order)
# db.session.commit()
# except Exception as e:
# status = 500
# result = {"status": status, "message": "An error occurred when creating the order in DB.", "error": str(e)}
# if status==201:
# result = order.json()
# # FIXME: add a call to "send_order" copied from another appropriate file
# send_order(result)
# return str(result), status
# def send_order(order):
# """inform Shipping/Monitoring/Error as needed"""
# # default username / password to the borker are both 'guest'
# hostname = "localhost" # default broker hostname. Web management interface default at http://localhost:15672
# port = 5672 # default messaging port.
# # connect to the broker and set up a communication channel in the connection
# connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))
# # Note: various network firewalls, filters, gateways (e.g., SMU VPN on wifi), may hinder the connections;
# # If "pika.exceptions.AMQPConnectionError" happens, may try again after disconnecting the wifi and/or disabling firewalls
# channel = connection.channel()
# # set up the exchange if the exchange doesn't exist
# exchangename="order_topic"
# channel.exchange_declare(exchange=exchangename, exchange_type='topic')
# # prepare the message body content
# message = json.dumps(order, default=str) # convert a JSON object to a string
# # send the message
# # always inform Monitoring for logging no matter if successful or not
# # FIXME: is this line of code needed according to the binding key used in Monitoring?
# # channel.basic_publish(exchange=exchangename, routing_key="shipping.info", body=message)
# # By default, the message is "transient" within the broker;
# # i.e., if the monitoring is offline or the broker cannot match the routing key for the message, the message is lost.
# # If need durability of a message, need to declare the queue in the sender (see sample code below).
# if "status" in order: # if some error happened in order creation
# # inform Error handler
# channel.queue_declare(queue='errorhandler', durable=True) # make sure the queue used by the error handler exist and durable
# channel.queue_bind(exchange=exchangename, queue='errorhandler', routing_key='*.error') # make sure the queue is bound to the exchange
# channel.basic_publish(exchange=exchangename, routing_key="shipping.error", body=message,
# properties=pika.BasicProperties(delivery_mode = 2) # make message persistent within the matching queues until it is received by some receiver (the matching queues have to exist and be durable and bound to the exchange)
# )
# print("Order status ({:d}) sent to error handler.".format(order["status"]))
# else: # inform Shipping and exit
# # prepare the channel and send a message to Shipping
# channel.queue_declare(queue='shipping', durable=True) # make sure the queue used by Shipping exist and durable
# channel.queue_bind(exchange=exchangename, queue='shipping', routing_key='*.order') # make sure the queue is bound to the exchange
# channel.basic_publish(exchange=exchangename, routing_key="shipping.order", body=message,
# properties=pika.BasicProperties(delivery_mode = 2, # make message persistent within the matching queues until it is received by some receiver (the matching queues have to exist and be durable and bound to the exchange, which are ensured by the previous two api calls)
# )
# )
# print("Order sent to shipping.")
# # close the connection to the broker
# connection.close()
| 40.818182
| 281
| 0.644725
|
472a4f173066e48f170c11023572ebe7028b27c2
| 1,292
|
py
|
Python
|
setup.py
|
marchcui/pythUDS
|
3012c716299730c23f58d7e545d5bb22f301d1c7
|
[
"MIT"
] | null | null | null |
setup.py
|
marchcui/pythUDS
|
3012c716299730c23f58d7e545d5bb22f301d1c7
|
[
"MIT"
] | null | null | null |
setup.py
|
marchcui/pythUDS
|
3012c716299730c23f58d7e545d5bb22f301d1c7
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'udsoncan',
packages = find_packages(exclude=['test']),
package_data={
'': ['*.conf'],
},
version = '1.12.2',
description = 'Implementation of the Unified Diagnostic Service (UDS) protocol (ISO-14229) used in the automotive industry.',
long_description=long_description,
author = 'Pier-Yves Lessard',
author_email = 'py.lessard@gmail.com',
license='MIT',
url = 'https://github.com/pylessard/python-udsoncan',
download_url = 'https://github.com/pylessard/python-udsoncan/archive/v1.12.2.tar.gz',
keywords = ['uds', '14229', 'iso-14229', 'diagnostic', 'automotive'],
python_requires='>=3.0',
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Operating System :: POSIX :: Linux",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
],
)
| 35.888889
| 128
| 0.642415
|
eff04eca6dce680e8b23f7c8daf98965893d93d4
| 7,766
|
py
|
Python
|
prelim_data_gen13_v2.py
|
dhbesson/ION2017_NN_Jammers
|
8768987a7ec63ccfb9d24747cc36c554c3da749a
|
[
"MIT"
] | 5
|
2018-10-05T10:01:04.000Z
|
2022-01-02T22:11:51.000Z
|
prelim_data_gen13_v2.py
|
LINMEIYAN/ION2017_NN_Jammers
|
8768987a7ec63ccfb9d24747cc36c554c3da749a
|
[
"MIT"
] | null | null | null |
prelim_data_gen13_v2.py
|
LINMEIYAN/ION2017_NN_Jammers
|
8768987a7ec63ccfb9d24747cc36c554c3da749a
|
[
"MIT"
] | 9
|
2018-01-15T04:34:36.000Z
|
2022-02-20T03:53:19.000Z
|
import numpy as np
from scipy.spatial import distance
from operator import add
import matplotlib.pyplot as plt
import itertools
import sys
import math
import random
import pandas
# todo combine data gen and neural network into one script
size_training_set = 1000
nx_sensor = 5.
ny_sensor = 5.
nz_sensor = 1.
nx_quadrants = 10.
ny_quadrants = 10.
number_training_jammers = int(nx_quadrants*ny_quadrants)
x_min_quadrant = 0.
x_max_quadrant = 10000.
y_min_quadrant = 0.
y_max_quadrant = 10000.
x_length_quadrant = (x_max_quadrant - x_min_quadrant)/nx_quadrants
y_length_quadrant = (y_max_quadrant - y_min_quadrant)/ny_quadrants
quad_labels = range(0,int(nx_quadrants*ny_quadrants))
quad_labels = np.array(quad_labels)
quad_labels = np.reshape(quad_labels, (nx_quadrants,ny_quadrants))
# np.random.seed(42)
nx = nx_quadrants
ny = ny_quadrants
nz = 1
nP = 1
num_quads = int(nx_quadrants*ny_quadrants)
max_combo = 5
min_combo = 1
max_jammers = int(nx*ny)
# quad_combos = []
# for L in range(min_combo, max_combo+1):
# for subset in itertools.combinations(range(0,num_quads),L):
# quad_combos.append(list(subset))
# print("Quad Combo Level Complete: %d" % L)
# print(len(quad_combos))
def nCr(n,r):
f = math.factorial
return f(n) / f(r) / f(n-r)
combo_1 = max_jammers
combo_2 = nCr(max_jammers,2)
x_j = np.linspace((1/(2*nx))*x_max_quadrant, ((2*nx-1)/(2*nx))*x_max_quadrant, nx)
y_j = np.linspace((1/(2*ny))*y_max_quadrant, ((2*ny-1)/(2*ny))*y_max_quadrant, ny)
z_j = np.linspace(0, 0, nz)
P_j = np.linspace(100, 100, nP)
xx_j, yy_j, zz_j = np.meshgrid(x_j, y_j, z_j)
np.random.seed(69)
temp_xx_j = xx_j.reshape((np.prod(xx_j.shape),))
print(np.size(np.random.rand(np.size(temp_xx_j))))
temp_xx_j = map(add,temp_xx_j, -(1/(2*nx))*x_max_quadrant + (1/(nx))*x_max_quadrant*np.random.rand(np.size(temp_xx_j)))
print(temp_xx_j[0])
temp_yy_j = yy_j.reshape((np.prod(yy_j.shape),))
temp_yy_j = map(add,temp_yy_j, -(1/(2*ny))*y_max_quadrant + (1/(ny))*y_max_quadrant*np.random.rand(np.size(temp_yy_j)))
temp_zz_j = zz_j.reshape((np.prod(zz_j.shape),))
temp_jam_coords = (zip(temp_xx_j, temp_yy_j, temp_zz_j))
jammer_combos = []
new_jammer_choices = np.random.choice(max_jammers, number_training_jammers, replace=False)
new_temp_jam_coords = temp_jam_coords
# chosen_jammers = [1,5,20,35,62,80,89,95]
# chosen_jammers = [22,210,350,38]
# np.random.seed(50)
# chosen_jammers = np.random.choice(number_training_jammers, 3, replace=False)
# print(chosen_jammers)
# jammer_combos = [[new_temp_jam_coords[x_chosen] for x_chosen in chosen_jammers]]
for L in range(min_combo, max_combo+1):
if L <= 2:
for subset in itertools.combinations(new_temp_jam_coords,L):
jammer_combos.append(subset)
print("Jammer Combo Level Complete: %d" % L)
else:
np.random.seed(L+1)
random_subset_choices = [random.sample(new_temp_jam_coords, L) for i in range(0,int(size_training_set))]
[jammer_combos.append(subset) for subset in random_subset_choices]
print("Jammer Combo Level Complete: %d" % L)
print(jammer_combos)
# for L in range(min_combo, max_combo+1):
# for subset in itertools.combinations(new_temp_jam_coords,L):
# jammer_combos.append(subset)
# print("Jammer Combo Level Complete: %d" % L)
max_jammer_combo = len(jammer_combos)
FREQ = 1575.42
GT = 1.
GR = 1.
PT = 100.
C = 299792458.
WAVELENGTH = C/(FREQ*1000000.)
x_sensor = np.linspace(0, x_max_quadrant, nx_sensor)
y_sensor = np.linspace(0, y_max_quadrant, ny_sensor)
z_sensor = np.linspace(200, 200, nz_sensor)
xx, yy, zz = np.meshgrid(x_sensor, y_sensor, z_sensor)
xx = xx.reshape((np.prod(xx.shape),))
yy = yy.reshape((np.prod(yy.shape),))
zz = zz.reshape((np.prod(zz.shape),))
sensor_coords = zip(xx, yy, zz)
def determine_quadrant(x_pos,y_pos):
x_coord_quadrant = int(x_pos/x_length_quadrant)
y_coord_quadrant = int(y_pos/y_length_quadrant)
if x_coord_quadrant == nx_quadrants:
x_coord_quadrant = x_coord_quadrant - 1
if y_coord_quadrant == ny_quadrants:
y_coord_quadrant = y_coord_quadrant - 1
temp_quadrant = quad_labels[x_coord_quadrant,y_coord_quadrant]
return temp_quadrant
PR = dict([])
jam_test = {'data':[], 'target':[], 'jam_coords':temp_jam_coords,'jam_combo_coords':[], 'sensor_coords':sensor_coords}
macro_params = {'size_training_set':size_training_set, 'nx_sensor':nx_sensor, 'ny_sensor':ny_sensor,
'nz_sensor':nz_sensor, 'number_training_jammers':number_training_jammers, 'nx_quadrants':nx_quadrants,
'ny_quadrants':ny_quadrants,'x_min_quadrant':x_min_quadrant,'x_max_quadrant':x_max_quadrant,
'y_min_quadrant':y_min_quadrant,'y_max_quadrant':y_max_quadrant,'x_length_quadrant':x_length_quadrant,
'y_length_quadrant':y_length_quadrant,'quad_labels':quad_labels}
num_combo = len(jammer_combos)
temp_PR_matrix = {}
temp_quad_matrix = {}
test_temp_PR_matrix = {}
test_temp_quad_matrix = {}
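# Received jammer power at each sensor follows the free-space (Friis) model:
#   PR = PT * GT * GR * WAVELENGTH**2 / (4 * pi * R)**2
# where R is the Euclidean distance between jammer and sensor (computed below).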
for i, jammer in enumerate(new_temp_jam_coords):
temp_PR_matrix[i] = []
for j, sensor in enumerate(sensor_coords):
x = jammer[0]
y = jammer[1]
z = z_j
temp_R = distance.euclidean((x, y, z_j), sensor)
temp_PR_matrix[i].append(PT*GT*GR*WAVELENGTH**2/((4*np.pi * temp_R)**2))
temp_quad_matrix[i] = determine_quadrant(x,y)
# np.random.seed(3)
# new_jammer_combos_choices = np.random.choice(xrange(combo_1+combo_2,num_combo), size_training_set, replace=False)
# new_jammer_combos_choices = np.append(new_jammer_combos_choices,xrange(0,combo_1+combo_2))
# jammer_combos = [jammer_combos[i] for i in new_jammer_combos_choices]
# total = int(size_training_set+combo_1+combo_2)
# point = total / 100
# increment = total / 20
for i, combo in enumerate(jammer_combos):
temp_PR_list = [0]*len(sensor_coords)
temp_quad_list = np.zeros(num_quads)
    print(combo)
for jammer in combo:
i_j = new_temp_jam_coords.index(jammer)
x = jammer[0]
y = jammer[1]
z = z_j
temp_PR = temp_PR_matrix[i_j]
temp_PR_list = [a + b for a,b in zip(temp_PR_list,temp_PR)]
temp_quadrant = temp_quad_matrix[i_j]
temp_quad_list[temp_quadrant] = 1
jam_test['data'].append(temp_PR_list)
jam_test['target'].append(temp_quad_list)
jam_test['jam_combo_coords'].append(combo)
# if (i % (5 * point) == 0):
# sys.stdout.write("\r[" + "=" * (i / increment) + " " * ((total - i) / increment) + "]" + str(i / point) + "%")
# sys.stdout.flush()
jam_test['data'] = np.array(jam_test['data'])
jam_test['target'] = np.array(jam_test['target'])
jam_test['jam_coords'] = np.array(jam_test['jam_coords'])
jam_test['jam_combo_coords'] = np.array(jam_test['jam_combo_coords'])
jam_test['sensor_coords'] = np.array(jam_test['sensor_coords'])
np.save('test_data_chosen_messy_1k.npy', jam_test)
np.save('macro_params_chosen_messy_1k.npy', macro_params)
fig, ax = plt.subplots()
# img = plt.imread('lax.png')
# ax.imshow(img, origin='lower', extent=[0,7000,0,4750])
# print(jam_test['target'])
[ax.scatter(i_jam[0],i_jam[1], s=50, color='r') for i_jam in new_temp_jam_coords]
for x_line in np.arange(x_min_quadrant,x_max_quadrant,x_length_quadrant):
for y_line in np.arange(y_min_quadrant,y_max_quadrant,y_length_quadrant):
ax.plot([x_line, x_line], [y_line, y_line+y_length_quadrant], 'k-', lw=2)
ax.plot([x_line, x_line+x_length_quadrant], [y_line, y_line], 'k-', lw=2)
ax.plot([x_max_quadrant, x_max_quadrant], [y_min_quadrant, y_max_quadrant], 'k-', lw=2)
ax.plot([x_min_quadrant, x_max_quadrant], [y_max_quadrant, y_max_quadrant], 'k-', lw=2)
ax.scatter(jam_test['sensor_coords'][:,0],jam_test['sensor_coords'][:,1], color='b', s=100)
plt.show()
| 34.981982
| 120
| 0.711692
|
ecd9b0497104e49e71c9eb79e97c844635021c03
| 7,807
|
py
|
Python
|
okama/macro.py
|
nenkoru/okama
|
1e202bc801aea8adaf4c2ad033cd51af0c957df5
|
[
"MIT"
] | null | null | null |
okama/macro.py
|
nenkoru/okama
|
1e202bc801aea8adaf4c2ad033cd51af0c957df5
|
[
"MIT"
] | null | null | null |
okama/macro.py
|
nenkoru/okama
|
1e202bc801aea8adaf4c2ad033cd51af0c957df5
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Union, Tuple
import numpy as np
import pandas as pd
from .api.data_queries import QueryData
from .api.namespaces import get_macro_namespaces
from .common.helpers.helpers import Float, Frame, Date
from .settings import default_macro, PeriodLength, _MONTHS_PER_YEAR
class MacroABC(ABC):
def __init__(
self,
symbol: str = default_macro,
first_date: Union[str, pd.Timestamp] = "1800-01",
last_date: Union[str, pd.Timestamp] = "2030-01",
):
self.symbol: str = symbol
self._check_namespace()
self._get_symbol_data(symbol)
self.values_ts: pd.Series = QueryData.get_macro_ts(
symbol, first_date, last_date
)
self.first_date: pd.Timestamp = self.values_ts.index[0].to_timestamp()
self.last_date: pd.Timestamp = self.values_ts.index[-1].to_timestamp()
self.pl = PeriodLength(
self.values_ts.shape[0] // _MONTHS_PER_YEAR,
self.values_ts.shape[0] % _MONTHS_PER_YEAR,
)
self._pl_txt = f"{self.pl.years} years, {self.pl.months} months"
def __repr__(self):
dic = {
"symbol": self.symbol,
"name": self.name,
"country": self.country,
"currency": self.currency,
"type": self.type,
"first date": self.first_date.strftime("%Y-%m"),
"last date": self.last_date.strftime("%Y-%m"),
"period length": self._pl_txt,
}
return repr(pd.Series(dic))
def _check_namespace(self):
namespace = self.symbol.split(".", 1)[-1]
allowed_namespaces = get_macro_namespaces()
if namespace not in allowed_namespaces:
raise ValueError(
f"{namespace} is not in allowed namespaces: {allowed_namespaces}"
)
def _get_symbol_data(self, symbol):
x = QueryData.get_symbol_info(symbol)
self.ticker: str = x["code"]
self.name: str = x["name"]
self.country: str = x["country"]
self.currency: str = x["currency"]
self.type: str = x["type"]
@abstractmethod
def describe(self):
pass
class Inflation(MacroABC):
"""
Inflation related data and methods.
"""
@property
def cumulative_inflation(self) -> pd.Series:
"""
Return cumulative inflation rate time series for a period from first_date to last_date.
"""
if self.symbol.split(".", 1)[-1] != "INFL":
raise ValueError("cumulative_inflation is defined for inflation only")
return (self.values_ts + 1.0).cumprod() - 1.0
@property
def annual_inflation_ts(self):
return Frame.get_annual_return_ts_from_monthly(self.values_ts)
@property
def purchasing_power_1000(self) -> Float:
"""
Return purchasing power of 1000 (in a currency of inflation) after period from first_date to last_date.
"""
return Float.get_purchasing_power(self.cumulative_inflation[-1])
@property
def rolling_inflation(self) -> pd.Series:
"""
Return 12 months rolling inflation time series.
"""
if self.symbol.split(".", 1)[-1] != "INFL":
raise ValueError("cumulative_inflation is defined for inflation only")
x = (self.values_ts + 1.0).rolling(_MONTHS_PER_YEAR).apply(
np.prod, raw=True
) - 1.0
x.dropna(inplace=True)
return x
def describe(self, years: Tuple[int, ...] = (1, 5, 10)) -> pd.DataFrame:
"""
Generate descriptive inflation statistics for a given list of tickers.
Statistics includes:
- YTD compound inflation
- Annual inflation (geometric mean) for a given list of periods
- max 12 months inflation for the periods
- Annual inflation (geometric mean) for the whole history
"""
description = pd.DataFrame()
dt0 = self.last_date
df = self.values_ts
# YTD inflation properties
year = pd.Timestamp.today().year
ts = df[str(year):]
inflation = Frame.get_cumulative_return(ts)
row1 = {self.name: inflation}
row1.update(period="YTD", property="compound inflation")
row2 = {self.name: Float.get_purchasing_power(inflation)}
row2.update(period="YTD", property="1000 purchasing power")
description = description.append([row1, row2], ignore_index=True)
# inflation properties for a given list of periods
for i in years:
dt = Date.subtract_years(dt0, i)
if dt >= self.first_date:
ts = df[dt:]
# mean inflation
inflation = Frame.get_cagr(ts)
row1 = {self.name: inflation}
# compound inflation
comp_inflation = Frame.get_cumulative_return(ts)
row2 = {self.name: comp_inflation}
# max inflation
max_inflation = self.rolling_inflation[dt:].nlargest(
n=1
) # largest 12m inflation for selected period
row3 = {self.name: max_inflation.iloc[0]}
row3.update(period=max_inflation.index.values[0].strftime("%Y-%m"))
# purchase power
row4 = {self.name: Float.get_purchasing_power(comp_inflation)}
else:
row1 = {self.name: None}
row2 = {self.name: None}
row3 = {self.name: None}
row3.update(period=f"{i} years")
row4 = {self.name: None}
row1.update(period=f"{i} years", property="annual inflation")
row2.update(period=f"{i} years", property="compound inflation")
row3.update(property="max 12m inflation")
row4.update(period=f"{i} years", property="1000 purchasing power")
description = description.append(row1, ignore_index=True)
description = description.append(row2, ignore_index=True)
description = description.append(row3, ignore_index=True)
description = description.append(row4, ignore_index=True)
# Annual inflation for full period available
ts = df
full_inflation = Frame.get_cagr(ts)
row = {self.name: full_inflation}
row.update(period=self._pl_txt, property="annual inflation")
description = description.append(row, ignore_index=True)
# compound inflation
comp_inflation = Frame.get_cumulative_return(ts)
row = {self.name: comp_inflation}
row.update(period=self._pl_txt, property="compound inflation")
description = description.append(row, ignore_index=True)
# max inflation for full period available
max_inflation = self.rolling_inflation.nlargest(n=1)
row = {self.name: max_inflation.iloc[0]}
row.update(
period=max_inflation.index.values[0].strftime("%Y-%m"),
property="max 12m inflation",
)
description = description.append(row, ignore_index=True)
# purchase power
row = {self.name: Float.get_purchasing_power(comp_inflation)}
row.update(period=self._pl_txt, property="1000 purchasing power")
description = description.append(row, ignore_index=True)
return Frame.change_columns_order(
description, ["property", "period"], position="first"
)
class Rate(MacroABC):
"""
Rates of central banks and banks.
"""
@property
def okid(self) -> pd.Series:
return Frame.get_okid_index(self.values_ts, self.symbol)
def describe(self, years: Tuple[int, ...] = (1, 5, 10)):
# TODO: Make describe() for OKID indexes
pass
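# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# Assuming the okama data backend is reachable, an inflation series could be
# inspected roughly like this (the symbol is a placeholder; it must end in
# ".INFL" to pass the checks above):
#
#     infl = Inflation('USD.INFL', first_date='2000-01', last_date='2020-01')
#     infl.cumulative_inflation         # cumulative rate over the period
#     infl.rolling_inflation            # 12-month rolling inflation
#     infl.purchasing_power_1000        # what 1000 units buy after the period
#     infl.describe(years=(1, 5, 10))   # summary DataFrame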
| 37.17619
| 111
| 0.606379
|
39d0a9ea7eb064ed77fad4eaae9b156c87c6f16c
| 805
|
py
|
Python
|
product/urls.py
|
leandrocunha526/client-manager
|
25e2a7d4b083545176757d0ab9c8355167fe04ed
|
[
"BSD-3-Clause"
] | 1
|
2022-01-06T09:51:09.000Z
|
2022-01-06T09:51:09.000Z
|
product/urls.py
|
leandrocunha526/client-manager
|
25e2a7d4b083545176757d0ab9c8355167fe04ed
|
[
"BSD-3-Clause"
] | 4
|
2022-01-07T11:06:00.000Z
|
2022-02-21T20:26:05.000Z
|
product/urls.py
|
leandrocunha526/client-manager
|
25e2a7d4b083545176757d0ab9c8355167fe04ed
|
[
"BSD-3-Clause"
] | 2
|
2022-01-07T11:01:50.000Z
|
2022-01-08T05:44:46.000Z
|
from django.urls import path
from .views import product_list, product_delete, product_new, product_update, category_new, category_list, \
category_update, category_delete, product_detail
urlpatterns = [
path('list/', product_list, name="product_list"),
path('delete/<int:id>', product_delete, name="product_delete"),
path('new/', product_new, name="product_new"),
path('update/<int:id>', product_update, name="product_update"),
path('detail/<int:id>', product_detail, name="product_detail"),
path('category/new/', category_new, name="category_new"),
path('category/list/', category_list, name="category_list"),
path('category/update/<int:id>', category_update, name="category_update"),
path('category/delete/<int:id>', category_delete, name="category_delete"),
]
| 47.352941
| 108
| 0.724224
|
bf30c78421fa5d37d60314d8ccd06608147e5513
| 97,519
|
py
|
Python
|
lib/python2.7/site-packages/paramiko/transport.py
|
drpaneas/linuxed.gr
|
95676e9f18a234092656c61b73b9e6633f2e39ec
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/paramiko/transport.py
|
drpaneas/linuxed.gr
|
95676e9f18a234092656c61b73b9e6633f2e39ec
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/paramiko/transport.py
|
drpaneas/linuxed.gr
|
95676e9f18a234092656c61b73b9e6633f2e39ec
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Core protocol implementation
"""
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth
from paramiko.channel import Channel
from paramiko.common import xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, \
cMSG_GLOBAL_REQUEST, DEBUG, MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, \
MSG_DEBUG, ERROR, WARNING, cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, \
cMSG_NEWKEYS, MSG_NEWKEYS, cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, \
CONNECTION_FAILED_CODE, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, \
OPEN_SUCCEEDED, cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, \
MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, \
MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, \
MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, \
MSG_CHANNEL_EXTENDED_DATA, MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, \
MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE, MIN_PACKET_SIZE, MAX_WINDOW_SIZE, \
DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.kex_gex import KexGex
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_group14 import KexGroup14
from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14, NullHostKey
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (SSHException, BadAuthenticationType,
ChannelException, ProxyCommandFailure)
from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
from Crypto.Cipher import Blowfish, AES, DES3, ARC4
try:
from Crypto.Util import Counter
except ImportError:
from paramiko.util import Counter
# for thread cleanup
_active_threads = []
def _join_lingering_threads():
for thr in _active_threads:
thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class Transport (threading.Thread, ClosingContextManager):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
`channels <.Channel>`, across the session. Multiple channels can be
multiplexed across a single session (and often are, in the case of port
forwardings).
Instances of this class may be used as context managers.
"""
_PROTO_ID = '2.0'
_CLIENT_ID = 'paramiko_%s' % paramiko.__version__
_preferred_ciphers = ('aes128-ctr', 'aes256-ctr', 'aes128-cbc', 'blowfish-cbc',
'aes256-cbc', '3des-cbc', 'arcfour128', 'arcfour256')
_preferred_macs = ('hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96')
_preferred_keys = ('ssh-rsa', 'ssh-dss', 'ecdsa-sha2-nistp256')
_preferred_kex = ( 'diffie-hellman-group14-sha1', 'diffie-hellman-group-exchange-sha1' , 'diffie-hellman-group1-sha1')
_preferred_compression = ('none',)
_cipher_info = {
'aes128-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16},
'aes256-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32},
'blowfish-cbc': {'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16},
'aes128-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16},
'aes256-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32},
'3des-cbc': {'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24},
'arcfour128': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16},
'arcfour256': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32},
}
_mac_info = {
'hmac-sha1': {'class': sha1, 'size': 20},
'hmac-sha1-96': {'class': sha1, 'size': 12},
'hmac-md5': {'class': md5, 'size': 16},
'hmac-md5-96': {'class': md5, 'size': 12},
}
_key_info = {
'ssh-rsa': RSAKey,
'ssh-dss': DSSKey,
'ecdsa-sha2-nistp256': ECDSAKey,
}
_kex_info = {
'diffie-hellman-group1-sha1': KexGroup1,
'diffie-hellman-group14-sha1': KexGroup14,
'diffie-hellman-group-exchange-sha1': KexGex,
'gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGroup1,
'gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGroup14,
'gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGex
}
_compression_info = {
# zlib@openssh.com is just zlib, but only turned on after a successful
# authentication. openssh servers may only offer this type because
# they've had troubles with security holes in zlib in the past.
'zlib@openssh.com': (ZlibCompressor, ZlibDecompressor),
'zlib': (ZlibCompressor, ZlibDecompressor),
'none': (None, None),
}
_modulus_pack = None
def __init__(self,
sock,
default_window_size=DEFAULT_WINDOW_SIZE,
default_max_packet_size=DEFAULT_MAX_PACKET_SIZE,
gss_kex=False,
gss_deleg_creds=True):
"""
Create a new SSH session over an existing socket, or socket-like
object. This only creates the `.Transport` object; it doesn't begin the
SSH session yet. Use `connect` or `start_client` to begin a client
session, or `start_server` to begin a server session.
If the object is not actually a socket, it must have the following
methods:
- ``send(str)``: Writes from 1 to ``len(str)`` bytes, and returns an
int representing the number of bytes written. Returns
0 or raises ``EOFError`` if the stream has been closed.
- ``recv(int)``: Reads from 1 to ``int`` bytes and returns them as a
string. Returns 0 or raises ``EOFError`` if the stream has been
closed.
- ``close()``: Closes the socket.
- ``settimeout(n)``: Sets a (float) timeout on I/O operations.
For ease of use, you may also pass in an address (as a tuple) or a host
string as the ``sock`` argument. (A host string is a hostname with an
optional port (separated by ``":"``) which will be converted into a
tuple of ``(hostname, port)``.) A socket will be connected to this
address and used for communication. Exceptions from the ``socket``
call may be thrown in this case.
.. note::
            Modifying the window and packet sizes might have adverse
effects on your channels created from this transport. The default
values are the same as in the OpenSSH code base and have been
battle tested.
:param socket sock:
a socket or socket-like object to create the session over.
:param int default_window_size:
sets the default window size on the transport. (defaults to
2097152)
:param int default_max_packet_size:
sets the default max packet size on the transport. (defaults to
32768)
.. versionchanged:: 1.15
Added the ``default_window_size`` and ``default_max_packet_size``
arguments.
"""
self.active = False
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
hl = sock.split(':', 1)
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
reason = 'No suitable address family'
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error as e:
reason = str(e)
else:
break
else:
raise SSHException(
'Unable to connect to %s: %s' % (hostname, reason))
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
# Python < 2.3 doesn't have the settimeout method - RogerB
try:
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never
# propagated.
self.sock.settimeout(0.1)
except AttributeError:
pass
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = 'SSH-' + self._PROTO_ID + '-' + self._CLIENT_ID
self.remote_version = ''
self.local_cipher = self.remote_cipher = ''
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# GSS-API / SSPI Key Exchange
self.use_gss_kex = gss_kex
# This will be set to True if GSS-API Key Exchange was performed
self.gss_kex_used = False
self.kexgss_ctxt = None
self.gss_host = None
if self.use_gss_kex:
self.kexgss_ctxt = GSSAuth("gssapi-keyex", gss_deleg_creds)
self._preferred_kex = ('gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==',
'gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==',
'gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==',
'diffie-hellman-group-exchange-sha1',
'diffie-hellman-group14-sha1',
'diffie-hellman-group1-sha1')
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
self.lock = threading.Lock() # synchronization (always higher level than write_lock)
# tracking open channels
self._channels = ChannelMap()
self.channel_events = {} # (id -> Event)
self.channels_seen = {} # (id -> True)
self._channel_counter = 1
self.default_max_packet_size = default_max_packet_size
self.default_window_size = default_window_size
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = 'paramiko.transport'
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
self.global_response = None # response Message from an arbitrary global request
self.completion_event = None # user-defined event callbacks
self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = {}
self.server_accepts = []
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = {}
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
out = '<paramiko.Transport at %s' % hex(long(id(self)) & xffffffff)
if not self.active:
out += ' (unconnected)'
else:
if self.local_cipher != '':
out += ' (cipher %s, %d bits)' % (self.local_cipher,
self._cipher_info[self.local_cipher]['key-size'] * 8)
if self.is_authenticated():
out += ' (active; %d open channel(s))' % len(self._channels)
elif self.initial_kex_done:
out += ' (connected; awaiting auth)'
else:
out += ' (connecting)'
out += '>'
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
.. versionadded:: 1.5.3
"""
self.sock.close()
self.close()
def get_security_options(self):
"""
Return a `.SecurityOptions` object which can be used to tweak the
encryption algorithms this transport will permit (for encryption,
digest/hash operations, public keys, and key exchanges) and the order
of preference for them.
"""
return SecurityOptions(self)
def set_gss_host(self, gss_host):
"""
Setter for C{gss_host} if GSS-API Key Exchange is performed.
:param str gss_host: The targets name in the kerberos database
Default: The name of the host to connect to
:rtype: Void
"""
# We need the FQDN to get this working with SSPI
self.gss_host = socket.getfqdn(gss_host)
def start_client(self, event=None):
"""
Negotiate a new SSH2 session as a client. This is the first step after
creating a new `.Transport`. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling `auth_password <Transport.auth_password>` or
`auth_publickey <Transport.auth_publickey>`.
.. note:: `connect` is a simpler method for connecting as a client.
.. note::
After calling this method (or `start_server` or `connect`), you
should no longer directly read from or write to the original socket
object.
:param .threading.Event event:
an event to trigger when negotiation is complete (optional)
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
"""
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
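    # --- Editor's note: illustrative client flow, not part of the original file. ---
    # Per the docstring above, a typical synchronous client session looks roughly
    # like this (host, credentials and the opened channel are placeholders):
    #
    #     t = Transport(('ssh.example.com', 22))   # also accepts "host:port"
    #     t.start_client()                         # blocks until negotiation is done
    #     t.auth_password('user', 'secret')        # or auth_publickey(...)
    #     chan = t.open_session()
    #     ...
    #     t.close()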
def start_server(self, event=None, server=None):
"""
Negotiate a new SSH2 session as a server. This is the first step after
creating a new `.Transport` and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given ``Event`` will
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods `get_allowed_auths
<.ServerInterface.get_allowed_auths>`, `check_auth_none
<.ServerInterface.check_auth_none>`, `check_auth_password
<.ServerInterface.check_auth_password>`, and `check_auth_publickey
<.ServerInterface.check_auth_publickey>` in the given ``server`` object
to control the authentication process.
After a successful authentication, the client should request to open a
channel. Override `check_channel_request
<.ServerInterface.check_channel_request>` in the given ``server``
object to allow channels to be opened.
.. note::
After calling this method (or `start_client` or `connect`), you
should no longer directly read from or write to the original socket
object.
:param .threading.Event event:
an event to trigger when negotiation is complete.
:param .ServerInterface server:
an object used to perform authentication and create `channels
<.Channel>`
:raises SSHException: if negotiation fails (and no ``event`` was passed
in)
"""
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
:param .PKey key:
the host key to add, usually an `.RSAKey` or `.DSSKey`.
"""
self.server_key_dict[key.get_name()] = key
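# Server-mode sketch (illustrative only; the key path and DemoServer class
# are assumptions): load a host key, start negotiation, then accept a channel.
#
#     host_key = RSAKey(filename='/path/to/host_rsa_key')
#     t = Transport(client_sock)
#     t.add_server_key(host_key)
#     t.start_server(server=DemoServer())   # DemoServer subclasses ServerInterface
#     chan = t.accept(timeout=20)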
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with `add_server_key`, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, ``None`` is returned. In client mode, the behavior is undefined.
:return:
host key (`.PKey`) of the type negotiated by the client, or
``None``.
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
def load_server_moduli(filename=None):
"""
(optional)
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like ``/etc/ssh/moduli``).
If you call `load_server_moduli` and it returns ``True``, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
:param str filename:
optional path to the moduli file, if you happen to know that it's
not in a standard location.
:return:
True if a moduli file was successfully loaded; False otherwise.
.. note:: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack()
# places to look for the openssh "moduli" file
file_list = ['/etc/ssh/moduli', '/usr/local/etc/moduli']
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
load_server_moduli = staticmethod(load_server_moduli)
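# Sketch: enabling group-exchange kex in server mode (optional). The call has
# no effect on clients and simply returns False if no moduli file is found.
#
#     if not Transport.load_server_moduli():
#         print('group-exchange kex will be unavailable')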
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
.. note::
Previously this call returned a tuple of ``(key type, key
string)``. You can get the same effect by calling `.PKey.get_name`
for the key type, and ``str(key)`` for the key string.
:raises SSHException: if no session is currently active.
:return: public key (`.PKey`) of the remote server
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
:return:
True if the session is still active (open); False if the session is
closed
"""
return self.active
def open_session(self, window_size=None, max_packet_size=None):
"""
Request a new channel to the server, of type ``"session"``. This is
just an alias for calling `open_channel` with an argument of
``"session"``.
.. note:: Modifying the window and packet sizes might have adverse
effects on the session created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
return self.open_channel('session',
window_size=window_size,
max_packet_size=max_packet_size)
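# Sketch: once authenticated, open_session() yields a Channel that can run a
# command (the command string is a placeholder).
#
#     chan = t.open_session()
#     chan.exec_command('uptime')
#     output = chan.recv(4096)
#     chan.close()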
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type ``"x11"``. This
is just an alias for ``open_channel('x11', src_addr=src_addr)``.
:param tuple src_addr:
the source address (``(str, int)``) of the x11 server (port is the
x11 port, ie. 6010)
:return: a new `.Channel`
:raises SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('x11', src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
``"auth-agent@openssh.com"``.
This is just an alias for ``open_channel('auth-agent@openssh.com')``.
:return: a new `.Channel`
:raises SSHException:
if the request is rejected or the session ends prematurely
"""
return self.open_channel('auth-agent@openssh.com')
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
"""
Request a new channel back to the client, of type ``"forwarded-tcpip"``.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
:param src_addr: originator's address
:param dest_addr: local (server) connected address
"""
return self.open_channel('forwarded-tcpip', dest_addr, src_addr)
def open_channel(self,
kind,
dest_addr=None,
src_addr=None,
window_size=None,
max_packet_size=None):
"""
Request a new channel to the server. `Channels <.Channel>` are
socket-like objects used for the actual transfer of data across the
session. You may only request a channel after negotiating encryption
(using `connect` or `start_client`) and authenticating.
.. note:: Modifying the window and packet sizes might have adverse
effects on the channel created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param str kind:
the kind of channel requested (usually ``"session"``,
``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
:param tuple dest_addr:
the destination address (address + port tuple) of this port
forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
``"direct-tcpip"`` (ignored for other channel types)
:param src_addr: the source address of this port forwarding, if
``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:return: a new `.Channel` on success
:raises SSHException: if the request is rejected or the session ends
prematurely
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
if not self.active:
raise SSHException('SSH session not active')
self.lock.acquire()
try:
window_size = self._sanitize_window_size(window_size)
max_packet_size = self._sanitize_packet_size(max_packet_size)
chanid = self._next_channel()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN)
m.add_string(kind)
m.add_int(chanid)
m.add_int(window_size)
m.add_int(max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(window_size, max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.isSet():
break
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
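# Sketch: opening a "direct-tcpip" channel to tunnel a TCP connection through
# the server (the addresses are placeholders).
#
#     chan = t.open_channel('direct-tcpip',
#                           dest_addr=('intranet.example.com', 80),
#                           src_addr=('127.0.0.1', 0))
#     chan.sendall(b'GET / HTTP/1.0\r\n\r\n')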
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, src_addr_port, dest_addr_port):
# the default behavior just queues the channel for accept()
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
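# Sketch: asking the server to listen and forward connections back to us.
# With no handler, forwarded connections show up via accept().
#
#     port = t.request_port_forward('', 0)   # let the server pick a port
#     chan = t.accept(timeout=60)            # next forwarded connection (or None)
#     if chan is not None:
#         data = chan.recv(1024)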
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
:param str address: the address to stop forwarding
:param int port: the port to stop forwarding
"""
if not self.active:
return
self._tcp_handler = None
self.global_request('cancel-tcpip-forward', (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success, an
SFTP session will be opened with the remote host, and a new
`.SFTPClient` object will be returned.
:return:
a new `.SFTPClient` referring to an sftp session (channel) across
this transport
"""
return SFTPClient.from_transport(self)
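# Sketch: SFTP over an already-authenticated transport (the remote and local
# paths are placeholders).
#
#     sftp = t.open_sftp_client()
#     sftp.get('/remote/file.txt', '/tmp/file.txt')
#     sftp.close()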
def send_ignore(self, byte_count=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
:param int byte_count:
the number of random bytes to send in the payload of the ignored
packet -- defaults to a random number from 10 to 41.
"""
m = Message()
m.add_byte(cMSG_IGNORE)
if byte_count is None:
byte_count = (byte_ord(os.urandom(1)) % 32) + 10
m.add_bytes(os.urandom(byte_count))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
:raises SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
``interval`` seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
:param int interval:
seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
"""
self.packetizer.set_keepalive(interval,
lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False))
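# Sketch: keep NAT/firewall state alive on an idle connection.
#
#     t.set_keepalive(30)   # one keepalive packet after 30s without outbound data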
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
:param str kind: name of the request.
:param tuple data:
an optional tuple containing additional data to attach to the
request.
:param bool wait:
``True`` if this method should not return until a response is
received; ``False`` otherwise.
:return:
a `.Message` containing possible additional data if the request was
successful (or an empty `.Message` if ``wait`` was ``False``);
``None`` if the request was denied.
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(cMSG_GLOBAL_REQUEST)
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "%s"' % kind)
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.isSet():
break
return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout, ``None``
is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
def connect(self, hostkey=None, username='', password=None, pkey=None,
gss_host=None, gss_auth=False, gss_kex=False, gss_deleg_creds=True):
"""
Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
for `start_client`, `get_remote_server_key`, and
`Transport.auth_password` or `Transport.auth_publickey`. Use those
methods if you want more control.
You can use this method immediately after creating a Transport to
negotiate encryption with a server. If it fails, an exception will be
thrown. On success, the method will return cleanly, and an encrypted
session exists. You may immediately call `open_channel` or
`open_session` to get a `.Channel` object, which is used for data
transfer.
.. note::
If you fail to supply a password or private key, this method may
succeed, but a subsequent `open_channel` or `open_session` call may
fail because you haven't authenticated yet.
:param .PKey hostkey:
the host key expected from the server, or ``None`` if you don't
want to do host key verification.
:param str username: the username to authenticate as.
:param str password:
a password to use for authentication, if you want to use password
authentication; otherwise ``None``.
:param .PKey pkey:
a private key to use for authentication, if you want to use private
key authentication; otherwise ``None``.
:param str gss_host:
The target's name in the kerberos database. Default: hostname
:param bool gss_auth:
``True`` if you want to use GSS-API authentication.
:param bool gss_kex:
Perform GSS-API Key Exchange and user authentication.
:param bool gss_deleg_creds:
Whether to delegate GSS-API client credentials.
:raises SSHException: if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails.
"""
if hostkey is not None:
self._preferred_keys = [hostkey.get_name()]
self.start_client()
# check host key if we were given one
# If GSS-API Key Exchange was performed, we are not required to check
# the host key.
if (hostkey is not None) and not gss_kex:
key = self.get_remote_server_key()
if (key.get_name() != hostkey.get_name()) or (key.asbytes() != hostkey.asbytes()):
self._log(DEBUG, 'Bad host key from server')
self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(hostkey.asbytes())))
self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(key.asbytes())))
raise SSHException('Bad host key from server')
self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
if gss_auth:
self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-with-mic)')
self.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds)
elif gss_kex:
self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-keyex)')
self.auth_gssapi_keyex(username)
elif pkey is not None:
self._log(DEBUG, 'Attempting public-key auth...')
self.auth_publickey(username, pkey)
else:
self._log(DEBUG, 'Attempting password auth...')
self.auth_password(username, password)
return
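# Sketch: the one-call client path; host key checking is skipped here for
# brevity (hostkey=None), which is unsafe outside of examples.
#
#     t = Transport(('ssh.example.com', 22))
#     t.connect(username='user', password='secret')
#     sftp = t.open_sftp_client()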
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like `start_client`. The exception (if any) is cleared after
this call.
:return:
an exception, or ``None`` if there is no stored exception.
.. versionadded:: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see `.SubsystemHandler` for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the `.SubsystemHandler` constructor later.
:param str name: name of the subsystem.
:param class handler:
subclass of `.SubsystemHandler` that handles this subsystem.
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
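# Sketch: wiring up the stock SFTP subsystem in server mode; StubSFTPServer
# stands in for an SFTPServerInterface implementation of your own.
#
#     t.set_subsystem_handler('sftp', SFTPServer, StubSFTPServer)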
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
:return:
True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
"""
return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns ``None``.
:return: username that was authenticated (a `str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def get_banner(self):
"""
Return the banner supplied by the server upon connect. If no banner is
supplied, this method returns ``None``.
:returns: server supplied banner (`str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.banner
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
`.BadAuthenticationType` exception raised.
:param str username: the username to authenticate as
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
:raises SSHException: if the authentication failed due to a network
error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
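# Sketch: probing the server's allowed auth types via the expected failure.
#
#     try:
#         t.auth_none('user')
#     except BadAuthenticationType as e:
#         allowed = e.allowed_types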
def auth_password(self, username, password, event=None, fallback=True):
"""
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
default), if the server doesn't support plain password authentication
but does support so-called "keyboard-interactive" mode, an attempt
will be made to authenticate using this interactive mode. If it fails,
the normal exception will be thrown as if the attempt had never been
made. This is useful for some recent Gentoo and Debian distributions,
which turn off plain password authentication in a misguided belief
that interactive authentication is "more secure". (It's not.)
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param basestring password: the password to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:param bool fallback:
``True`` if an attempt at an automated "interactive" password auth
should be made if the server doesn't support normal password auth
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if password authentication isn't
allowed by the server for this user (and no event was passed in)
:raises AuthenticationException: if the authentication failed (and no
event was passed in)
:raises SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to send the password unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_password(username, password, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
try:
return self.auth_handler.wait_for_response(my_event)
except BadAuthenticationType as e:
# if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
if not fallback or ('keyboard-interactive' not in e.allowed_types):
raise
try:
def handler(title, instructions, fields):
if len(fields) > 1:
raise SSHException('Fallback authentication failed.')
if len(fields) == 0:
# for some reason, at least on os x, a 2nd request will
# be made with zero fields requested. maybe it's just
# to try to fake out automated scripting of the exact
# type we're doing here. *shrug* :)
return []
return [password]
return self.auth_interactive(username, handler)
except SSHException:
# attempt failed; just raise the original exception
raise e
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param .PKey key: the private key to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty)
:raises BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
:raises AuthenticationException: if the authentication failed (and no
event was passed in)
:raises SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
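# Sketch: private-key auth; the key path is a placeholder, and the key could
# equally be a DSSKey or other PKey subclass.
#
#     pkey = RSAKey.from_private_key_file('/home/user/.ssh/id_rsa')
#     t.auth_publickey('user', pkey)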
def auth_interactive(self, username, handler, submethods=''):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
`list` of auth types permissible for the next stage of
authentication (normally empty).
:raises BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user
:raises AuthenticationException: if the authentication failed
:raises SSHException: if there was a network error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(username, handler, my_event, submethods)
return self.auth_handler.wait_for_response(my_event)
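# Sketch: a handler that answers every keyboard-interactive prompt with the
# same password (purely illustrative).
#
#     def answer_all(title, instructions, prompts):
#         return ['secret' for _prompt, _echo in prompts]
#
#     t.auth_interactive('user', answer_all)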
def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
"""
Authenticate to the Server using GSS-API / SSPI.
:param str username: The username to authenticate as
:param str gss_host: The target host
:param bool gss_deleg_creds: Delegate credentials or not
:return: list of auth types permissible for the next stage of
authentication (normally empty)
:rtype: list
:raise BadAuthenticationType: if gssapi-with-mic isn't
allowed by the server (and no event was passed in)
:raise AuthenticationException: if the authentication failed (and no
event was passed in)
:raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds, my_event)
return self.auth_handler.wait_for_response(my_event)
def auth_gssapi_keyex(self, username):
"""
Authenticate to the Server with GSS-API / SSPI if GSS-API Key Exchange
was the used key exchange method.
:param str username: The username to authenticate as
:return: list of auth types permissible for the next stage of
authentication (normally empty)
:rtype: list
:raise BadAuthenticationType: if GSS-API Key Exchange was not performed
(and no event was passed in)
:raise AuthenticationException: if the authentication failed (and no
event was passed in)
:raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_keyex(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def set_log_channel(self, name):
"""
Set the channel for this transport's logging. The default is
``"paramiko.transport"`` but it can be set to anything you want. (See
the `.logging` module for more info.) SSH Channels will log to a
sub-channel of the one specified.
:param str name: new channel name for logging
.. versionadded:: 1.1
"""
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
def get_log_channel(self):
"""
Return the channel name used for this transport's logging.
:return: channel name as a `str`
.. versionadded:: 1.2
"""
return self.log_name
def set_hexdump(self, hexdump):
"""
Turn on/off logging a hex dump of protocol traffic at DEBUG level in
the logs. Normally you would want this off (which is the default),
but if you are debugging something, it may be useful.
:param bool hexdump:
``True`` to log protocol traffic (in hex) to the log; ``False``
otherwise.
"""
self.packetizer.set_hexdump(hexdump)
def get_hexdump(self):
"""
Return ``True`` if the transport is currently logging hex dumps of
protocol traffic.
:return: ``True`` if hex dumps are being logged, else ``False``.
.. versionadded:: 1.4
"""
return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
Turn on/off compression. This will only have an effect before starting
the transport (ie before calling `connect`, etc). By default,
compression is off since it negatively affects interactive sessions.
:param bool compress:
``True`` to ask the remote client/server to compress traffic;
``False`` to refuse compression
.. versionadded:: 1.5.2
"""
if compress:
self._preferred_compression = ('zlib@openssh.com', 'zlib', 'none')
else:
self._preferred_compression = ('none',)
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around ``'getpeername'`` on the underlying
socket. If the socket-like object has no ``'getpeername'`` method,
then ``("unknown", 0)`` is returned.
:return:
the address of the remote host, if known, as a ``(str, int)``
tuple.
"""
gp = getattr(self.sock, 'getpeername', None)
if gp is None:
return 'unknown', 0
return gp()
def stop_thread(self):
self.active = False
self.packetizer.close()
while self.is_alive() and (self is not threading.current_thread()):
self.join(10)
### internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
def _get_modulus_pack(self):
"""used by KexGex to find primes for group exchange"""
return self._modulus_pack
def _next_channel(self):
"""you are holding the lock"""
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
def _unlink_channel(self, chanid):
"""used by a Channel to remove itself from the active channel list"""
self._channels.delete(chanid)
def _send_message(self, data):
self.packetizer.send_message(data)
def _send_user_message(self, data):
"""
send a message, but block if we're in key negotiation. this is used
for user-initiated requests.
"""
start = time.time()
while True:
self.clear_to_send.wait(0.1)
if not self.active:
self._log(DEBUG, 'Dropping user packet because connection is dead.')
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.isSet():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
raise SSHException('Key-exchange timed out waiting for key negotiation')
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
"""used by a kex object to set the K (root key) and H (exchange hash)"""
self.K = k
self.H = h
if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
"""used by a kex object to register the next packet type it expects to see"""
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException('Unknown host key type')
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException('Signature verification (%s) failed.' % self.host_key_type)
self.host_key = key
def _compute_key(self, id, nbytes):
"""id is 'A' - 'F' for the various keys used by ssh"""
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(b(id))
m.add_bytes(self.session_id)
out = sofar = sha1(m.asbytes()).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = sha1(m.asbytes()).digest()
out += digest
sofar += digest
return out[:nbytes]
def _get_cipher(self, name, key, iv):
if name not in self._cipher_info:
raise SSHException('Unknown client cipher ' + name)
if name in ('arcfour128', 'arcfour256'):
# arcfour cipher
cipher = self._cipher_info[name]['class'].new(key)
# as per RFC 4345, the first 1536 bytes of keystream
# generated by the cipher MUST be discarded
cipher.encrypt(b" " * 1536)
return cipher
elif name.endswith("-ctr"):
# CTR modes, we need a counter
counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True))
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter)
else:
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, src_addr_port):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def _sanitize_window_size(self, window_size):
if window_size is None:
window_size = self.default_window_size
return clamp_value(MIN_PACKET_SIZE, window_size, MAX_WINDOW_SIZE)
def _sanitize_packet_size(self, max_packet_size):
if max_packet_size is None:
max_packet_size = self.default_max_packet_size
return clamp_value(MIN_PACKET_SIZE, max_packet_size, MAX_WINDOW_SIZE)
def run(self):
# (use the exposed "run" method, because if we specify a thread target
# of a private method, threading.Thread will keep a reference to it
# indefinitely, creating a GC cycle and not letting Transport ever be
# GC'd. it's a bug in Thread.)
# Hold reference to 'sys' so we can test sys.modules to detect
# interpreter shutdown.
self.sys = sys
# active=True occurs before the thread is launched, to avoid a race
_active_threads.append(self)
if self.server_mode:
self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & xffffffff))
else:
self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & xffffffff))
try:
try:
self.packetizer.write_all(b(self.local_version + '\r\n'))
self._check_banner()
self._send_kex_init()
self._expect_packet(MSG_KEXINIT)
while self.active:
if self.packetizer.need_rekey() and not self.in_kex:
self._send_kex_init()
try:
ptype, m = self.packetizer.read_message()
except NeedRekeyException:
continue
if ptype == MSG_IGNORE:
continue
elif ptype == MSG_DISCONNECT:
self._parse_disconnect(m)
self.active = False
self.packetizer.close()
break
elif ptype == MSG_DEBUG:
self._parse_debug(m)
continue
if len(self._expected_packet) > 0:
if ptype not in self._expected_packet:
raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype))
self._expected_packet = tuple()
if (ptype >= 30) and (ptype <= 41):
self.kex_engine.parse_next(ptype, m)
continue
if ptype in self._handler_table:
self._handler_table[ptype](self, m)
elif ptype in self._channel_handler_table:
chanid = m.get_int()
chan = self._channels.get(chanid)
if chan is not None:
self._channel_handler_table[ptype](chan, m)
elif chanid in self.channels_seen:
self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
else:
self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
self.active = False
self.packetizer.close()
elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table):
self.auth_handler._handler_table[ptype](self.auth_handler, m)
else:
self._log(WARNING, 'Oops, unhandled type %d' % ptype)
msg = Message()
msg.add_byte(cMSG_UNIMPLEMENTED)
msg.add_int(m.seqno)
self._send_message(msg)
except SSHException as e:
self._log(ERROR, 'Exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
except EOFError as e:
self._log(DEBUG, 'EOF in transport thread')
#self._log(DEBUG, util.tb_strings())
self.saved_exception = e
except socket.error as e:
if type(e.args) is tuple:
if e.args:
emsg = '%s (%d)' % (e.args[1], e.args[0])
else: # empty tuple, e.g. socket.timeout
emsg = str(e) or repr(e)
else:
emsg = e.args
self._log(ERROR, 'Socket exception: ' + emsg)
self.saved_exception = e
except Exception as e:
self._log(ERROR, 'Unknown exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
_active_threads.remove(self)
for chan in list(self._channels.values()):
chan._unlink()
if self.active:
self.active = False
self.packetizer.close()
if self.completion_event is not None:
self.completion_event.set()
if self.auth_handler is not None:
self.auth_handler.abort()
for event in self.channel_events.values():
event.set()
try:
self.lock.acquire()
self.server_accept_cv.notify()
finally:
self.lock.release()
self.sock.close()
except:
# Don't raise spurious 'NoneType has no attribute X' errors when we
# wake up during interpreter shutdown. Or rather -- raise
# everything *if* sys.modules (used as a convenient sentinel)
# appears to still exist.
if self.sys.modules is not None:
raise
### protocol stages
def _negotiate_keys(self, m):
# throws SSHException on anything unusual
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
if self.local_kex_init is None:
# remote side wants to renegotiate
self._send_kex_init()
self._parse_kex_init(m)
self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them 15 seconds for the first line, then just 2 seconds
# each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except ProxyCommandFailure:
raise
except Exception as e:
raise SSHException('Error reading SSH protocol banner: ' + str(e))
if buf[:4] == 'SSH-':
break
self._log(DEBUG, 'Banner: ' + buf)
if buf[:4] != 'SSH-':
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
# pull off any attached comment
comment = ''
i = buf.find(' ')
if i >= 0:
comment = buf[i+1:]
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split('-', 2)
if len(segs) < 3:
raise SSHException('Invalid SSH banner')
version = segs[1]
client = segs[2]
if version != '1.99' and version != '2.0':
raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
def _send_kex_init(self):
"""
announce to the other side that we'd like to negotiate keys, and what
kind of key negotiation we support.
"""
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
self.in_kex = True
if self.server_mode:
if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
# can't do group-exchange if we don't have a pack of potential primes
pkex = list(self.get_security_options().kex)
pkex.remove('diffie-hellman-group-exchange-sha1')
self.get_security_options().kex = pkex
available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
self._preferred_keys))
else:
available_server_keys = self._preferred_keys
m = Message()
m.add_byte(cMSG_KEXINIT)
m.add_bytes(os.urandom(16))
m.add_list(self._preferred_kex)
m.add_list(available_server_keys)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_compression)
m.add_list(self._preferred_compression)
m.add_string(bytes())
m.add_string(bytes())
m.add_boolean(False)
m.add_int(0)
# save a copy for later (needed to compute a hash)
self.local_kex_init = m.asbytes()
self._send_message(m)
def _parse_kex_init(self, m):
cookie = m.get_bytes(16)
kex_algo_list = m.get_list()
server_key_algo_list = m.get_list()
client_encrypt_algo_list = m.get_list()
server_encrypt_algo_list = m.get_list()
client_mac_algo_list = m.get_list()
server_mac_algo_list = m.get_list()
client_compress_algo_list = m.get_list()
server_compress_algo_list = m.get_list()
client_lang_list = m.get_list()
server_lang_list = m.get_list()
kex_follows = m.get_boolean()
unused = m.get_int()
self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) +
' client encrypt:' + str(client_encrypt_algo_list) +
' server encrypt:' + str(server_encrypt_algo_list) +
' client mac:' + str(client_mac_algo_list) +
' server mac:' + str(server_mac_algo_list) +
' client compress:' + str(client_compress_algo_list) +
' server compress:' + str(server_compress_algo_list) +
' client lang:' + str(client_lang_list) +
' server lang:' + str(server_lang_list) +
' kex follows?' + str(kex_follows))
# as a server, we pick the first item in the client's list that we support.
# as a client, we pick the first item in our list that the server supports.
if self.server_mode:
agreed_kex = list(filter(self._preferred_kex.__contains__, kex_algo_list))
else:
agreed_kex = list(filter(kex_algo_list.__contains__, self._preferred_kex))
if len(agreed_kex) == 0:
raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
self.kex_engine = self._kex_info[agreed_kex[0]](self)
if self.server_mode:
available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
self._preferred_keys))
agreed_keys = list(filter(available_server_keys.__contains__, server_key_algo_list))
else:
agreed_keys = list(filter(server_key_algo_list.__contains__, self._preferred_keys))
if len(agreed_keys) == 0:
raise SSHException('Incompatible ssh peer (no acceptable host key)')
self.host_key_type = agreed_keys[0]
if self.server_mode and (self.get_server_key() is None):
raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
if self.server_mode:
agreed_local_ciphers = list(filter(self._preferred_ciphers.__contains__,
server_encrypt_algo_list))
agreed_remote_ciphers = list(filter(self._preferred_ciphers.__contains__,
client_encrypt_algo_list))
else:
agreed_local_ciphers = list(filter(client_encrypt_algo_list.__contains__,
self._preferred_ciphers))
agreed_remote_ciphers = list(filter(server_encrypt_algo_list.__contains__,
self._preferred_ciphers))
if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
raise SSHException('Incompatible ssh server (no acceptable ciphers)')
self.local_cipher = agreed_local_ciphers[0]
self.remote_cipher = agreed_remote_ciphers[0]
self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher))
if self.server_mode:
agreed_remote_macs = list(filter(self._preferred_macs.__contains__, client_mac_algo_list))
agreed_local_macs = list(filter(self._preferred_macs.__contains__, server_mac_algo_list))
else:
agreed_local_macs = list(filter(client_mac_algo_list.__contains__, self._preferred_macs))
agreed_remote_macs = list(filter(server_mac_algo_list.__contains__, self._preferred_macs))
if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
raise SSHException('Incompatible ssh server (no acceptable macs)')
self.local_mac = agreed_local_macs[0]
self.remote_mac = agreed_remote_macs[0]
if self.server_mode:
agreed_remote_compression = list(filter(self._preferred_compression.__contains__, client_compress_algo_list))
agreed_local_compression = list(filter(self._preferred_compression.__contains__, server_compress_algo_list))
else:
agreed_local_compression = list(filter(client_compress_algo_list.__contains__, self._preferred_compression))
agreed_remote_compression = list(filter(server_compress_algo_list.__contains__, self._preferred_compression))
if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
self.local_compression = agreed_local_compression[0]
self.remote_compression = agreed_remote_compression[0]
self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' %
(agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac,
self.remote_mac, self.local_compression, self.remote_compression))
# save for computing hash later...
# now wait! openssh has a bug (and others might too) where there are
# actually some extra bytes (one NUL byte in openssh's case) added to
# the end of the packet but not parsed. turns out we need to throw
# away those bytes because they aren't part of the hash.
self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
def _activate_inbound(self):
"""switch on newly negotiated encryption parameters for inbound traffic"""
block_size = self._cipher_info[self.remote_cipher]['block-size']
if self.server_mode:
IV_in = self._compute_key('A', block_size)
key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
else:
IV_in = self._compute_key('B', block_size)
key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
mac_size = self._mac_info[self.remote_mac]['size']
mac_engine = self._mac_info[self.remote_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('E', mac_engine().digest_size)
else:
mac_key = self._compute_key('F', mac_engine().digest_size)
self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
compress_in = self._compression_info[self.remote_compression][1]
if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _activate_outbound(self):
"""switch on newly negotiated encryption parameters for outbound traffic"""
m = Message()
m.add_byte(cMSG_NEWKEYS)
self._send_message(m)
block_size = self._cipher_info[self.local_cipher]['block-size']
if self.server_mode:
IV_out = self._compute_key('B', block_size)
key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
else:
IV_out = self._compute_key('A', block_size)
key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
engine = self._get_cipher(self.local_cipher, key_out, IV_out)
mac_size = self._mac_info[self.local_mac]['size']
mac_engine = self._mac_info[self.local_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('F', mac_engine().digest_size)
else:
mac_key = self._compute_key('E', mac_engine().digest_size)
sdctr = self.local_cipher.endswith('-ctr')
self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key, sdctr)
compress_out = self._compression_info[self.local_compression][0]
if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if not self.packetizer.need_rekey():
self.in_kex = False
# we always expect to receive NEWKEYS now
self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
self.authenticated = True
# delayed initiation of compression
if self.local_compression == 'zlib@openssh.com':
compress_out = self._compression_info[self.local_compression][0]
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if self.remote_compression == 'zlib@openssh.com':
compress_in = self._compression_info[self.remote_compression][1]
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _parse_newkeys(self, m):
self._log(DEBUG, 'Switch to new keys ...')
self._activate_inbound()
# can also free a bunch of stuff here
self.local_kex_init = self.remote_kex_init = None
self.K = None
self.kex_engine = None
if self.server_mode and (self.auth_handler is None):
# create auth handler for server mode
self.auth_handler = AuthHandler(self)
if not self.initial_kex_done:
# this was the first key exchange
self.initial_kex_done = True
# send an event?
if self.completion_event is not None:
self.completion_event.set()
# it's now okay to send data again (if this was a re-key)
if not self.packetizer.need_rekey():
self.in_kex = False
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.set()
finally:
self.clear_to_send_lock.release()
return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_text()
self._log(INFO, 'Disconnect (code %d): %s' % (code, desc))
def _parse_global_request(self, m):
kind = m.get_text()
self._log(DEBUG, 'Received global request "%s"' % kind)
want_reply = m.get_boolean()
if not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind)
ok = False
elif kind == 'tcpip-forward':
address = m.get_text()
port = m.get_int()
ok = self.server_object.check_port_forward_request(address, port)
if ok:
ok = (ok,)
elif kind == 'cancel-tcpip-forward':
address = m.get_text()
port = m.get_int()
self.server_object.cancel_port_forward_request(address, port)
ok = True
else:
ok = self.server_object.check_global_request(kind, m)
extra = ()
if type(ok) is tuple:
extra = ok
ok = True
if want_reply:
msg = Message()
if ok:
msg.add_byte(cMSG_REQUEST_SUCCESS)
msg.add(*extra)
else:
msg.add_byte(cMSG_REQUEST_FAILURE)
self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, 'Global request successful.')
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, 'Global request denied.')
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
self._log(WARNING, 'Success for unrequested channel! [??]')
return
self.lock.acquire()
try:
chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
self._log(DEBUG, 'Secsh channel %d opened.' % chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open_failure(self, m):
chanid = m.get_int()
reason = m.get_int()
reason_str = m.get_text()
lang = m.get_text()
reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)')
self._log(ERROR, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
self.lock.acquire()
try:
self.saved_exception = ChannelException(reason, reason_text)
if chanid in self.channel_events:
self._channels.delete(chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open(self, m):
kind = m.get_text()
chanid = m.get_int()
initial_window_size = m.get_int()
max_packet_size = m.get_int()
reject = False
if (kind == 'auth-agent@openssh.com') and (self._forward_agent_handler is not None):
self._log(DEBUG, 'Incoming forward agent connection')
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'x11') and (self._x11_handler is not None):
origin_addr = m.get_text()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'forwarded-tcpip') and (self._tcp_handler is not None):
server_addr = m.get_text()
server_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
reject = True
reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
else:
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
if kind == 'direct-tcpip':
# handle direct-tcpip requests coming from the client
dest_addr = m.get_text()
dest_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
reason = self.server_object.check_channel_direct_tcpip_request(
my_chanid, (origin_addr, origin_port), (dest_addr, dest_port))
else:
reason = self.server_object.check_channel_request(kind, my_chanid)
if reason != OPEN_SUCCEEDED:
self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
reject = True
if reject:
msg = Message()
msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
msg.add_int(chanid)
msg.add_int(reason)
msg.add_string('')
msg.add_string('en')
self._send_message(msg)
return
chan = Channel(my_chanid)
self.lock.acquire()
try:
self._channels.put(my_chanid, chan)
self.channels_seen[my_chanid] = True
chan._set_transport(self)
chan._set_window(self.default_window_size, self.default_max_packet_size)
chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
finally:
self.lock.release()
m = Message()
m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
m.add_int(chanid)
m.add_int(my_chanid)
m.add_int(self.default_window_size)
m.add_int(self.default_max_packet_size)
self._send_message(m)
self._log(DEBUG, 'Secsh channel %d (%s) opened.', my_chanid, kind)
if kind == 'auth-agent@openssh.com':
self._forward_agent_handler(chan)
elif kind == 'x11':
self._x11_handler(chan, (origin_addr, origin_port))
elif kind == 'forwarded-tcpip':
chan.origin_addr = (origin_addr, origin_port)
self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port))
else:
self._queue_incoming_channel(chan)
def _parse_debug(self, m):
always_display = m.get_boolean()
msg = m.get_string()
lang = m.get_string()
self._log(DEBUG, 'Debug msg: ' + util.safe_string(msg))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return None, [], {}
return self.subsystem_table[name]
finally:
self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions (object):
"""
Simple object containing the security preferences of an ssh transport.
These are tuples of acceptable ciphers, digests, key types, and key
exchange algorithms, listed in order of preference.
Changing the contents and/or order of these fields affects the underlying
`.Transport` (but only if you change them before starting the session).
If you try to add an algorithm that paramiko doesn't recognize,
``ValueError`` will be raised. If you try to assign something besides a
tuple to one of the fields, ``TypeError`` will be raised.
"""
#__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
__slots__ = '_transport'
def __init__(self, transport):
self._transport = transport
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
return '<paramiko.SecurityOptions for %s>' % repr(self._transport)
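    # A minimal usage sketch (illustration only, not part of the original
    # module) of the behaviour described in the class docstring above. The
    # host address and algorithm names are assumptions; an unrecognised name
    # raises ValueError and a non-tuple/list assignment raises TypeError.
    #
    #     t = Transport(('203.0.113.10', 22))
    #     opts = t.get_security_options()
    #     opts.ciphers = ('aes256-ctr', 'aes128-ctr')    # restrict symmetric ciphers
    #     opts.kex = ('diffie-hellman-group14-sha1',)    # restrict key exchange
    #     t.start_client()                               # preferences apply to the handshake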
def _get_ciphers(self):
return self._transport._preferred_ciphers
def _get_digests(self):
return self._transport._preferred_macs
def _get_key_types(self):
return self._transport._preferred_keys
def _get_kex(self):
return self._transport._preferred_kex
def _get_compression(self):
return self._transport._preferred_compression
def _set(self, name, orig, x):
if type(x) is list:
x = tuple(x)
if type(x) is not tuple:
raise TypeError('expected tuple or list')
possible = list(getattr(self._transport, orig).keys())
forbidden = [n for n in x if n not in possible]
if len(forbidden) > 0:
raise ValueError('unknown cipher')
setattr(self._transport, name, x)
def _set_ciphers(self, x):
self._set('_preferred_ciphers', '_cipher_info', x)
def _set_digests(self, x):
self._set('_preferred_macs', '_mac_info', x)
def _set_key_types(self, x):
self._set('_preferred_keys', '_key_info', x)
def _set_kex(self, x):
self._set('_preferred_kex', '_kex_info', x)
def _set_compression(self, x):
self._set('_preferred_compression', '_compression_info', x)
ciphers = property(_get_ciphers, _set_ciphers, None,
"Symmetric encryption ciphers")
digests = property(_get_digests, _set_digests, None,
"Digest (one-way hash) algorithms")
key_types = property(_get_key_types, _set_key_types, None,
"Public-key algorithms")
kex = property(_get_kex, _set_kex, None, "Key exchange algorithms")
compression = property(_get_compression, _set_compression, None,
"Compression algorithms")
class ChannelMap (object):
def __init__(self):
# (id -> Channel)
self._map = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def put(self, chanid, chan):
self._lock.acquire()
try:
self._map[chanid] = chan
finally:
self._lock.release()
def get(self, chanid):
self._lock.acquire()
try:
return self._map.get(chanid, None)
finally:
self._lock.release()
def delete(self, chanid):
self._lock.acquire()
try:
try:
del self._map[chanid]
except KeyError:
pass
finally:
self._lock.release()
def values(self):
self._lock.acquire()
try:
return list(self._map.values())
finally:
self._lock.release()
def __len__(self):
self._lock.acquire()
try:
return len(self._map)
finally:
self._lock.release()
| 42.307592
| 179
| 0.609615
|
5ba79afefa650df446b9345d9504901375ad3669
| 3,914
|
py
|
Python
|
autogen.py
|
klondikemarlen/PythonSQLAlchemyAutoGen
|
1e23a386cdc918afc9528d2e7769285b4de7702c
|
[
"MIT"
] | null | null | null |
autogen.py
|
klondikemarlen/PythonSQLAlchemyAutoGen
|
1e23a386cdc918afc9528d2e7769285b4de7702c
|
[
"MIT"
] | 1
|
2017-04-16T05:03:52.000Z
|
2017-04-16T05:03:52.000Z
|
autogen.py
|
klondikemarlen/PythonSQLAlchemyAutoGen
|
1e23a386cdc918afc9528d2e7769285b4de7702c
|
[
"MIT"
] | null | null | null |
##testing##
# from sqlalchemy import Table, MetaData, Column, Integer, String, Float, Boolean
# from sqlalchemy import ForeignKey
# from sqlalchemy.orm import relationship
# from sqlalchemy.ext.hybrid import hybrid_property
# from sqlalchemy.exc import ArgumentError
# from sqlalchemy import inspect
# from sqlalchemy.orm import mapper
# from sqlalchemy import create_engine
from base_classes import Base
from jinja2 import Environment, FileSystemLoader, select_autoescape
from non_declarative_objects import Hero, Order
import pdb
class Column:
def __init__(self, name, type, value):
self.name = name
self.type = type
self.value = value
def __str__(self):
return "<Column: name='{}', type='{}', value={}>".format(self.name, self.type, repr(self.value))
class BuildTable:
# def __new__(cls):
# return self.obj, cls
def __init__(self, obj, tablename=''):
self.obj = obj
self.tablename = ''
self.column_names = []
        # Attributes that require extra work before they can become columns.
        # They might be relationships, foreign keys, children or parents,
        # or just plain lists, dicts or None values.
self.relationships = {'lists': [], 'dicts': [], 'nones': []}
try:
obj.__dict__
except AttributeError:
print()
print("Some kind of base case.")
print("Your object is a base class. Pass in a tablename in __init__")
print("build a new class that uses this class as a dict of itself?")
print()
if tablename:
self.tablename = tablename
else:
self.tablename = self.get_table_name()
self.column_names = self.build_columns()
# for column in self.column_names:
# print(column)
def build_columns(self):
"""Generate columns for table.
        Also updates the relationships attribute, which will be implemented using recursion.
"""
data = {}
try:
data = vars(self.obj)
except TypeError as ex:
if type(self.obj) == type(dict()):
data = self.obj
else:
raise ex
for name in sorted(data.keys()):
column_type = type(data[name])
if name.startswith("__"):
pass #ignore this one.
elif type(list()) == column_type:
self.relationships['lists'].append(name)
elif type(dict()) == column_type:
self.relationships['dicts'].append(name)
elif type(None) == column_type:
self.relationships['nones'].append(name)
elif type(int()) == column_type:
yield Column(name, "Integer", data[name])
elif type(str()) == column_type:
yield Column(name, "String", data[name])
elif type(float()) == column_type:
yield Column(name, "Float", data[name])
elif type(bool()) == column_type:
yield Column(name, "Boolean", data[name])
else:
print(TypeError("Can't yet handle type {}".format(type(data[name]))))
def get_table_name(self):
try:
return self.obj.__name__.lower()
except AttributeError:
return self.obj.__class__.__name__.lower()
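# Illustration only (not part of the original script): roughly what BuildTable
# does with a simple hand-rolled object. The Widget class is a made-up example.
#
#     class Widget:
#         def __init__(self):
#             self.name = 'cog'
#             self.size = 3
#             self.tags = []                     # ends up in relationships['lists']
#
#     table = BuildTable(Widget())
#     table.tablename                            # -> 'widget'
#     for column in table.column_names:          # generator of Column objects
#         print(column)                          # e.g. <Column: name='name', type='String', value='cog'>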
########## New Template concept.
# print(vars(Order))
env = Environment(
loader=FileSystemLoader('templates'),
autoescape=select_autoescape(default_for_string=False, default=False)
)
template = env.get_template('generic.py')
# hero_table_cls = BuildTable(Hero())
# print(template.render(cls=hero_table_cls))
order_table_cls = BuildTable(Order)
print(template.render(cls=order_table_cls))
| 31.312
| 104
| 0.583291
|
b6bf922758a0fae48468fa09018e32a27e02bb9a
| 4,344
|
py
|
Python
|
st2tests/integration/orquesta/test_wiring_inquiry.py
|
muyouming/st2
|
a80fa2b6b0f7ff3281ed8dee8ca6e97910fbd00e
|
[
"Apache-2.0"
] | 4,920
|
2015-01-01T15:12:17.000Z
|
2022-03-31T19:31:15.000Z
|
st2tests/integration/orquesta/test_wiring_inquiry.py
|
muyouming/st2
|
a80fa2b6b0f7ff3281ed8dee8ca6e97910fbd00e
|
[
"Apache-2.0"
] | 3,563
|
2015-01-05T19:02:19.000Z
|
2022-03-31T19:23:09.000Z
|
st2tests/integration/orquesta/test_wiring_inquiry.py
|
muyouming/st2
|
a80fa2b6b0f7ff3281ed8dee8ca6e97910fbd00e
|
[
"Apache-2.0"
] | 774
|
2015-01-01T20:41:24.000Z
|
2022-03-31T13:25:29.000Z
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import eventlet
from integration.orquesta import base
from st2common.constants import action as ac_const
class InquiryWiringTest(base.TestWorkflowExecution):
def test_basic_inquiry(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-basic")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the inquiry.
ac_exs = self._wait_for_task(
ex, "get_approval", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(ac_exs[0].id, {"approved": True})
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_consecutive_inquiries(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-consecutive")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the first inquiry.
t1_ac_exs = self._wait_for_task(
ex, "get_approval", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t1_ac_exs[0].id, {"approved": True})
# Wait for the workflow to pause again.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the second inquiry.
t2_ac_exs = self._wait_for_task(
ex, "get_confirmation", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t2_ac_exs[0].id, {"approved": True})
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_parallel_inquiries(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-parallel")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Respond to the first inquiry.
t1_ac_exs = self._wait_for_task(
ex, "ask_jack", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t1_ac_exs[0].id, {"approved": True})
t1_ac_exs = self._wait_for_task(
ex, "ask_jack", ac_const.LIVEACTION_STATUS_SUCCEEDED
)
# Allow some time for the first inquiry to get processed.
eventlet.sleep(2)
# Respond to the second inquiry.
t2_ac_exs = self._wait_for_task(
ex, "ask_jill", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t2_ac_exs[0].id, {"approved": True})
t2_ac_exs = self._wait_for_task(
ex, "ask_jill", ac_const.LIVEACTION_STATUS_SUCCEEDED
)
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_nested_inquiry(self):
        # Launch the workflow. The workflow will pause at the pending task.
ex = self._execute_workflow("examples.orquesta-ask-nested")
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
        # Get the action execution of the subworkflow.
ac_exs = self._wait_for_task(
ex, "get_approval", ac_const.LIVEACTION_STATUS_PAUSED
)
# Respond to the inquiry in the subworkflow.
t2_t2_ac_exs = self._wait_for_task(
ac_exs[0], "get_approval", ac_const.LIVEACTION_STATUS_PENDING
)
self.st2client.inquiries.respond(t2_t2_ac_exs[0].id, {"approved": True})
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
| 39.135135
| 80
| 0.69314
|