Dataset schema (29 columns per row):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
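
The records that follow conform to this schema. As a rough illustration of how rows of this shape can be handled once parsed, the sketch below filters a small list of records represented as plain Python dicts keyed by the column names above; the two example rows and the dict representation are assumptions for illustration only, not part of the dataset.

```python
# Minimal sketch: records shaped like the schema above, held as plain dicts.
# The two example rows below are made up for illustration.
records = [
    {"repo_name": "example/repo-a", "path": "/a.py", "license_type": "permissive",
     "language": "Python", "length_bytes": 699, "is_generated": False},
    {"repo_name": "example/repo-b", "path": "/b.py", "license_type": "no_license",
     "language": "Python", "length_bytes": 19123, "is_generated": False},
]

# Keep only permissively licensed, non-generated files under 10 kB.
selected = [
    r for r in records
    if r["license_type"] == "permissive"
    and not r["is_generated"]
    and r["length_bytes"] < 10_000
]

for r in selected:
    print(r["repo_name"], r["path"], r["length_bytes"])
```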

blob_id: 4b9785d208ec7bfd695f67a1c0ae0ae14af5c025 | directory_id: d3e4b3e0d30dabe9714429109d2ff7b9141a6b22
path: /Visualization/LagrangeInterpolationVisualization.py
content_id: 88ab36a87c9cd16c736d839ffcb9ba3d3157994f | detected_licenses: ["MIT"] | license_type: permissive
repo_name: SymmetricChaos/NumberTheory | snapshot_id: 184e41bc7893f1891fa7fd074610b0c1520fa7dd | revision_id: 65258e06b7f04ce15223c1bc0c2384ef5e9cec1a | branch_name: refs/heads/master
visit_date: 2021-06-11T17:37:34.576906 | revision_date: 2021-04-19T15:39:05 | committer_date: 2021-04-19T15:39:05
github_id: 175,703,757 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 699 | extension: py
content:
from Polynomials import lagrange_interpolation
import matplotlib.pyplot as plt
import numpy as np
points = [1,3,5,7]
function = lambda x: np.sin(x)
print("""Lagrange interpolation takes a set of n points and finds the "best" polynomial that describes them. Given n points on a plane there is a polynomial of degree n-1 that passes through all of them.""")
print(f"In this example we use {len(points)} points taken from the sine function.")
fig = plt.figure()
ax=fig.add_axes([0,0,1,1])
lp = lagrange_interpolation(points,function)
print(lp)
x = np.linspace(min(points),max(points),50)
y0 = function(x)
y1 = lp.evaluate(x)
plt.plot(x,y0)
plt.plot(x,y1)
plt.scatter(points,function(points))

authors: ["ajfraebel@gmail.com"] | author_id: ajfraebel@gmail.com
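
The LagrangeInterpolationVisualization.py record above explains the idea (n points determine a unique polynomial of degree at most n-1) but imports lagrange_interpolation from a local Polynomials module that is not included in this dump. A minimal self-contained sketch of the same construction, using only numpy, could look like the following; the helper name lagrange_eval is made up for illustration.

```python
import numpy as np

def lagrange_eval(xs, ys, x):
    """Evaluate the Lagrange interpolating polynomial through (xs[i], ys[i]) at x."""
    x = np.asarray(x, dtype=float)
    total = np.zeros_like(x)
    for i, (xi, yi) in enumerate(zip(xs, ys)):
        # Lagrange basis polynomial L_i: equals 1 at xs[i] and 0 at every other node.
        basis = np.ones_like(x)
        for j, xj in enumerate(xs):
            if j != i:
                basis *= (x - xj) / (xi - xj)
        total += yi * basis
    return total

xs = [1, 3, 5, 7]
ys = np.sin(xs)                              # sample the sine function at the nodes
grid = np.linspace(min(xs), max(xs), 50)
approx = lagrange_eval(xs, ys, grid)         # interpolant values on the grid
exact = np.sin(grid)                         # true function values
print(np.max(np.abs(approx - exact)))        # worst-case error on the grid
```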

blob_id: 0b1900e0a13d5588aa349822a427ad816264765e | directory_id: 287fcd6bc49381d5b116dd541a97c0ff37141214
path: /app/section/sections/hero_section.py
content_id: c5960e017024cdfa7d8610c48d487ea424d32899 | detected_licenses: [] | license_type: no_license
repo_name: elcolono/wagtail-cms | snapshot_id: 95812323768b90e3630c5f90e59a9f0074157ab5 | revision_id: b3acb2e5c8f985202da919aaa99ea9db2f6b4d51 | branch_name: refs/heads/master
visit_date: 2023-05-26T05:24:42.362695 | revision_date: 2020-10-08T17:23:22 | committer_date: 2020-10-08T17:23:22
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,839 | extension: py
content:
from django.db import models
from wagtail.snippets.models import register_snippet
from wagtail.admin.edit_handlers import (
MultiFieldPanel, FieldPanel, StreamFieldPanel, FieldRowPanel)
from wagtail.admin.edit_handlers import ObjectList, TabbedInterface
from wagtail.images.edit_handlers import ImageChooserPanel
from section.blocks import ButtonAction, SectionTitleBlock
from . import SectionBase
from wagtail.core.fields import StreamField
from section.blocks import ActionButton, PrimaryButton
from wagtail.core.models import Page
from section.settings import cr_settings
@register_snippet
class HeroSection(SectionBase, SectionTitleBlock, ButtonAction, Page):
hero_layout = models.CharField(
blank=True,
max_length=100,
verbose_name='Layout',
choices=[
('simple_centered', 'Simple centered'),
('image_right', 'Image on right')
],
default='simple_centered',
)
hero_first_button_text = models.CharField(
blank=True,
max_length=100,
verbose_name='Hero button text',
default='Subscribe',
help_text="Leave field empty to hide.",
)
hero_second_button_text = models.CharField(
blank=True,
max_length=100,
verbose_name='Hero button text',
default='Subscribe',
help_text="Leave field empty to hide.",
)
hero_image = models.ForeignKey(
'wagtailimages.Image',
blank=True,
null=True,
on_delete=models.SET_NULL,
verbose_name='Image',
related_name='+',
)
hero_image_size = models.CharField(
max_length=50,
choices=cr_settings['HERO_IMAGE_SIZE_CHOICES'],
default=cr_settings['HERO_IMAGE_SIZE_CHOICES_DEFAULT'],
verbose_name=('Image size'),
)
hero_action_type_1 = models.CharField(
max_length=50,
choices=cr_settings['HERO_ACTION_TYPE_CHOICES'],
default=cr_settings['HERO_ACTION_TYPE_CHOICES_DEFAULT'],
verbose_name=('Action type (First)'),
)
hero_action_type_2 = models.CharField(
max_length=50,
choices=cr_settings['HERO_ACTION_TYPE_CHOICES'],
default=cr_settings['HERO_ACTION_TYPE_CHOICES_DEFAULT'],
verbose_name=('Action type (Second)'),
)
hero_buttons = StreamField(
[
('action_button', ActionButton()),
('primary_button', PrimaryButton())
],
null=True,
verbose_name="Buttons",
help_text="Please choose Buttons"
)
# basic tab panels
basic_panels = Page.content_panels + [
FieldPanel('hero_layout', heading='Layout', classname="title full"),
MultiFieldPanel(
[
FieldRowPanel([
FieldPanel('hero_layout', classname="col6"),
FieldPanel('hero_image_size', classname="col6"),
]),
FieldRowPanel([
FieldPanel('section_heading',
heading='Heading', classname="col6"),
FieldPanel('section_subheading',
heading='Subheading', classname="col6"),
]),
FieldRowPanel([
FieldPanel('section_description',
heading='Description', classname="col6"),
]),
FieldPanel('hero_first_button_text'),
FieldPanel('hero_second_button_text'),
ImageChooserPanel('hero_image'),
],
heading='Content',
),
SectionBase.section_layout_panels,
SectionBase.section_design_panels,
]
# advanced tab panels
advanced_panels = (
SectionTitleBlock.title_basic_panels,
) + ButtonAction.button_action_panels
# Register Tabs
edit_handler = TabbedInterface(
[
ObjectList(basic_panels, heading="Basic"),
ObjectList(advanced_panels, heading="Plus+"),
]
)
# Page settings
template = 'sections/hero_section_preview.html'
parent_page_types = ['home.HomePage']
subpage_types = []
# Overring methods
def set_url_path(self, parent):
"""
Populate the url_path field based on this page's slug and the specified parent page.
(We pass a parent in here, rather than retrieving it via get_parent, so that we can give
new unsaved pages a meaningful URL when previewing them; at that point the page has not
been assigned a position in the tree, as far as treebeard is concerned.
"""
if parent:
self.url_path = ''
else:
# a page without a parent is the tree root, which always has a url_path of '/'
self.url_path = '/'
return self.url_path

authors: ["andreas.siedler@gmail.com"] | author_id: andreas.siedler@gmail.com

blob_id: b1ff28e00fcaf827759d3315508259d5c02fe49a | directory_id: 912cb61eaa768716d30844990ebbdd80ab2c2f4e
path: /ex070.py
content_id: aad48d4aa3286cd92534b1c397274d2ac7ddf5ea | detected_licenses: [] | license_type: no_license
repo_name: luizaacampos/exerciciosCursoEmVideoPython | snapshot_id: 5fc9bed736300916e1c26d115eb2e703ba1dd4ca | revision_id: 398bfa5243adae00fb58056d1672cc20ff4a31d6 | branch_name: refs/heads/main
visit_date: 2023-01-06T21:48:17.068478 | revision_date: 2020-11-11T12:29:10 | committer_date: 2020-11-11T12:29:10
github_id: 311,964,179 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 644 | extension: py
content:
total = tk = menor = soma = 0
print('--------------Loja Sallus-----------------')
while True:
prod = input('Nome do produto: ')
valor = float(input('Preço: R$'))
soma += 1
cont = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
total += valor
if valor > 1000.00:
tk += 1
if soma == 1 or valor < menor:
menor = valor
barato = prod
if cont == 'N':
break
print('---------FIM DO PROGRAMA-------------')
print(f'O total da compra foi R${total:.2f}')
print(f'Temos {tk} produtos custando mais de R$1000.00')
print(f'O produto mais barato foi {barato} que custa R${menor:.2f}')

authors: ["luiza.almcampos@gmail.com"] | author_id: luiza.almcampos@gmail.com

blob_id: 30197700259a9549341c49c7bd19ffeca986744d | directory_id: fb0e99751068fa293312f60fedf8b6d0b9eae293
path: /slepé_cesty_vývoje/iskušitel/najdu_testovací_soubory.py
content_id: 452504d722f35dd929333e4039ac4e9dc3d416ee | detected_licenses: [] | license_type: no_license
repo_name: BGCX261/zora-na-pruzi-hg-to-git | snapshot_id: d9628a07e3effa6eeb15b9b5ff6d75932a6deaff | revision_id: 34a331e17ba87c0de34e7f0c5b43642d5b175215 | branch_name: refs/heads/master
visit_date: 2021-01-19T16:52:06.478359 | revision_date: 2013-08-07T19:58:42 | committer_date: 2013-08-07T19:58:42
github_id: 41,600,435 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,598 | extension: py
content:
#!/usr/bin/env python3
# Copyright (c) 2012 Домоглед <domogled@domogled.eu>
# @author Петр Болф <petr.bolf@domogled.eu>
import os, fnmatch
MASKA_TESTOVACÍCH_SOUBORŮ = 'testuji_*.py'
def najdu_testovací_soubory(cesta):
počet_nalezených_testů = 0
if os.path.isdir(cesta):
for cesta_do_adresáře, nalezené_adresáře, nalezené_soubory in os.walk(cesta):
for jméno_nalezeného_souboru in nalezené_soubory:
if fnmatch.fnmatch(jméno_nalezeného_souboru, MASKA_TESTOVACÍCH_SOUBORŮ):
# if jméno_nalezeného_souboru.endswith('.py') and not jméno_nalezeného_souboru.startswith('__init__'):
cesta_k_nalezenému_souboru = os.path.join(cesta_do_adresáře, jméno_nalezeného_souboru)
počet_nalezených_testů = počet_nalezených_testů + 1
yield cesta_k_nalezenému_souboru
else:
if os.path.isfile(cesta):
if fnmatch.fnmatch(os.path.basename(cesta), MASKA_TESTOVACÍCH_SOUBORŮ):
počet_nalezených_testů = počet_nalezených_testů + 1
yield cesta
else:
raise IOError('Soubor testu "{}" neodpovídá masce {}'.format(cesta, MASKA_TESTOVACÍCH_SOUBORŮ))
else:
raise IOError('Soubor testu "{}" nejestvuje'.format(cesta))
if počet_nalezených_testů == 0:
raise IOError('Nenašel jsem žádný testovací soubor v cestě "{}" za pomocí masky "{}"'.format(cesta, MASKA_TESTOVACÍCH_SOUBORŮ))

authors: ["petr.bolf@domogled.eu"] | author_id: petr.bolf@domogled.eu

blob_id: bbca1de8f3365de6962acd80b69471036e33422e | directory_id: 68c4805ad01edd612fa714b1e0d210115e28bb7d
path: /venv/Lib/site-packages/numba/tests/test_config.py
content_id: de8371b8b2d4ac9757452a6d5a24a1954ff13f8d | detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive
repo_name: Happy-Egg/redesigned-happiness | snapshot_id: ac17a11aecc7459f4ebf0afd7d43de16fb37ae2c | revision_id: 08b705e3569f3daf31e44254ebd11dd8b4e6fbb3 | branch_name: refs/heads/master
visit_date: 2022-12-28T02:40:21.713456 | revision_date: 2020-03-03T09:04:30 | committer_date: 2020-03-03T09:04:30
github_id: 204,904,444 | star_events_count: 2 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 2022-12-08T06:19:04 | gha_created_at: 2019-08-28T10:18:05 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,444 | extension: py
content:
import os
import tempfile
import unittest
from .support import TestCase, temp_directory, override_env_config
from numba import config
try:
import yaml
_HAVE_YAML = True
except ImportError:
_HAVE_YAML = False
_skip_msg = "pyyaml needed for configuration file tests"
needs_yaml = unittest.skipIf(not _HAVE_YAML, _skip_msg)
@needs_yaml
class TestConfig(TestCase):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def setUp(self):
# use support.temp_directory, it can do the clean up
self.tmppath = temp_directory('config_tmp')
super(TestConfig, self).setUp()
def mock_cfg_location(self):
"""
Creates a mock launch location.
Returns the location path.
"""
return tempfile.mkdtemp(dir=self.tmppath)
def inject_mock_cfg(self, location, cfg):
"""
Injects a mock configuration at 'location'
"""
tmpcfg = os.path.join(location, config._config_fname)
with open(tmpcfg, 'wt') as f:
yaml.dump(cfg, f, default_flow_style=False)
def get_settings(self):
"""
Gets the current numba config settings
"""
store = dict()
for x in dir(config):
if x.isupper():
store[x] = getattr(config, x)
return store
def create_config_effect(self, cfg):
"""
Returns a config "original" from a location with no config file
and then the impact of applying the supplied cfg dictionary as
a config file at a location in the returned "current".
"""
# store original cwd
original_cwd = os.getcwd()
# create mock launch location
launch_dir = self.mock_cfg_location()
# switch cwd to the mock launch location, get and store settings
os.chdir(launch_dir)
# use override to ensure that the config is zero'd out with respect
# to any existing settings
with override_env_config('_', '_'):
original = self.get_settings()
# inject new config into a file in the mock launch location
self.inject_mock_cfg(launch_dir, cfg)
try:
# override something but don't change the value, this is to refresh
# the config and make sure the injected config file is read
with override_env_config('_', '_'):
current = self.get_settings()
finally:
# switch back to original dir with no new config
os.chdir(original_cwd)
return original, current
def test_config(self):
# ensure a non empty settings file does impact config and that the
# case of the key makes no difference
key = 'COLOR_SCHEME'
for case in [str.upper, str.lower]:
orig, curr = self.create_config_effect({case(key): 'light_bg'})
self.assertTrue(orig != curr)
self.assertTrue(orig[key] != curr[key])
self.assertEqual(curr[key], 'light_bg')
# check that just the color scheme is the cause of difference
orig.pop(key)
curr.pop(key)
self.assertEqual(orig, curr)
def test_empty_config(self):
# ensure an empty settings file does not impact config
orig, curr = self.create_config_effect({})
self.assertEqual(orig, curr)
if __name__ == '__main__':
unittest.main()

authors: ["yangyang4910709@163.com"] | author_id: yangyang4910709@163.com

blob_id: 8b6ae75cd27c32f78ea740595757c1a84a66c477 | directory_id: 6e43937c521b841595fbe7f59268ffc72dfefa9d
path: /GSP_WEB/views/index/view.py
content_id: 8abba08ca5d578311be5a2e72cc36170dcf80929 | detected_licenses: [] | license_type: no_license
repo_name: MiscCoding/gsp_web | snapshot_id: a5e50ce7591157510021cae49c6b2994f4eaabbe | revision_id: a24e319974021ba668c5f8b4000ce96d81d1483e | branch_name: refs/heads/master
visit_date: 2020-03-28T15:11:30.301700 | revision_date: 2019-08-12T04:47:42 | committer_date: 2019-08-12T04:47:42
github_id: 148,565,440 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 19,123 | extension: py
content:
#-*- coding: utf-8 -*-
import datetime
from collections import OrderedDict
from dateutil import parser
from elasticsearch import Elasticsearch
from flask import request, render_template, Blueprint, json
from GSP_WEB import login_required, db_session, app
from GSP_WEB.common.encoder.decimalEncoder import DecimalEncoder
from GSP_WEB.common.util.date_util import Local2UTC
from GSP_WEB.models.CommonCode import CommonCode
from GSP_WEB.models.Nations import nations
from GSP_WEB.models.Rules_BlackList import Rules_BlackList
from GSP_WEB.models.Rules_CNC import Rules_CNC
from GSP_WEB.query import dashboard
from GSP_WEB.query.dashboard import *
blueprint_page = Blueprint('bp_index_page', __name__, url_prefix='/index')
@blueprint_page.route('', methods=['GET'])
@login_required
# def getIndex():
# timenow = datetime.datetime.now().strftime("%Y-%m-%d")
# return render_template('index/dashboard.html', timenow = timenow)
def getIndex():
uri = CommonCode.query.filter_by(GroupCode='dashboard_link').filter_by(Code ='001').first()
return render_template('index/dashboard_kibana.html', kibana_link = uri.EXT1)
@blueprint_page.route('/DashboardLink', methods=['PUT'])
def setDashboardLink():
uri = CommonCode.query.filter_by(GroupCode='dashboard_link').filter_by(Code='001').first()
uri.EXT1 = request.form.get('link')
db_session.commit()
return ''
def todayUrlAnalysis(request, query_type = "uri"):
per_page = 1
start_idx = 0
end_dt = "now/d"
str_dt = "now-1d/d"
# "now-1d/d", "now/d"
query = {
"size": per_page,
"from": start_idx,
"query": {
"bool": {
"must": [
{
"range": {"@timestamp": {"gte": str_dt, "lte": end_dt}}
}, {
"term": {"analysis_type": query_type}
}
]
}
}
}
return query
def todayFileAnalysis(request, query_type = "file"):
per_page = 1
start_idx = 0
end_dt = "now/d"
str_dt = "now-1d/d"
# "now-1d/d", "now/d"
query = {
"size": per_page,
"from": start_idx,
"query": {
"bool": {
"must": [
{
"range": {"@timestamp": {"gte": str_dt, "lte": end_dt}}
}, {
"term": {"analysis_type": query_type}
}
]
}
}
}
return query
def totalMaliciousUrlQuery(request, query_type = "uri"):
per_page = 1
start_idx = 0
end_dt = "now/d"
str_dt = "now-1d/d"
# "now-1d/d", "now/d"
# timebefore = (datetime.datetime.now() - datetime.timedelta(minutes=5)).strftime("%Y-%m-%d %H:%M")
# before = parser.parse(timebefore).isoformat()
# timeNow = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# now = parser.parse(timeNow).isoformat()
query = {
"size": per_page,
"from": start_idx,
"query": {
"bool": {
"must": [
{
"term": {"analysis_type": query_type}
}
# {
# "range":
# {
# "security_level": {"gte": "4"}
# }
# }
]
}
}
}
secQuery = {"range": {"security_level": {"gte": int(app.config['ANALYSIS_RESULTS_SECURITY_LEVEL_MIN'])}}}
query["query"]["bool"]["must"].append(secQuery)
return query
def totalMaliciousQuery(request, query_type):
per_page = 1
start_idx = 0
end_dt = "now/d"
str_dt = "now-1d/d"
# "now-1d/d", "now/d"
# timebefore = (datetime.datetime.now() - datetime.timedelta(minutes=5)).strftime("%Y-%m-%d %H:%M")
# before = parser.parse(timebefore).isoformat()
# timeNow = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# now = parser.parse(timeNow).isoformat()
query = {
"size": per_page,
"from": start_idx,
"query": {
"bool": {
"must": [
# {
# "range": {"@timestamp": {"gte": str_dt, "lte": end_dt}}
# }
# {
# "range":
# {
# "security_level": {"gte": "4"}
# }
# }
]
}
}
}
secQuery = {"range": {"security_level": {"gte": int(app.config['ANALYSIS_RESULTS_SECURITY_LEVEL_MIN'])}}}
query["query"]["bool"]["must"].append(secQuery)
return query
def todayURLFileCount(type, device):
end_dt = "now/d"
str_dt = "now-1d/d"
query = {
"query": {
"bool": {
"must": [
# {
# "range": {"@timestamp": {"gt": str_dt, "lte": end_dt}}
# }
]
}
}
}
dataFrom = {"match" : {"data_from" : {"query":device, "type":"phrase"}}}
analysisType = {"match": {"analysis_type": {"query": type, "type": "phrase"}}}
range = { "range": {"@timestamp": {"gt": str_dt, "lte": end_dt}}}
# secQuery = {"range": {"security_level": {"gte": int(app.config['ANALYSIS_RESULTS_SECURITY_LEVEL_MIN'])}}}
query["query"]["bool"]["must"].append(dataFrom)
query["query"]["bool"]["must"].append(analysisType)
query["query"]["bool"]["must"].append(range)
return query
@blueprint_page.route('/getTopBoard')
def getTopBoard():
query = dashboard.topboardQuery
results = db_session.execute(query)
total = 0
before_total = 0
totalMaliciousCodeCount = 0
totalTodayUriAnalysisCount = 0
totalTodayUriAnalysisCountNPC = 0
totalTodayUriAnalysisCountIMAS = 0
totalTodayMaliciousFileCount = 0
totalTodayMaliciousFileCountIMAS = 0
totalTodayMaliciousFileCountNPC = 0
totalTodayMaliciousFileCountZombieZero = 0
totalMaliciousUrlCount = 0
totalMaliciousUrlCountRDBMS = 0
totalMaliciousFileCountRDBMS = 0
totalYesterdayMaliciousUrlCount = 0
totalYesterdayMaliciousFileCount = 0
#blackList count query to MySQL
blackListQueryResult = Rules_BlackList.query
blackListQueryResult = blackListQueryResult.filter_by(source = 750)
blackListQueryResult = blackListQueryResult.count()
totalMaliciousFileCountRDBMS = blackListQueryResult
#CNC url count by RDBMS
cncRuleQueryResult = Rules_CNC.query
cncRuleQueryResult = cncRuleQueryResult.count()
totalMaliciousUrlCountRDBMS = cncRuleQueryResult
es = Elasticsearch([{'host': app.config['ELASTICSEARCH_URI'], 'port': app.config['ELASTICSEARCH_PORT']}])
##total Malicious code count
# query_type = ""
# doc = totalMaliciousQuery(request, query_type)
# res = es.search(index="gsp*" + "", doc_type="analysis_results", body=doc)
# totalMaliciousCodeCount = int(res['hits']['total']) #Total malicious code count
##total malicious url count
# MFdoc = totalMaliciousUrlQuery(request, "uri")
# res = es.search(index="gsp*" + "", doc_type="analysis_results", body=MFdoc)
# totalMaliciousUrlCount = int(res['hits']['total'])
##total tody uri analysis count NPC
MUdoc = todayURLFileCount("uri", "NPC")
res = es.count(index="gsp*" + "", doc_type="analysis_results", body=MUdoc)
totalTodayUriAnalySisCountNPC = res['count']
##total tody uri analysis count NPC
MUdoc = todayURLFileCount("uri", "IMAS")
res = es.count(index="gsp*" + "", doc_type="analysis_results", body=MUdoc)
totalTodayUriAnalySisCountIMAS = res['count']
##total today file analysis count NPC
MFdoc = todayURLFileCount("file", "NPC")
res = es.count(index="gsp*" + "", doc_type="analysis_results", body=MFdoc)
totalTodayMaliciousFileCountNPC = res['count']
##total today file analysis count IMAS
MFdoc = todayURLFileCount("file", "IMAS")
res = es.count(index="gsp*" + "", doc_type="analysis_results", body=MFdoc)
totalTodayMaliciousFileCountIMAS = res['count']
##total today file analysis count ZombieZero
MFdoc = todayURLFileCount("file", "zombie zero")
res = es.count(index="gsp*" + "", doc_type="analysis_results", body=MFdoc)
totalTodayMaliciousFileCountZombieZero = res['count']
# MFdoc = todayFileAnalysis(request, "file")
# res = es.search(index="gsp*" + "", doc_type="analysis_results", body=MFdoc)
# totalTodayMaliciousFileCount = int(res['hits']['total'])
##total yesterday malicious url count
MFdoc = dashboard.yesterdayUrlFileAnalysis(request, "uri")
res = es.search(index="gsp*" + "", doc_type="analysis_results", body=MFdoc)
totalYesterdayMaliciousUrlCount= int(res['hits']['total'])
##total yesterday malicious file count
MFdoc = dashboard.yesterdayUrlFileAnalysis(request, "file")
res = es.search(index="gsp*" + "", doc_type="analysis_results", body=MFdoc)
totalYesterdayMaliciousFileCount = int(res['hits']['total'])
result = dict()
result['spread'] = 0
result['cnc'] = 0
result['bcode'] = 0
result['before_spread'] = 0
result['before_cnc'] = 0
result['before_bcode'] = 0
result['link'] = 0
result['before_link'] = 0
result['uri'] = 0
result['before_uri'] = 0
result['file'] = 0
result['before_file'] = 0
result['totalTodayUriAnalysisCount'] = 0
result['totalTodayUriAnalysisCountNPC'] = 0
result['totalTodayUriAnalysisCountIMAS'] = 0
result['totalTodayMaliciousFileCount'] = 0
result['totalTodayMaliciousFileCountNPC'] = 0
result['totalTodayMaliciousFileCountIMAS'] = 0
result['totalTodayMaliciousFileCountZombieZero'] = 0
result['totalMaliciousUrlQuery'] = 0
result['totalYesterdayMaliciousUrlCount'] = 0
result['totalYesterdayMaliciousFileCount'] = 0
#region db 쿼리
for _row in results :
if _row['date'] == datetime.datetime.now().strftime("%Y-%m-%d"):
if _row['Code'] == "003":
result['spread'] = _row['count']
elif _row['Code'] == "001":
result['cnc'] = _row['count']
elif _row['Code'] == "-":
result['bcode'] = _row['count']
total += _row['count']
else:
if _row['Code'] == "003":
result['before_spread'] = _row['count']
elif _row['Code'] == "001":
result['before_cnc'] = _row['count']
elif _row['Code'] == "-":
result['before_bcode'] = _row['count']
before_total += _row['count']
#endregion eb 쿼리
index = app.config['ELASTICSEARCH_INDEX_HEAD'] + datetime.datetime.now().strftime('%Y.%m.%d')
#region es 쿼리
query = dashboard.topboardEsQuery("now-1d/d", "now/d")
es = Elasticsearch([{'host': app.config['ELASTICSEARCH_URI'], 'port': int(app.config['ELASTICSEARCH_PORT'])}])
res = es.search(index="gsp*", body=query, request_timeout=30) #url_crawlds 인덱스 문제로 임시 해결책 18-03-06
for _row in res['aggregations']['types']['buckets']:
if _row['key'] == "link_dna_tuple5":
result['link'] = _row['doc_count']
total += _row['doc_count']
elif _row['key'] == "url_jobs":
result['uri'] = _row['doc_count']
total += _row['doc_count']
elif _row['key'] == "url_crawleds":
result['file'] = _row['doc_count']
total += _row['doc_count']
index = app.config['ELASTICSEARCH_INDEX_HEAD'] + datetime.datetime.now().strftime('%Y.%m.%d')
query = dashboard.topboardEsQuery("now-2d/d", "now-1d/d")
es = Elasticsearch([{'host': app.config['ELASTICSEARCH_URI'], 'port': int(app.config['ELASTICSEARCH_PORT'])}])
res = es.search(index="gsp*", body=query, request_timeout=30) #url_crawlds 인덱스 문제로 임시 해결책 18-03-06
for _row in res['aggregations']['types']['buckets']:
if _row['key'] == "link_dna_tuple5":
result['before_link'] = _row['doc_count']
before_total += _row['doc_count']
elif _row['key'] == "url_jobs":
result['before_uri'] = _row['doc_count']
before_total += _row['doc_count']
elif _row['key'] == "url_crawleds":
result['before_file'] = _row['doc_count']
before_total += _row['doc_count']
#endregion es 쿼리
# result['bcode'] = 34
# result['before_bcode'] = 11
# result['spread'] = 35
# result['before_spread'] = 21
# result['before_cnc'] = 7
# result['file'] = 1752
# result['before_file'] = 1127
result['totalTodayUriAnalysisCount'] = totalTodayUriAnalysisCount
result['totalTodayMaliciousFileCount'] = totalTodayMaliciousFileCount
result['totalMaliciousUrlCount']= totalMaliciousUrlCountRDBMS
result['totalYesterdayMaliciousUrlCount'] = totalYesterdayMaliciousUrlCount
result['totalYesterdayMaliciousFileCount'] = totalYesterdayMaliciousFileCount
result['totalTodayUriAnalysisCountNPC'] = totalTodayUriAnalySisCountNPC
result['totalTodayUriAnalysisCountIMAS'] = totalTodayUriAnalySisCountIMAS
result['totalTodayMaliciousFileCountNPC'] = totalTodayMaliciousFileCountNPC
result['totalTodayMaliciousFileCountIMAS'] = totalTodayMaliciousFileCountIMAS
result['totalTodayMaliciousFileCountZombieZero'] = totalTodayMaliciousFileCountZombieZero
result['cnc'] = totalMaliciousFileCountRDBMS
result['cnc_before'] = 13
result['total'] = total
result['before_total'] = before_total
return json.dumps(result)
@blueprint_page.route('/getLineChart')
def getLineChartData():
query = dashboard.linechartQuery
results = db_session.execute(query)
results_list = []
for _row in results:
results_list.append(_row)
now = datetime.datetime.now()
timetable = []
chartdata = OrderedDict()
series = []
for _dd in range(0,10):
_now = datetime.datetime.now() - datetime.timedelta(days=9) + datetime.timedelta(days=_dd)
_series = dict()
_series['xaxis'] = _now.strftime('%Y-%m-%d')
_series['date'] = _now.strftime('%m월%d일')
isCncExists = False
isSpreadExists = False
isCode = False
for row in results_list:
if row['date'] == _series['xaxis']:
if row is not None:
if row['Code'] == '001':
isCncExists = True
_series['CNC'] = row['count']
elif row['Code'] == '003':
isSpreadExists = True
_series['spread'] = row['count']
elif row['Code'] == "-":
isCode = True
_series['bcode'] = row['count']
if isCncExists != True:
_series['CNC'] = 0
if isSpreadExists != True:
_series['spread'] = 0
if isCode != True:
_series['bcode'] = 0
series.append(_series)
chartdata['data'] = series
result = chartdata
return json.dumps(result)
@blueprint_page.route('/getBarChart')
def getBarChartData():
query = dashboard.barchartQuery
results = db_session.execute(query)
results_list = []
for _row in results:
results_list.append(_row)
now = datetime.datetime.now()
timetable = []
chartdata = OrderedDict()
series = []
for _dd in range(0,10):
_now = datetime.datetime.now() - datetime.timedelta(days=9) + datetime.timedelta(days=_dd)
_series = dict()
_series['xaxis'] = _now.strftime('%Y-%m-%d')
_series['date'] = _now.strftime('%m월%d일')
isExists = False
for row in results_list:
if row['date'] == _series['xaxis']:
if row is not None:
isExists = True
count = row['count']
_series['value'] = int(count)
if isExists != True:
_series['value'] = 0
series.append(_series)
chartdata['data'] = series
result = chartdata
return json.dumps(result)
@blueprint_page.route('/getGrid')
def getGrid():
query = dashboard.gridQuery
results = db_session.execute(query)
results_list = []
for _row in results:
dict_row = dict()
dict_row['date'] = _row[0]
dict_row['cnc'] = _row[1]
dict_row['spread'] = _row[2]
dict_row['bcode'] = _row[3]
dict_row['total'] = _row[1] + _row[2] + _row[3]
results_list.append(dict_row)
return json.dumps(results_list,cls=DecimalEncoder)
# for _item in res['aggregations']['topn']['hits']['hits']:
# _series = dict()
# _series['xaxis'] = _item['_source']['cl_ip']
# _series['yaxis'] = _item['_source']['cnt']
# _series['avg'] = res['aggregations']['avg']['value']
# _series['std_dev'] = res['aggregations']['ex_stats']['std_deviation_bounds']['upper']
@blueprint_page.route('/getWorldChart')
def getWorldChart():
es = Elasticsearch([{'host': app.config['ELASTICSEARCH_URI'], 'port': int(app.config['ELASTICSEARCH_PORT'])}])
timeSetting = request.args['timeSetting']
edTime = parser.parse(timeSetting) + datetime.timedelta(days=1)
str_dt = Local2UTC(parser.parse(timeSetting)).isoformat()
end_dt = Local2UTC(edTime).isoformat()
body = getWorldChartQuery(str_dt,end_dt, app.config['ANALYSIS_RESULTS_SECURITY_LEVEL_MIN'])
res = es.search(index=app.config['ELASTICSEARCH_INDEX'], doc_type="analysis_results", body=body, request_timeout=30)
mapData = []
latlong = dict()
i = 0
for doc in res['aggregations']['group_by_country2']['buckets']:
if doc['key'] == '':
continue
_nation = (_nation for _nation in nations if _nation["code"] == doc['key']).next()
mapData.append({"code": doc['key'], "name": _nation['nation'], 'value': doc['doc_count'], 'color': colorlist[i]})
if i >= colorlist.__len__()-1:
i = 0
else:
i = i +1
latlong[doc['key']] = { "latitude" : _nation['latitude'], "longitude" : _nation['longitude']}
# mapData = []
# latlong = dict()
# mapData.append({"code": 'KR', "name": "korea", 'value': 6, 'color': colorlist[0]})
# mapData.append({"code" : 'CN', "name" : "china", 'value' : 21, 'color' : colorlist[1] } )
# mapData.append({"code": 'US', "name": "us", 'value': 7, 'color': colorlist[2]})
# latlong['KR'] = { "latitude" : 37.00, "longitude" : 127.30 }
# latlong['CN'] = {"latitude": 35.00, "longitude": 105.00}
# latlong['US'] = {"latitude": 38.00, "longitude": -97.00}
chartdata = OrderedDict()
chartdata['latlong'] = latlong
chartdata['mapData'] = mapData
return json.dumps(chartdata)
colorlist = [
'#eea638',
'#d8854f',
'#de4c4f',
'#86a965',
'#d8854f',
'#8aabb0',
'#eea638'
]

authors: ["neogeo-s@hanmail.net"] | author_id: neogeo-s@hanmail.net

blob_id: 164b19c6ae1bd8b400a1296c2b5d3a93cddf328d | directory_id: 9b9a02657812ea0cb47db0ae411196f0e81c5152
path: /repoData/RobSpectre-Call-Your-Family/allPythonContent.py
content_id: dfea466529f966662dd2984fc56f28256ffdd134 | detected_licenses: [] | license_type: no_license
repo_name: aCoffeeYin/pyreco | snapshot_id: cb42db94a3a5fc134356c9a2a738a063d0898572 | revision_id: 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | branch_name: refs/heads/master
visit_date: 2020-12-14T14:10:05.763693 | revision_date: 2016-06-27T05:15:15 | committer_date: 2016-06-27T05:15:15
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 39,193 | extension: py
content:
__FILENAME__ = app
import os
import signal
from flask import Flask
from flask import render_template
from flask import url_for
from flask import request
from twilio import twiml
# Declare and configure application
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('local_settings.py')
@app.route('/', methods=['GET', 'POST'])
def index():
# Make sure we have this host configured properly.
config_errors = []
for option in ['TWILIO_ACCOUNT_SID', 'TWILIO_AUTH_TOKEN']:
if not app.config[option]:
config_errors.append("%s is not configured for this host."
% option)
# Define important links
params = {
'sms_request_url': url_for('.sms', _external=True),
'config_errors': config_errors}
return render_template('thankyou.html', params=params)
@app.route('/voice', methods=['POST'])
def voice():
response = twiml.Response()
with response.dial(callerId=app.config['TWILIO_CALLER_ID'],
timeLimit="600") as dial:
dial.number(request.form['PhoneNumber'])
return str(response)
@app.route('/inbound', methods=['POST'])
def inbound():
response = twiml.Response()
response.play('/static/sounds/inbound.mp3')
return str(response)
@app.route('/sms', methods=['GET', 'POST'])
def sms():
# Respond to any text inbound text message with a link to the app!
response = twiml.Response()
response.sms("This number belongs to the Twilio Call Your Family app " \
"for Boston. Please visit " \
"http://callyourfamily.twilio.ly for more info.")
return str(response)
# Handles SIGTERM so that we don't get an error when Heroku wants or needs to
# restart the dyno
def graceful_shutdown(signum, frame):
exit()
signal.signal(signal.SIGTERM, graceful_shutdown)
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
if port == 5000:
app.debug = True
app.run(host='0.0.0.0', port=port)
########NEW FILE########
__FILENAME__ = configure
'''
Hackpack Configure
A script to configure your TwiML apps and Twilio phone numbers to use your
hackpack's Heroku app.
Usage:
Auto-configure using your local_settings.py:
python configure.py
Deploy to new Twilio number and App Sid:
python configure.py --new
Deploy to specific App Sid:
python configure.py --app APxxxxxxxxxxxxxx
Deploy to specific Twilio number:
python configure.py --number +15556667777
Deploy to custom domain:
python configure.py --domain example.com
'''
from optparse import OptionParser
import sys
import subprocess
import logging
from twilio.rest import TwilioRestClient
from twilio import TwilioRestException
import local_settings
class Configure(object):
def __init__(self, account_sid=local_settings.TWILIO_ACCOUNT_SID,
auth_token=local_settings.TWILIO_AUTH_TOKEN,
app_sid=local_settings.TWILIO_APP_SID,
phone_number=local_settings.TWILIO_CALLER_ID,
voice_url='/voice',
sms_url='/sms',
host=None):
self.account_sid = account_sid
self.auth_token = auth_token
self.app_sid = app_sid
self.phone_number = phone_number
self.host = host
self.voice_url = voice_url
self.sms_url = sms_url
self.friendly_phone_number = None
def start(self):
logging.info("Configuring your Twilio hackpack...")
logging.debug("Checking if credentials are set...")
if not self.account_sid:
raise ConfigurationError("ACCOUNT_SID is not set in " \
"local_settings.")
if not self.auth_token:
raise ConfigurationError("AUTH_TOKEN is not set in " \
"local_settings.")
logging.debug("Creating Twilio client...")
self.client = TwilioRestClient(self.account_sid, self.auth_token)
logging.debug("Checking if host is set.")
if not self.host:
logging.debug("Hostname is not set...")
self.host = self.getHerokuHostname()
# Check if urls are set.
logging.debug("Checking if all urls are set.")
if "http://" not in self.voice_url:
self.voice_url = self.host + self.voice_url
logging.debug("Setting voice_url with host: %s" % self.voice_url)
if "http://" not in self.sms_url:
self.sms_url = self.host + self.sms_url
logging.debug("Setting sms_url with host: %s" % self.sms_url)
if self.configureHackpack(self.voice_url, self.sms_url,
self.app_sid, self.phone_number):
# Configure Heroku environment variables.
self.setHerokuEnvironmentVariables(
TWILIO_ACCOUNT_SID=self.account_sid,
TWILIO_AUTH_TOKEN=self.auth_token,
TWILIO_APP_SID=self.app_sid,
TWILIO_CALLER_ID=self.phone_number)
# Ensure local environment variables are set.
self.printLocalEnvironmentVariableCommands(
TWILIO_ACCOUNT_SID=self.account_sid,
TWILIO_AUTH_TOKEN=self.auth_token,
TWILIO_APP_SID=self.app_sid,
TWILIO_CALLER_ID=self.phone_number)
logging.info("Hackpack is now configured. Call %s to test!"
% self.friendly_phone_number)
else:
logging.error("There was an error configuring your hackpack. " \
"Weak sauce.")
def configureHackpack(self, voice_url, sms_url, app_sid,
phone_number, *args):
# Check if app sid is configured and available.
if not app_sid:
app = self.createNewTwiMLApp(voice_url, sms_url)
else:
app = self.setAppRequestUrls(app_sid, voice_url, sms_url)
# Check if phone_number is set.
if not phone_number:
number = self.purchasePhoneNumber()
else:
number = self.retrievePhoneNumber(phone_number)
# Configure phone number to use App Sid.
logging.info("Setting %s to use application sid: %s" %
(number.friendly_name, app.sid))
try:
self.client.phone_numbers.update(number.sid,
voice_application_sid=app.sid,
sms_application_sid=app.sid)
logging.debug("Number set.")
except TwilioRestException, e:
raise ConfigurationError("An error occurred setting the " \
"application sid for %s: %s" % (number.friendly_name,
e))
# We're done!
if number:
return number
else:
raise ConfigurationError("An unknown error occurred configuring " \
"request urls for this hackpack.")
def createNewTwiMLApp(self, voice_url, sms_url):
logging.debug("Asking user to create new app sid...")
i = 0
while True:
i = i + 1
choice = raw_input("Your APP_SID is not configured in your " \
"local_settings. Create a new one? [y/n]").lower()
if choice == "y":
try:
logging.info("Creating new application...")
app = self.client.applications.create(voice_url=voice_url,
sms_url=sms_url,
friendly_name="Hackpack for Heroku and Flask")
break
except TwilioRestException, e:
raise ConfigurationError("Your Twilio app couldn't " \
"be created: %s" % e)
elif choice == "n" or i >= 3:
raise ConfigurationError("Your APP_SID setting must be " \
"set in local_settings.")
else:
logging.error("Please choose yes or no with a 'y' or 'n'")
if app:
logging.info("Application created: %s" % app.sid)
self.app_sid = app.sid
return app
else:
raise ConfigurationError("There was an unknown error " \
"creating your TwiML application.")
def setAppRequestUrls(self, app_sid, voice_url, sms_url):
logging.info("Setting request urls for application sid: %s" \
% app_sid)
try:
app = self.client.applications.update(app_sid, voice_url=voice_url,
sms_url=sms_url,
friendly_name="Hackpack for Heroku and Flask")
except TwilioRestException, e:
if "HTTP ERROR 404" in str(e):
raise ConfigurationError("This application sid was not " \
"found: %s" % app_sid)
else:
raise ConfigurationError("An error setting the request URLs " \
"occured: %s" % e)
if app:
logging.debug("Updated application sid: %s " % app.sid)
return app
else:
raise ConfigurationError("An unknown error occuring "\
"configuring request URLs for app sid.")
def retrievePhoneNumber(self, phone_number):
logging.debug("Retrieving phone number: %s" % phone_number)
try:
logging.debug("Getting sid for phone number: %s" % phone_number)
number = self.client.phone_numbers.list(
phone_number=phone_number)
except TwilioRestException, e:
raise ConfigurationError("An error setting the request URLs " \
"occured: %s" % e)
if number:
logging.debug("Retrieved sid: %s" % number[0].sid)
self.friendly_phone_number = number[0].friendly_name
return number[0]
else:
raise ConfigurationError("An unknown error occurred retrieving " \
"number: %s" % phone_number)
def purchasePhoneNumber(self):
logging.debug("Asking user to purchase phone number...")
i = 0
while True:
i = i + 1
# Find number to purchase
choice = raw_input("Your CALLER_ID is not configured in your " \
"local_settings. Purchase a new one? [y/n]").lower()
if choice == "y":
break
elif choice == "n" or i >= 3:
raise ConfigurationError("To configure this " \
"hackpack CALLER_ID must set in local_settings or " \
"a phone number must be purchased.")
else:
logging.error("Please choose yes or no with a 'y' or 'n'")
logging.debug("Confirming purchase...")
i = 0
while True:
i = i + 1
# Confirm phone number purchase.
choice = raw_input("Are you sure you want to purchase? " \
"Your Twilio account will be charged $1. [y/n]").lower()
if choice == "y":
try:
logging.debug("Purchasing phone number...")
number = self.client.phone_numbers.purchase(
area_code="646")
logging.debug("Phone number purchased: %s" %
number.friendly_name)
break
except TwilioRestException, e:
raise ConfigurationError("Your Twilio app couldn't " \
"be created: %s" % e)
elif choice == "n" or i >= 3:
raise ConfigurationError("To configure this " \
"hackpack CALLER_ID must set in local_settings or " \
"a phone number must be purchased.")
else:
logging.error("Please choose yes or no with a 'y' or 'n'")
# Return number or error out.
if number:
logging.debug("Returning phone number: %s " % number.friendly_name)
self.phone_number = number.phone_number
self.friendly_phone_number = number.friendly_name
return number
else:
raise ConfigurationError("There was an unknown error purchasing " \
"your phone number.")
def getHerokuHostname(self, git_config_path='./.git/config'):
logging.debug("Getting hostname from git configuration file: %s" \
% git_config_path)
# Load git configuration
try:
logging.debug("Loading git config...")
git_config = file(git_config_path).readlines()
except IOError, e:
raise ConfigurationError("Could not find .git config. Does it " \
"still exist? Failed path: %s" % e)
logging.debug("Finding Heroku remote in git configuration...")
subdomain = None
for line in git_config:
if "git@heroku.com" in line:
s = line.split(":")
subdomain = s[1].replace('.git', '')
logging.debug("Heroku remote found: %s" % subdomain)
if subdomain:
host = "http://%s.herokuapp.com" % subdomain.strip()
logging.debug("Returning full host: %s" % host)
return host
else:
raise ConfigurationError("Could not find Heroku remote in " \
"your .git config. Have you created the Heroku app?")
def printLocalEnvironmentVariableCommands(self, **kwargs):
logging.info("Copy/paste these commands to set your local " \
"environment to use this hackpack...")
print "\n"
for k, v in kwargs.iteritems():
if v:
print "export %s=%s" % (k, v)
print "\n"
def setHerokuEnvironmentVariables(self, **kwargs):
logging.info("Setting Heroku environment variables...")
envvars = ["%s=%s" % (k, v) for k, v in kwargs.iteritems() if v]
envvars.insert(0, "heroku")
envvars.insert(1, "config:add")
return subprocess.call(envvars)
class ConfigurationError(Exception):
def __init__(self, message):
#Exception.__init__(self, message)
logging.error(message)
# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(message)s')
# Parser configuration
usage = "Twilio Hackpack Configurator - an easy way to configure " \
"configure your hackpack!\n%prog [options] arg1 arg2"
parser = OptionParser(usage=usage, version="Twilio Hackpack Configurator 1.0")
parser.add_option("-S", "--account_sid", default=None,
help="Use a specific Twilio ACCOUNT_SID.")
parser.add_option("-K", "--auth_token", default=None,
help="Use a specific Twilio AUTH_TOKEN.")
parser.add_option("-n", "--new", default=False, action="store_true",
help="Purchase new Twilio phone number and configure app to use " \
"your hackpack.")
parser.add_option("-N", "--new_app", default=False, action="store_true",
help="Create a new TwiML application sid to use for your " \
"hackpack.")
parser.add_option("-a", "--app_sid", default=None,
help="Configure specific AppSid to use your hackpack.")
parser.add_option("-#", "--phone-number", default=None,
help="Configure specific Twilio number to use your hackpack.")
parser.add_option("-v", "--voice_url", default=None,
help="Set the route for your Voice Request URL: (e.g. '/voice').")
parser.add_option("-s", "--sms_url", default=None,
help="Set the route for your SMS Request URL: (e.g. '/sms').")
parser.add_option("-d", "--domain", default=None,
help="Set a custom domain.")
parser.add_option("-D", "--debug", default=False,
action="store_true", help="Turn on debug output.")
def main():
(options, args) = parser.parse_args()
# Configurator configuration :)
configure = Configure()
# Options tree
if options.account_sid:
configure.account_sid = options.account_sid
if options.auth_token:
configure.auth_token = options.auth_token
if options.new:
configure.phone_number = None
if options.new_app:
configure.app_sid = None
if options.app_sid:
configure.app_sid = options.app_sid
if options.phone_number:
configure.phone_number = options.phone_number
if options.voice_url:
configure.voice_url = options.voice_url
if options.sms_url:
configure.sms_url = options.sms_url
if options.domain:
configure.host = options.domain
if options.debug:
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s - %(message)s')
configure.start()
if __name__ == "__main__":
main()
########NEW FILE########
__FILENAME__ = local_settings
'''
Configuration Settings
'''
''' Uncomment to configure using the file.
WARNING: Be careful not to post your account credentials on GitHub.
TWILIO_ACCOUNT_SID = "ACxxxxxxxxxxxxx"
TWILIO_AUTH_TOKEN = "yyyyyyyyyyyyyyyy"
TWILIO_APP_SID = "APzzzzzzzzz"
TWILIO_CALLER_ID = "+17778889999"
IOS_URI = "http://phobos.apple.com/whatever"
ANDROID_URI = "http://market.google.com/somethingsomething"
'''
# Begin Heroku configuration - configured through environment variables.
import os
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID', None)
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN', None)
TWILIO_CALLER_ID = os.environ.get('TWILIO_CALLER_ID', None)
TWILIO_APP_SID = os.environ.get('TWILIO_APP_SID', None)
IOS_URI = os.environ.get('IOS_URI',
'http://itunes.apple.com/us/app/plants-vs.-zombies/id350642635?mt=8&uo=4')
ANDROID_URI = os.environ.get('ANDROID_URI',
'http://market.android.com/details?id=com.popcap.pvz_row')
WEB_URI = os.environ.get('WEB_URI',
'http://www.popcap.com/games/plants-vs-zombies/pc')
########NEW FILE########
__FILENAME__ = context
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import configure
from app import app
########NEW FILE########
__FILENAME__ = test_configure
import unittest
from mock import Mock
from mock import patch
import subprocess
from twilio.rest import TwilioRestClient
from .context import configure
class ConfigureTest(unittest.TestCase):
def setUp(self):
self.configure = configure.Configure(
account_sid="ACxxxxx",
auth_token="yyyyyyyy",
phone_number="+15555555555",
app_sid="APzzzzzzzzz")
self.configure.client = TwilioRestClient(self.configure.account_sid,
self.configure.auth_token)
class TwilioTest(ConfigureTest):
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_createNewTwiMLApp(self, MockApp, MockApps):
# Mock the Applications resource and its create method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
MockApp.return_value
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.createNewTwiMLApp(self.configure.voice_url,
self.configure.sms_url)
# Assert
self.configure.client.applications.create.assert_called_once_with(
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name="Hackpack for Heroku and Flask")
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_createNewTwiMLAppNegativeInput(self, MockApp, MockApps):
# Mock the Applications resource and its create method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
MockApp.return_value
# Mock our input .
configure.raw_input = lambda _: 'n'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.createNewTwiMLApp,
self.configure.voice_url, self.configure.sms_url)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_setAppSidRequestUrls(self, MockApp, MockApps):
# Mock the Applications resource and its update method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.update.return_value = \
MockApp.return_value
# Test
self.configure.setAppRequestUrls(self.configure.app_sid,
self.configure.voice_url,
self.configure.sms_url)
# Assert
self.configure.client.applications.update.assert_called_once_with(
self.configure.app_sid,
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name='Hackpack for Heroku and Flask')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_retrievePhoneNumber(self, MockPhoneNumber, MockPhoneNumbers):
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.list.return_value = \
[mock_phone_number]
# Test
self.configure.retrievePhoneNumber(self.configure.phone_number)
# Assert
self.configure.client.phone_numbers.list.assert_called_once_with(
phone_number=self.configure.phone_number)
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_purchasePhoneNumber(self, MockPhoneNumber, MockPhoneNumbers):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = \
mock_phone_number
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.purchasePhoneNumber()
# Assert
self.configure.client.phone_numbers.purchase.assert_called_once_with(
area_code="646")
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_purchasePhoneNumberNegativeInput(self, MockPhoneNumbers,
MockPhoneNumber):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = \
mock_phone_number
# Mock our input.
configure.raw_input = lambda _: 'n'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.purchasePhoneNumber)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_configure(self, MockPhoneNumber, MockPhoneNumbers, MockApp,
MockApps):
# Mock the Applications resource and its update method.
mock_app = MockApp.return_value
mock_app.sid = self.configure.app_sid
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.update.return_value = \
mock_app
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.sid = "PN123"
mock_phone_number.friendly_name = "(555) 555-5555"
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.list.return_value = \
[mock_phone_number]
# Test
self.configure.configureHackpack(self.configure.voice_url,
self.configure.sms_url,
self.configure.app_sid,
self.configure.phone_number)
# Assert
self.configure.client.applications.update.assert_called_once_with(
self.configure.app_sid,
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name='Hackpack for Heroku and Flask')
self.configure.client.phone_numbers.update.assert_called_once_with(
"PN123",
voice_application_sid=self.configure.app_sid,
sms_application_sid=self.configure.app_sid)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_configureNoApp(self, MockPhoneNumber, MockPhoneNumbers, MockApp,
MockApps):
# Mock the Applications resource and its update method.
mock_app = MockApp.return_value
mock_app.sid = self.configure.app_sid
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
mock_app
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.sid = "PN123"
mock_phone_number.friendly_name = "(555) 555-5555"
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.list.return_value = \
[mock_phone_number]
# Set AppSid to None
self.configure.app_sid = None
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.configureHackpack(self.configure.voice_url,
self.configure.sms_url,
self.configure.app_sid,
self.configure.phone_number)
# Assert
self.configure.client.applications.create.assert_called_once_with(
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name="Hackpack for Heroku and Flask")
self.configure.client.phone_numbers.update.assert_called_once_with(
"PN123",
voice_application_sid=mock_app.sid,
sms_application_sid=mock_app.sid)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_configureNoPhoneNumber(self, MockPhoneNumber, MockPhoneNumbers,
MockApp, MockApps):
# Mock the Applications resource and its update method.
mock_app = MockApp.return_value
mock_app.sid = self.configure.app_sid
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.update.return_value = \
mock_app
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.sid = "PN123"
mock_phone_number.friendly_name = "(555) 555-5555"
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase.return_value = \
mock_phone_number
# Set AppSid to None
self.configure.phone_number = None
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.configureHackpack(self.configure.voice_url,
self.configure.sms_url,
self.configure.app_sid,
self.configure.phone_number)
# Assert
self.configure.client.applications.update.assert_called_once_with(
self.configure.app_sid,
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name='Hackpack for Heroku and Flask')
self.configure.client.phone_numbers.update.assert_called_once_with(
"PN123",
voice_application_sid=self.configure.app_sid,
sms_application_sid=self.configure.app_sid)
@patch.object(subprocess, 'call')
@patch.object(configure.Configure, 'configureHackpack')
def test_start(self, mock_configureHackpack, mock_call):
mock_call.return_value = None
self.configure.host = 'http://look-here-snacky-11211.herokuapp.com'
self.configure.start()
mock_configureHackpack.assert_called_once_with(
'http://look-here-snacky-11211.herokuapp.com/voice',
'http://look-here-snacky-11211.herokuapp.com/sms',
self.configure.app_sid,
self.configure.phone_number)
@patch.object(subprocess, 'call')
@patch.object(configure.Configure, 'configureHackpack')
@patch.object(configure.Configure, 'getHerokuHostname')
def test_startWithoutHostname(self, mock_getHerokuHostname,
mock_configureHackpack, mock_call):
mock_call.return_value = None
mock_getHerokuHostname.return_value = \
'http://look-here-snacky-11211.herokuapp.com'
self.configure.start()
mock_configureHackpack.assert_called_once_with(
'http://look-here-snacky-11211.herokuapp.com/voice',
'http://look-here-snacky-11211.herokuapp.com/sms',
self.configure.app_sid,
self.configure.phone_number)
class HerokuTest(ConfigureTest):
def test_getHerokuHostname(self):
test = self.configure.getHerokuHostname(
git_config_path='./tests/test_assets/good_git_config')
self.assertEquals(test, 'http://look-here-snacky-11211.herokuapp.com')
def test_getHerokuHostnameNoSuchFile(self):
self.assertRaises(configure.ConfigurationError,
self.configure.getHerokuHostname,
git_config_path='/tmp')
def test_getHerokuHostnameNoHerokuRemote(self):
self.assertRaises(configure.ConfigurationError,
self.configure.getHerokuHostname,
git_config_path='./tests/test_assets/bad_git_config')
@patch.object(subprocess, 'call')
def test_setHerokuEnvironmentVariables(self, mock_call):
mock_call.return_value = None
self.configure.setHerokuEnvironmentVariables(
TWILIO_ACCOUNT_SID=self.configure.account_sid,
TWILIO_AUTH_TOKEN=self.configure.auth_token,
TWILIO_APP_SID=self.configure.app_sid,
TWILIO_CALLER_ID=self.configure.phone_number)
mock_call.assert_called_once_with(["heroku", "config:add",
'%s=%s' % ('TWILIO_ACCOUNT_SID', self.configure.account_sid),
'%s=%s' % ('TWILIO_CALLER_ID', self.configure.phone_number),
'%s=%s' % ('TWILIO_AUTH_TOKEN', self.configure.auth_token),
'%s=%s' % ('TWILIO_APP_SID', self.configure.app_sid)])
class MiscellaneousTest(unittest.TestCase):
def test_configureWithoutAccountSid(self):
test = configure.Configure(account_sid=None, auth_token=None,
phone_number=None, app_sid=None)
self.assertRaises(configure.ConfigurationError,
test.start)
def test_configureWithoutAuthToken(self):
test = configure.Configure(account_sid='ACxxxxxxx', auth_token=None,
phone_number=None, app_sid=None)
self.assertRaises(configure.ConfigurationError,
test.start)
class InputTest(ConfigureTest):
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_createNewTwiMLAppWtfInput(self, MockApp, MockApps):
# Mock the Applications resource and its create method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
MockApp.return_value
# Mock our input
configure.raw_input = Mock()
configure.raw_input.return_value = 'wtf'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.createNewTwiMLApp, self.configure.voice_url,
self.configure.sms_url)
self.assertTrue(configure.raw_input.call_count == 3, "Prompt did " \
"not appear three times, instead: %i" %
configure.raw_input.call_count)
self.assertFalse(self.configure.client.applications.create.called,
"Unexpected request to create AppSid made.")
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_purchasePhoneNumberWtfInput(self, MockPhoneNumbers,
MockPhoneNumber):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = \
mock_phone_number
# Mock our input.
configure.raw_input = Mock()
configure.raw_input.return_value = 'wtf'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.purchasePhoneNumber)
self.assertTrue(configure.raw_input.call_count == 3, "Prompt did " \
"not appear three times, instead: %i" %
configure.raw_input.call_count)
self.assertFalse(self.configure.client.phone_numbers.purchase.called,
"Unexpected request to create AppSid made.")
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_purchasePhoneNumberWtfInputConfirm(self,
MockPhoneNumbers, MockPhoneNumber):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = \
mock_phone_number
# Mock our input.
configure.raw_input = Mock()
configure.raw_input.side_effect = ['y', 'wtf', 'wtf', 'wtf']
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.purchasePhoneNumber)
self.assertTrue(configure.raw_input.call_count == 4, "Prompt did " \
"not appear three times, instead: %i" %
configure.raw_input.call_count)
self.assertFalse(self.configure.client.phone_numbers.purchase.called,
"Unexpectedly requested phone number purchase.")
########NEW FILE########
__FILENAME__ = test_twilio
import unittest
from .context import app
class TwiMLTest(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
def assertTwiML(self, response):
self.assertTrue("<Response>" in response.data, "Did not find " \
"<Response>: %s" % response.data)
self.assertTrue("</Response>" in response.data, "Did not find " \
"</Response>: %s" % response.data)
self.assertEqual("200 OK", response.status)
def sms(self, body, path='/sms', number='+15555555555'):
params = {
'SmsSid': 'SMtesting',
'AccountSid': 'ACtesting',
'From': number,
'To': '+16666666666',
'Body': body,
'ApiVersion': '2010-04-01',
'Direction': 'inbound'}
return self.app.post(path, data=params)
def call(self, path='/voice', caller_id='+15555555555', digits=None,
phone_number=None):
params = {
'CallSid': 'CAtesting',
'AccountSid': 'ACtesting',
'From': caller_id,
'To': '+16666666666',
'CallStatus': 'ringing',
'ApiVersion': '2010-04-01',
'Direction': 'inbound'}
if digits:
params['Digits'] = digits
if phone_number:
params['PhoneNumber'] = phone_number
return self.app.post(path, data=params)
class TwilioTests(TwiMLTest):
def test_voice(self):
response = self.call(phone_number="+15557778888")
self.assertTwiML(response)
def test_inbound(self):
response = self.call(path='/inbound')
self.assertTwiML(response)
def test_sms(self):
response = self.sms("Test")
self.assertTwiML(response)
########NEW FILE########
__FILENAME__ = test_web
import unittest
from .context import app
app.config['TWILIO_ACCOUNT_SID'] = 'ACxxxxxx'
app.config['TWILIO_AUTH_TOKEN'] = 'yyyyyyyyy'
app.config['TWILIO_CALLER_ID'] = '+15558675309'
app.config['IOS_URI'] = \
'http://itunes.apple.com/us/app/plants-vs.-zombies/id350642635?mt=8&uo=4'
app.config['ANDROID_URI'] = \
'http://market.android.com/details?id=com.popcap.pvz_row'
app.config['WEB_URI'] = 'http://www.popcap.com/games/plants-vs-zombies/pc'
class WebTest(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
class IndexTests(WebTest):
def test_index(self):
response = self.app.get('/')
self.assertEqual("200 OK", response.status)
########NEW FILE########
|
[
"dyangUCI@github.com"
] |
dyangUCI@github.com
|
19377378073d0491068a8850c5ec1a202b416b4e
|
e514bbdf8e0abe5ef0b58b94fe5f7d2afb38ea6b
|
/test_suite/shared_data/frame_order/cam/rotor/perm_pseudo_ellipse_z_le_x_le_y_alt/pseudo-ellipse.py
|
b1dec4d76ae1ec4b73e2fd5cf18f201d538cd854
|
[] |
no_license
|
edward-dauvergne/relax
|
98ad63703e68a4535bfef3d6c0529e07cc84ff29
|
9710dc0f2dfe797f413756272d4bec83cf6ca1c9
|
refs/heads/master
| 2020-04-07T04:25:25.382027
| 2017-01-04T15:38:09
| 2017-01-04T15:38:09
| 46,500,334
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,967
|
py
|
# Optimise all 3 pseudo-ellipse permutations for the CaM rotor synthetic frame order data.
# These 3 solutions should mimic the rotor solution.
# Python module imports.
from numpy import array, cross, float64, pi, transpose, zeros
from numpy.linalg import norm
import sys
# relax module imports.
from lib.geometry.coord_transform import spherical_to_cartesian
from lib.geometry.rotations import R_to_euler_zyz
from lib.text.sectioning import section
# The real rotor parameter values.
AVE_POS_X, AVE_POS_Y, AVE_POS_Z = [ -21.269217407269576, -3.122610661328414, -2.400652421655998]
AVE_POS_ALPHA, AVE_POS_BETA, AVE_POS_GAMMA = [5.623469076122531, 0.435439405668396, 5.081265529106499]
AXIS_THETA = 0.9600799785953431
AXIS_PHI = 4.0322755062196229
CONE_SIGMA_MAX = 30.0 / 360.0 * 2.0 * pi
# Reconstruct the rotation axis.
AXIS = zeros(3, float64)
spherical_to_cartesian([1, AXIS_THETA, AXIS_PHI], AXIS)
# Create a full normalised axis system.
x = array([1, 0, 0], float64)
y = cross(AXIS, x)
y /= norm(y)
x = cross(y, AXIS)
x /= norm(x)
AXES = transpose(array([x, y, AXIS], float64))
# The Euler angles.
eigen_alpha, eigen_beta, eigen_gamma = R_to_euler_zyz(AXES)
# Printout.
print("Torsion angle: %s" % CONE_SIGMA_MAX)
print("Rotation axis: %s" % AXIS)
print("Full axis system:\n%s" % AXES)
print("cross(x, y) = z:\n %s = %s" % (cross(AXES[:, 0], AXES[:, 1]), AXES[:, 2]))
print("cross(x, z) = -y:\n %s = %s" % (cross(AXES[:, 0], AXES[:, 2]), -AXES[:, 1]))
print("cross(y, z) = x:\n %s = %s" % (cross(AXES[:, 1], AXES[:, 2]), AXES[:, 0]))
print("Euler angles (alpha, beta, gamma): (%.15f, %.15f, %.15f)" % (eigen_alpha, eigen_beta, eigen_gamma))
# Load the optimised rotor state for creating the pseudo-ellipse data pipes.
state.load(state='frame_order_true', dir='..')
# Set up the dynamic system.
value.set(param='ave_pos_x', val=AVE_POS_X)
value.set(param='ave_pos_y', val=AVE_POS_Y)
value.set(param='ave_pos_z', val=AVE_POS_Z)
value.set(param='ave_pos_alpha', val=AVE_POS_ALPHA)
value.set(param='ave_pos_beta', val=AVE_POS_BETA)
value.set(param='ave_pos_gamma', val=AVE_POS_GAMMA)
value.set(param='eigen_alpha', val=eigen_alpha)
value.set(param='eigen_beta', val=eigen_beta)
value.set(param='eigen_gamma', val=eigen_gamma)
# Set the torsion angle to the rotor opening half-angle.
value.set(param='cone_sigma_max', val=0.1)
# Set the cone opening angles.
value.set(param='cone_theta_x', val=0.3)
value.set(param='cone_theta_y', val=0.6)
# Fix the true pivot point.
frame_order.pivot([ 37.254, 0.5, 16.7465], fix=True)
# Change the model.
frame_order.select_model('pseudo-ellipse')
# Loop over the 3 permutations.
pipe_name = 'pseudo-ellipse'
tag = ''
for perm in [None, 'A', 'B']:
# The original permutation.
if perm == None:
# Title printout.
section(file=sys.stdout, text="Pseudo-ellipse original permutation")
# Create a new data base data pipe for the pseudo-ellipse.
pipe.copy(pipe_from='frame order', pipe_to='pseudo-ellipse')
pipe.switch(pipe_name='pseudo-ellipse')
# Operations for the 'A' and 'B' permutations.
else:
# Title printout.
section(file=sys.stdout, text="Pseudo-ellipse permutation %s" % perm)
# The pipe name and tag.
pipe_name = 'pseudo-ellipse perm %s' % perm
tag = '_perm_%s' % perm
# Create a new data pipe.
pipe.copy(pipe_from='frame order', pipe_to=pipe_name)
pipe.switch(pipe_name=pipe_name)
# Permute the axes.
frame_order.permute_axes(permutation=perm)
# Create a pre-optimisation PDB representation.
frame_order.pdb_model(ave_pos=None, rep='fo_orig'+tag, compress_type=2, force=True)
# High precision optimisation.
frame_order.num_int_pts(num=10000)
minimise.execute('simplex', func_tol=1e-4)
# Create the PDB representation.
frame_order.pdb_model(ave_pos=None, rep='fo'+tag, compress_type=2, force=True)
# Sanity check.
pipe.display()
|
[
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] |
bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5
|
8282401112b1b4d464f5eb4541b0d79ec6a226e1
|
af67d7d0f56da5d8ac9a6fbd4b0aedcebf5a6434
|
/buglab/models/evaluate.py
|
6451f664c2e61472f2c8c46d28d3d1c9e7e5661b
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/neurips21-self-supervised-bug-detection-and-repair
|
23ef751829dc90d83571cd68c8703e2c985e4521
|
4e51184a63aecd19174ee40fc6433260ab73d56e
|
refs/heads/main
| 2023-05-23T12:23:41.870343
| 2022-01-19T12:16:19
| 2022-01-19T12:16:19
| 417,330,374
| 90
| 23
|
MIT
| 2022-08-30T11:54:55
| 2021-10-15T01:14:33
|
Python
|
UTF-8
|
Python
| false
| false
| 11,936
|
py
|
#!/usr/bin/env python
"""
Usage:
evaluate.py [options] MODEL_FILENAME TEST_DATA_PATH
Options:
--aml Run this in Azure ML
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--minibatch-size=<size> The minibatch size. [default: 300]
--assume-buggy Never predict NO_BUG
--eval-only-no-bug Evaluate only NO_BUG samples.
--restore-path=<path> The path to previous model file for starting from previous checkpoint.
--limit-num-elements=<num> Limit the number of elements to evaluate on.
--sequential Do not parallelize data loading. Makes debugging easier.
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import math
from collections import defaultdict
from pathlib import Path
from typing import List, Tuple
import numpy as np
import torch
from docopt import docopt
from dpu_utils.utils import RichPath, run_and_debug
from buglab.models.gnn import GnnBugLabModel
from buglab.utils.msgpackutils import load_all_msgpack_l_gz
def run(arguments):
azure_info_path = arguments.get("--azure-info", None)
data_path = RichPath.create(arguments["TEST_DATA_PATH"], azure_info_path)
lim = None if arguments["--limit-num-elements"] is None else int(arguments["--limit-num-elements"])
data = load_all_msgpack_l_gz(data_path, shuffle=True, limit_num_yielded_elements=lim)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path = Path(arguments["MODEL_FILENAME"])
model, nn = GnnBugLabModel.restore_model(model_path, device)
predictions = model.predict(data, nn, device, parallelize=not arguments["--sequential"])
num_samples, num_location_correct = 0, 0
num_buggy_samples, num_repaired_correct, num_repaired_given_location_correct = 0, 0, 0
# Count warnings correct if a bug is reported, irrespectively if it's localized correctly
num_buggy_and_raised_warning, num_non_buggy_and_no_warning = 0, 0
localization_data_per_scout = defaultdict(lambda: np.array([0, 0], dtype=np.int32))
repair_data_per_scout = defaultdict(lambda: np.array([0, 0], dtype=np.int32))
bug_detection_logprobs: List[
Tuple[float, bool, bool, bool, bool]
] = [] # prediction_prob, has_bug, predicted_no_bug, location_correct, rewrite_given_location_is_correct
for datapoint, location_logprobs, rewrite_probs in predictions:
if arguments.get("--assume-buggy", False):
del location_logprobs[-1]
norm = float(torch.logsumexp(torch.tensor(list(location_logprobs.values())), dim=-1))
location_logprobs = {p: v - norm for p, v in location_logprobs.items()}
target_fix_action_idx = datapoint["target_fix_action_idx"]
sample_has_bug = target_fix_action_idx is not None
if sample_has_bug and arguments.get("--eval-only-no-bug", False):
continue
num_samples += 1
# Compute the predicted rewrite:
predicted_node_idx = max(location_logprobs, key=lambda k: location_logprobs[k])
prediction_logprob = location_logprobs[predicted_node_idx]
predicted_rewrite_idx, predicted_rewrite_logprob = None, -math.inf
for rewrite_idx, (rewrite_node_idx, rewrite_logprob) in enumerate(
zip(datapoint["graph"]["reference_nodes"], rewrite_probs)
):
if rewrite_node_idx == predicted_node_idx and rewrite_logprob > predicted_rewrite_logprob:
predicted_rewrite_idx = rewrite_idx
predicted_rewrite_logprob = rewrite_logprob
# Compute the predicted rewrite given the correct target location:
if not sample_has_bug:
assert not arguments.get("--assume-buggy", False)
ground_node_idx = -1
target_rewrite_scout = "NoBug"
rewrite_given_location_is_correct = None
else:
ground_node_idx = datapoint["graph"]["reference_nodes"][target_fix_action_idx]
target_rewrite_scout = datapoint["candidate_rewrite_metadata"][target_fix_action_idx][0]
predicted_rewrite_idx_given_location = None
predicted_rewrite_logprob_given_location = -math.inf
for rewrite_idx, (rewrite_node_idx, rewrite_logprob) in enumerate(
zip(datapoint["graph"]["reference_nodes"], rewrite_probs)
):
if rewrite_node_idx == ground_node_idx and rewrite_logprob > predicted_rewrite_logprob_given_location:
predicted_rewrite_idx_given_location = rewrite_idx
predicted_rewrite_logprob_given_location = rewrite_logprob
rewrite_given_location_is_correct = predicted_rewrite_idx_given_location == target_fix_action_idx
if rewrite_given_location_is_correct:
num_repaired_given_location_correct += 1
repair_data_per_scout[target_rewrite_scout] += [1, 1]
else:
repair_data_per_scout[target_rewrite_scout] += [0, 1]
num_buggy_samples += 1
location_is_correct = predicted_node_idx == ground_node_idx
if location_is_correct:
num_location_correct += 1
localization_data_per_scout[target_rewrite_scout] += [1, 1]
else:
localization_data_per_scout[target_rewrite_scout] += [0, 1]
if sample_has_bug and predicted_node_idx != -1:
num_buggy_and_raised_warning += 1
elif not sample_has_bug and predicted_node_idx == -1:
num_non_buggy_and_no_warning += 1
if location_is_correct and predicted_rewrite_idx == target_fix_action_idx:
num_repaired_correct += 1
bug_detection_logprobs.append(
(
prediction_logprob,
sample_has_bug,
predicted_node_idx != -1,
location_is_correct,
rewrite_given_location_is_correct,
)
)
print("==================================")
print(
f"Accuracy (Localization & Repair) {num_repaired_correct/num_samples:.2%} ({num_repaired_correct}/{num_samples})"
)
print(
f"Bug Detection Accuracy (no Localization or Repair) {(num_buggy_and_raised_warning + num_non_buggy_and_no_warning)/num_samples:.2%} ({num_buggy_and_raised_warning + num_non_buggy_and_no_warning}/{num_samples})"
)
if num_buggy_samples > 0:
print(
f"Bug Detection (no Localization or Repair) False Negatives: {1 - num_buggy_and_raised_warning/num_buggy_samples:.2%}"
)
else:
print("Bug Detection (no Localization or Repair) False Negatives: NaN (0/0)")
if num_samples - num_buggy_samples > 0:
print(
f"Bug Detection (no Localization or Repair) False Positives: {1 - num_non_buggy_and_no_warning / (num_samples - num_buggy_samples):.2%}"
)
else:
print("Bug Detection (no Localization or Repair) False Positives: NaN (0/0)")
print("==================================")
print(f"Localization Accuracy {num_location_correct/num_samples:.2%} ({num_location_correct}/{num_samples})")
for scout_name, (num_correct, total) in sorted(localization_data_per_scout.items(), key=lambda item: item[0]):
print(f"\t{scout_name}: {num_correct/total:.1%} ({num_correct}/{total})")
print("=========================================")
if num_buggy_samples == 0:
print("--eval-only-no-bug is True. Repair Accuracy Given Location cannot be computed.")
else:
print(
f"Repair Accuracy Given Location {num_repaired_given_location_correct/num_buggy_samples:.2%} ({num_repaired_given_location_correct}/{num_buggy_samples})"
)
for scout_name, (num_correct, total) in sorted(repair_data_per_scout.items(), key=lambda item: item[0]):
print(f"\t{scout_name}: {num_correct/total:.1%} ({num_correct}/{total})")
bug_detection_logprobs = sorted(bug_detection_logprobs, reverse=True)
detection_true_warnings = np.array(
[has_bug and correct_location for _, has_bug, _, correct_location, _ in bug_detection_logprobs]
)
true_warnings = np.array(
[
has_bug and correct_location and correct_rewrite_at_location
for _, has_bug, _, correct_location, correct_rewrite_at_location in bug_detection_logprobs
]
)
detection_false_warnings = np.array(
[
predicted_is_buggy and not predicted_correct_location
for _, has_bug, predicted_is_buggy, predicted_correct_location, _ in bug_detection_logprobs
]
)
false_warnings = np.array(
[
(predicted_is_buggy and not predicted_correct_location)
or (predicted_is_buggy and not predicted_correct_rewrite_at_location)
for _, has_bug, predicted_is_buggy, predicted_correct_location, predicted_correct_rewrite_at_location in bug_detection_logprobs
]
)
detection_true_warnings_up_to_threshold = np.cumsum(detection_true_warnings)
detection_false_warnings_up_to_threshold = np.cumsum(detection_false_warnings)
false_discovery_rate = detection_false_warnings_up_to_threshold / (
detection_true_warnings_up_to_threshold + detection_false_warnings_up_to_threshold
)
detection_precision = detection_true_warnings_up_to_threshold / (
detection_true_warnings_up_to_threshold + detection_false_warnings_up_to_threshold
)
detection_recall = detection_true_warnings_up_to_threshold / sum(
1 for _, has_bug, _, _, _ in bug_detection_logprobs if has_bug
)
detection_false_no_bug_warnings = np.array(
[
predicted_is_buggy and not has_bug
for _, has_bug, predicted_is_buggy, predicted_correct_location, _ in bug_detection_logprobs
]
)
no_bug_precision = 1 - np.cumsum(detection_false_no_bug_warnings) / (
sum(1 for _, has_bug, _, _, _ in bug_detection_logprobs if has_bug) + 1e-10
)
threshold_x = np.linspace(0, 1, num=100)
thresholds = np.exp(np.array([b[0] for b in bug_detection_logprobs]))
print("x = np." + repr(threshold_x))
print("### False Detection Rate ###")
fdr = np.interp(threshold_x, thresholds[::-1], false_discovery_rate[::-1], right=0)
print("fdr = np." + repr(fdr))
print("### Detection Precision ###")
detection_precision = np.interp(threshold_x, thresholds[::-1], detection_precision[::-1], right=0)
print("detection_precision = np." + repr(detection_precision))
print("### Detection Recall ###")
detection_recall = np.interp(threshold_x, thresholds[::-1], detection_recall[::-1], right=0)
print("detection_recall = np." + repr(detection_recall))
print("### Detection NO_BUG Precision ###")
no_bug_precision = np.interp(threshold_x, thresholds[::-1], no_bug_precision[::-1], right=0)
print("no_bug_precision = np." + repr(no_bug_precision))
true_warnings_up_to_threshold = np.cumsum(true_warnings)
false_warnings_up_to_threshold = np.cumsum(false_warnings)
precision = true_warnings_up_to_threshold / (true_warnings_up_to_threshold + false_warnings_up_to_threshold)
recall = true_warnings_up_to_threshold / sum(1 for _, has_bug, _, _, _ in bug_detection_logprobs if has_bug)
print("### Precision (Detect and Repair) ###")
precision = np.interp(threshold_x, thresholds[::-1], precision[::-1], right=0)
print("precision = np." + repr(precision))
print("### Recall (Detect and Repair) ###")
recall = np.interp(threshold_x, thresholds[::-1], recall[::-1], right=0)
print("recall = np." + repr(recall))
if __name__ == "__main__":
args = docopt(__doc__)
run_and_debug(lambda: run(args), args.get("--debug", False))
|
[
"miallama@microsoft.com"
] |
miallama@microsoft.com
|
c253644311d7fe2b49eac8dac03132f4f1cdd8ba
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/302/66800/submittedfiles/testes.py
|
7c56144edfa042834f7911ced03a33c1d0ca5381
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
# -*- coding: utf-8 -*-
# START HERE BELOW
idade = int(input('Digite sua idade'))
if idade >= 18:
    print("maior de idade")
else:
    print('menor de idade')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
72f1a1f05e457a9f11bdee1d6b7442f9a3fe8ee7
|
e436e729b0a78c7062311e0f48c55dd25d13faef
|
/tests/core/test_utils.py
|
b2bc8e17e86be01fde91a5b6c1f2ba12e3fdf488
|
[
"MIT"
] |
permissive
|
cad106uk/market-access-public-frontend
|
71ff602f4817666ed2837432b912f108010a30a1
|
092149105b5ddb1307c613123e94750b0b8b39ac
|
refs/heads/master
| 2023-02-03T18:48:45.838135
| 2020-12-24T09:38:56
| 2020-12-24T09:38:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
import datetime
from unittest import TestCase
from apps.core.utils import convert_to_snake_case, chain, get_future_date
class UtilsTestCase(TestCase):
def test_get_future_date(self):
now = datetime.datetime.now()
future_date_str = get_future_date(60)
extra_days = datetime.datetime.strptime(future_date_str, "%a, %d-%b-%Y %H:%M:%S GMT") - now
# +- 1 day is acceptable here
assert extra_days.days in range(59, 61)
def test_convert_to_snake_case(self):
test_string = "Some Test String"
assert "some_test_string" == convert_to_snake_case(test_string)
def test_chain(self):
l1 = (1, 2, 3)
l2 = [4, 5, 6]
assert [*l1, *l2] == list(chain(l1, l2))
|
[
"noreply@github.com"
] |
cad106uk.noreply@github.com
|
1808c14f89677eda21489c6ca86615cddc39f671
|
762db71e9bb66ab5821bd91eff7e0fa813f795a0
|
/code/python/echomesh/util/math/LargestInvertible.py
|
d29937b39ff5a64bf2a144c83e74a0f9632c2172
|
[
"MIT"
] |
permissive
|
huochaip/echomesh
|
0954d5bca14d58c0d762a5d3db4e6dcd246bf765
|
be668971a687b141660fd2e5635d2fd598992a01
|
refs/heads/master
| 2020-06-17T20:21:47.216434
| 2016-08-16T16:49:56
| 2016-08-16T16:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import fractions
import math
from six.moves import xrange
# http://stackoverflow.com/questions/4798654/modular-multiplicative-inverse-function-in-python
# from https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
def egcd(a, b):
x, y, u, v = 0, 1, 1, 0
while a:
q, r = b // a, b % a
m, n = x - u*q, y - v*q
b, a, x, y, u, v = a, r, u, v, m, n
return b, x, y
def modinv(a, m):
g, x, y = egcd(a, m)
if g == 1:
return x % m
raise Exception('modular inverse does not exist')
def largest_invertible(x):
"""In the ring Mod(x), returns the invertible number nearest to x / 2, and
its inverse."""
if x >= 5:
for i in xrange(int(x / 2), 1, -1):
try:
ii = (i if i < (x / 2) else x - i)
return ii, modinv(ii, x)
except:
pass
return 1, 1
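# A small usage sketch, not part of the original module; the values below were
# checked by hand against the functions above.
if __name__ == '__main__':
    # 5 * 5 = 25, and 25 mod 12 == 1, so 5 is its own inverse mod 12.
    assert modinv(5, 12) == 5
    # 6 is not invertible mod 12, so the search settles on 5 (self-inverse).
    assert largest_invertible(12) == (5, 5)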
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
3e788e30fcf2f685d56dbf028eb1b93f22e164be
|
6a07912090214567f77e9cd941fb92f1f3137ae6
|
/cs212/Problem Set 1/2.py
|
97b6cb647b9c05d52c0f4bd57cf754e82586bf20
|
[] |
no_license
|
rrampage/udacity-code
|
4ab042b591fa3e9adab0183d669a8df80265ed81
|
bbe968cd27da7cc453eada5b2aa29176b0121c13
|
refs/heads/master
| 2020-04-18T08:46:00.580903
| 2012-08-25T08:44:24
| 2012-08-25T08:44:24
| 5,352,942
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,333
|
py
|
# cs212 ; Problem Set 1 ; 2
# CS 212, hw1-2: Jokers Wild
#
# -----------------
# User Instructions
#
# Write a function best_wild_hand(hand) that takes as
# input a 7-card hand and returns the best 5 card hand.
# In this problem, it is possible for a hand to include
# jokers. Jokers will be treated as 'wild cards' which
# can take any rank or suit of the same color. The
# black joker, '?B', can be used as any spade or club
# and the red joker, '?R', can be used as any heart
# or diamond.
#
# The itertools library may be helpful. Feel free to
# define multiple functions if it helps you solve the
# problem.
#
# -----------------
# Grading Notes
#
# Multiple correct answers will be accepted in cases
# where the best hand is ambiguous (for example, if
# you have 4 kings and 3 queens, there are three best
# hands: 4 kings along with any of the three queens).
import itertools
def best_wild_hand(hand):
"Try all values for jokers in all 5-card selections."
# Your code here
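# One possible sketch of the approach (not the reference solution; the helper
# names below are new): expand each joker into every card of the matching
# colour, then rank all 5-card selections of each completed 7-card hand with
# hand_rank, which is defined further down in this file.
_ALL_RANKS = '23456789TJQKA'
_BLACK_CARDS = [r + s for r in _ALL_RANKS for s in 'SC']
_RED_CARDS = [r + s for r in _ALL_RANKS for s in 'HD']
def _replacements(card):
    "All cards a single card can stand for; '?B' and '?R' are wild."
    if card == '?B':
        return _BLACK_CARDS
    if card == '?R':
        return _RED_CARDS
    return [card]
def best_wild_hand_sketch(hand):
    "Best 5-card hand over every joker substitution and 5-card selection."
    return max((five
                for cards in itertools.product(*map(_replacements, hand))
                for five in itertools.combinations(cards, 5)),
               key=hand_rank)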
def test_best_wild_hand():
assert (sorted(best_wild_hand("6C 7C 8C 9C TC 5C ?B".split()))
== ['7C', '8C', '9C', 'JC', 'TC'])
assert (sorted(best_wild_hand("TD TC 5H 5C 7C ?R ?B".split()))
== ['7C', 'TC', 'TD', 'TH', 'TS'])
assert (sorted(best_wild_hand("JD TC TH 7C 7D 7S 7H".split()))
== ['7C', '7D', '7H', '7S', 'JD'])
return 'test_best_wild_hand passes'
# ------------------
# Provided Functions
#
# You may want to use some of the functions which
# you have already defined in the unit to write
# your best_hand function.
def hand_rank(hand):
"Return a value indicating the ranking of a hand."
ranks = card_ranks(hand)
if straight(ranks) and flush(hand):
return (8, max(ranks))
elif kind(4, ranks):
return (7, kind(4, ranks), kind(1, ranks))
elif kind(3, ranks) and kind(2, ranks):
return (6, kind(3, ranks), kind(2, ranks))
elif flush(hand):
return (5, ranks)
elif straight(ranks):
return (4, max(ranks))
elif kind(3, ranks):
return (3, kind(3, ranks), ranks)
elif two_pair(ranks):
return (2, two_pair(ranks), ranks)
elif kind(2, ranks):
return (1, kind(2, ranks), ranks)
else:
return (0, ranks)
def card_ranks(hand):
"Return a list of the ranks, sorted with higher first."
ranks = ['--23456789TJQKA'.index(r) for r, s in hand]
ranks.sort(reverse = True)
return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks
def flush(hand):
"Return True if all the cards have the same suit."
suits = [s for r,s in hand]
return len(set(suits)) == 1
def straight(ranks):
"""Return True if the ordered
ranks form a 5-card straight."""
return (max(ranks)-min(ranks) == 4) and len(set(ranks)) == 5
def kind(n, ranks):
"""Return the first rank that this hand has
exactly n-of-a-kind of. Return None if there
is no n-of-a-kind in the hand."""
for r in ranks:
if ranks.count(r) == n: return r
return None
def two_pair(ranks):
"""If there are two pair here, return the two
ranks of the two pairs, else None."""
pair = kind(2, ranks)
lowpair = kind(2, list(reversed(ranks)))
if pair and lowpair != pair:
return (pair, lowpair)
else:
return None
|
[
"raunak1001@gmail.com"
] |
raunak1001@gmail.com
|
33791d780f140caa7af658d364f82aa0c8a86f28
|
aa1972e6978d5f983c48578bdf3b51e311cb4396
|
/nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/cluster/clusternodegroup_streamidentifier_binding.py
|
e684932798111ad39584954df57a8ca7c17454bc
|
[
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MayankTahil/nitro-ide
|
3d7ddfd13ff6510d6709bdeaef37c187b9f22f38
|
50054929214a35a7bb19ed10c4905fffa37c3451
|
refs/heads/master
| 2020-12-03T02:27:03.672953
| 2017-07-05T18:09:09
| 2017-07-05T18:09:09
| 95,933,896
| 2
| 5
| null | 2017-07-05T16:51:29
| 2017-07-01T01:03:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,873
|
py
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class clusternodegroup_streamidentifier_binding(base_resource) :
""" Binding class showing the streamidentifier that can be bound to clusternodegroup.
"""
def __init__(self) :
self._identifiername = None
self._name = None
self.___count = 0
@property
def name(self) :
r"""Name of the nodegroup to which you want to bind a cluster node or an entity.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the nodegroup to which you want to bind a cluster node or an entity.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def identifiername(self) :
r"""stream identifier and rate limit identifier that need to be bound to this nodegroup.
"""
try :
return self._identifiername
except Exception as e:
raise e
@identifiername.setter
def identifiername(self, identifiername) :
r"""stream identifier and rate limit identifier that need to be bound to this nodegroup.
"""
try :
self._identifiername = identifiername
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(clusternodegroup_streamidentifier_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.clusternodegroup_streamidentifier_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = clusternodegroup_streamidentifier_binding()
updateresource.name = resource.name
updateresource.identifiername = resource.identifiername
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [clusternodegroup_streamidentifier_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].identifiername = resource[i].identifiername
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = clusternodegroup_streamidentifier_binding()
deleteresource.name = resource.name
deleteresource.identifiername = resource.identifiername
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [clusternodegroup_streamidentifier_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].identifiername = resource[i].identifiername
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch clusternodegroup_streamidentifier_binding resources.
"""
try :
if not name :
obj = clusternodegroup_streamidentifier_binding()
response = obj.get_resources(service, option_)
else :
obj = clusternodegroup_streamidentifier_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of clusternodegroup_streamidentifier_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = clusternodegroup_streamidentifier_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count clusternodegroup_streamidentifier_binding resources configued on NetScaler.
"""
try :
obj = clusternodegroup_streamidentifier_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of clusternodegroup_streamidentifier_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = clusternodegroup_streamidentifier_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class clusternodegroup_streamidentifier_binding_response(base_response) :
def __init__(self, length=1) :
self.clusternodegroup_streamidentifier_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.clusternodegroup_streamidentifier_binding = [clusternodegroup_streamidentifier_binding() for _ in range(length)]
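# A hypothetical usage sketch (assumes an already logged-in nitro service
# session named "client"; the nodegroup name "ng1" is made up):
#
#   bindings = clusternodegroup_streamidentifier_binding.get(client, name="ng1")
#   total = clusternodegroup_streamidentifier_binding.count(client, name="ng1")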
|
[
"Mayank@Mandelbrot.local"
] |
Mayank@Mandelbrot.local
|
95fd505de6e612fe4910474599f2a8c9473be8bd
|
b31c0f0d1e8a3bf575e6b86591ec1071cd9a8a3d
|
/mlonmcu/platform/espidf/__init__.py
|
511bc4c5d06b1e9b61f42c245b0d3c14dfe8b50d
|
[
"Apache-2.0"
] |
permissive
|
tum-ei-eda/mlonmcu
|
e75238cd7134771217153c740301a8327a7b93b1
|
f1b934d5bd42b5471d21bcf257bf88c055698918
|
refs/heads/main
| 2023-08-07T15:12:13.466944
| 2023-07-15T13:26:21
| 2023-07-15T13:26:21
| 448,808,394
| 22
| 4
|
Apache-2.0
| 2023-06-09T23:00:19
| 2022-01-17T08:20:05
|
Python
|
UTF-8
|
Python
| false
| false
| 877
|
py
|
#
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""MLonMCU ESP-IDF platform"""
# pylint: disable=wildcard-import, redefined-builtin
from .espidf import EspIdfPlatform
__all__ = ["EspIdfPlatform"]
|
[
"philipp.van-kempen@tum.de"
] |
philipp.van-kempen@tum.de
|
125671ac083b8ab5d77142fb5411d4afa74e234c
|
7673df8dec063e83aa01187d5a02ca8b4ac3761d
|
/Basic/functions.py
|
8f1badb2bde7f5c4aa358988eb3330bc69a6532a
|
[] |
no_license
|
jedthompson99/Python_Course
|
cc905b42a26a2aaf008ce5cb8aaaa6b3b66df61e
|
618368390f8a7825459a20b4bc28e80c22da5dda
|
refs/heads/master
| 2023-07-01T08:39:11.309175
| 2021-08-09T17:28:32
| 2021-08-09T17:28:32
| 361,793,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
def full_name(first, last):
print(f'{first} {last}')
full_name('Kristine', 'Hudgens')
def auth(email, password):
if email == 'kristine@hudgens.com' and password == 'secret':
print('You are authorized')
else:
print('You are not authorized')
auth('kristine@hudgens.com', 'asdf')
def hundred():
for num in range(1, 101):
print(num)
hundred()
def counter(max_value):
for num in range(1, max_value):
print(num)
counter(501)
|
[
"jedthompson@gmail.com"
] |
jedthompson@gmail.com
|
ecfe49b03baa1334ccc75a2b3bdbf0eb1e4e241a
|
4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422
|
/_0163_Missing_Ranges.py
|
b5c8d5b3ad79b18657c10fbcb233bf4e9f0f2ccd
|
[] |
no_license
|
mingweihe/leetcode
|
a2cfee0e004627b817a3c0321bb9c74128f8c1a7
|
edff905f63ab95cdd40447b27a9c449c9cefec37
|
refs/heads/master
| 2021-06-19T07:46:46.897952
| 2021-05-02T05:13:17
| 2021-05-02T05:13:17
| 205,740,338
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
class Solution(object):
def findMissingRanges(self, nums, lower, upper):
"""
:type nums: List[int]
:type lower: int
:type upper: int
:rtype: List[str]
"""
res = []
for x in nums:
if x == lower:
lower += 1
elif lower < x:
if lower + 1 == x:
res.append(str(lower))
else:
res.append('%s->%s' % (lower, x-1))
lower = x + 1
if lower == upper:
res.append(str(upper))
elif lower < upper:
res.append('%s->%s' % (lower, upper))
return res
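# Worked example, traced by hand against the method above (not part of the
# original file): Solution().findMissingRanges([0, 1, 3, 50, 75], 0, 99)
# returns ['2', '4->49', '51->74', '76->99'].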
|
[
"10962421@qq.com"
] |
10962421@qq.com
|
4aa90e4762ebc9bc01901de23e573ec8e5b9bca2
|
da9942c175c7289ff9ad1e8de0fb817ff2103292
|
/62.py
|
3a9dc97ed2467f894184da448ff2fe60116df59a
|
[] |
no_license
|
db2398/set7
|
325acf2415642a82b6c0efb48142ed65208f6049
|
fa5a2e4b75344368225e60da7a1acf27c522c692
|
refs/heads/master
| 2020-06-14T14:33:04.014545
| 2019-07-03T11:18:53
| 2019-07-03T11:18:53
| 195,027,788
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
t=input()
sd=set(t)
if(sd=={"0","1"}):
print("yes")
else:
print("no")
|
[
"noreply@github.com"
] |
db2398.noreply@github.com
|
a149aaf98e52f7341e3dcc68f0efb14590b43c19
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02700/s274282920.py
|
922c558418997e6ed17552a908b8b8cb32996882
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
A,B,C,D = (int(x) for x in input().split())
while True:
C -= B
if C <= 0:
print('Yes')
break
else:
A -= D
if A <= 0:
print('No')
break
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6b854b39440765b0f5c80e3c3f73c5fdf6d4f8b8
|
4d10250b7ce80730414468e5e0060a207253a6d0
|
/jplephem/test.py
|
bc8ec152f0e375d2117b0930f489d0e20a305d78
|
[] |
no_license
|
NatalieP-J/python
|
c68fdb84a6c9c432b34e57ae4e376f652451578a
|
c74bcfabde4704939550875bc42fc3e8a5dbc5bf
|
refs/heads/master
| 2021-01-23T03:08:06.448979
| 2013-08-21T04:04:11
| 2013-08-21T04:04:11
| 10,916,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
"""Tests for ``jplephem``.
See the accompanying ``jpltest`` module for a more intense numerical
test suite that can verify that ``jplephem`` delivers, in a large number
of cases, the same results as when the ephemerides are run at JPL. This
smaller and more feature-oriented suite can be run with::
python -m unittest discover jplephem
"""
import numpy as np
from functools import partial
from jplephem import Ephemeris, DateError
from unittest import TestCase
class Tests(TestCase):
def check0(self, x, y, z, dx, dy, dz):
eq = partial(self.assertAlmostEqual, delta=1.0)
eq(x, 39705023.28)
eq(y, 131195345.65)
eq(z, 56898495.41)
eq(dx, -2524248.19)
eq(dy, 619970.11)
eq(dz, 268928.26)
def check1(self, x, y, z, dx, dy, dz):
eq = partial(self.assertAlmostEqual, delta=1.0)
eq(x, -144692624.00)
eq(y, -32707965.14)
eq(z, -14207167.26)
eq(dx, 587334.38)
eq(dy, -2297419.36)
eq(dz, -996628.74)
def test_scalar_input(self):
import de421
e = Ephemeris(de421)
self.check0(*e.compute('earthmoon', 2414994.0))
self.check1(*e.compute('earthmoon', 2415112.5))
def test_array_input(self):
import de421
e = Ephemeris(de421)
v = e.compute('earthmoon', np.array([2414994.0, 2415112.5]))
v = np.array(v)
self.check0(*v[:,0])
self.check1(*v[:,1])
def test_ephemeris_end_date(self):
import de421
e = Ephemeris(de421)
x, y, z = e.position('earthmoon', e.jomega)
self.assertAlmostEqual(x, -2.81196460e+07, delta=1.0)
self.assertAlmostEqual(y, 1.32000379e+08, delta=1.0)
self.assertAlmostEqual(z, 5.72139011e+07, delta=1.0)
def test_too_early_date(self):
import de421
e = Ephemeris(de421)
self.assertRaises(DateError, e.compute, 'earthmoon', e.jalpha - 0.01)
def test_too_late_date(self):
import de421
e = Ephemeris(de421)
self.assertRaises(DateError, e.compute, 'earthmoon', e.jomega + 16.01)
|
[
"natalie.price.jones@mail.utoronto.ca"
] |
natalie.price.jones@mail.utoronto.ca
|
b416e000c05055c966ef50e7bead35df903c7b05
|
8b8a06abf18410e08f654fb8f2a9efda17dc4f8f
|
/app/request_session.py
|
f6a0cb38f59f5353b537a1d430baac107a5c80f0
|
[] |
no_license
|
corporacionrst/software_RST
|
d903dfadf87c97c692a821a9dd3b79b343d8d485
|
7a621c4f939b5c01fd222434deea920e2447c214
|
refs/heads/master
| 2021-04-26T23:23:27.241893
| 2018-10-05T23:21:34
| 2018-10-05T23:21:34
| 123,985,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
from sistema.usuarios.models import Perfil
def getPerfil(request):
return Perfil.objects.get(usuario=request.user)
# def getStore(request):
# return Perfil.objects.get(usuario=request.user).tienda
def OKadmin(request):
if request.user.is_authenticated():
if "ADMIN" in Perfil.objects.get(usuario=request.user).puesto.nombre:
return True
return False
def OKbodega(request):
if request.user.is_authenticated():
ppl=Perfil.objects.get(usuario=request.user).puesto.nombre
if "BODEGA" in ppl:
return True
elif "ADMIN" in ppl:
return True
return False
def OKconta(request):
if request.user.is_authenticated():
ppl=Perfil.objects.get(usuario=request.user).puesto.nombre
if "CONTA" in ppl:
return True
elif "ADMIN" in ppl:
return True
return False
def OKmultitienda(request):
if request.user.is_authenticated():
return Perfil.objects.get(usuario=request.user).multitienda
return False
def OKcobros(request):
if request.user.is_authenticated():
ppl=Perfil.objects.get(usuario=request.user).puesto.nombre
if "COBROS" in ppl or "ADMIN" in ppl:
return True
return False
def OKventas(request):
if request.user.is_authenticated():
ppl=Perfil.objects.get(usuario=request.user).puesto.nombre
if "VENTA" in ppl:
return True
elif "ADMIN" in ppl:
return True
return False
def OKpeople(request):
if request.user.is_authenticated():
return True
return False
def sumar_DATO(request,numero):
val=Perfil.objects.get(usuario=request.user)
if numero=="4":
v = val.documento4.split("~")
val.documento4=v[0]+"~"+v[1]+"~"+str(int(v[2])+1)
val.save()
return v[0]+"~"+v[1]+"~"+str(int(v[2])+1)
def obtenerPlantilla(request):
if OKadmin(request):
return "admin.html"
elif OKconta(request):
return "conta.html"
elif OKbodega(request):
return "bodega.html"
elif OKcobros(request):
return "cobros.html"
else:
return "ventas.html"
|
[
"admin@corporacionrst.com"
] |
admin@corporacionrst.com
|
28baac5a621d65ae8bfeae46ed657209afc3d95a
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/shared/utils/requesters/tokenrequester.py
|
1ace65ad86b0304adeff25edcc9173651083c9f2
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 3,878
|
py
|
# 2015.11.10 21:29:45 Central Europe (standard time)
# Embedded file name: scripts/client/gui/shared/utils/requesters/TokenRequester.py
import cPickle
from functools import partial
import BigWorld
from adisp import async
from constants import REQUEST_COOLDOWN, TOKEN_TYPE
from debug_utils import LOG_CURRENT_EXCEPTION
from TokenResponse import TokenResponse
from ids_generators import SequenceIDGenerator
def _getAccountRepository():
import Account
return Account.g_accountRepository
class TokenRequester(object):
__idsGen = SequenceIDGenerator()
def __init__(self, tokenType, wrapper = TokenResponse, cache = True):
super(TokenRequester, self).__init__()
if callable(wrapper):
self.__wrapper = wrapper
else:
raise ValueError, 'Wrapper is invalid: {0}'.format(wrapper)
self.__tokenType = tokenType
self.__callback = None
self.__lastResponse = None
self.__requestID = 0
self.__cache = cache
self.__timeoutCbID = None
return
def isInProcess(self):
return self.__callback is not None
def clear(self):
self.__callback = None
repository = _getAccountRepository()
if repository:
repository.onTokenReceived -= self.__onTokenReceived
self.__lastResponse = None
self.__requestID = 0
self.__clearTimeoutCb()
return
def getReqCoolDown(self):
return getattr(REQUEST_COOLDOWN, TOKEN_TYPE.COOLDOWNS[self.__tokenType], 10.0)
@async
def request(self, timeout = None, callback = None):
requester = getattr(BigWorld.player(), 'requestToken', None)
if not requester or not callable(requester):
if callback:
callback(None)
return
elif self.__cache and self.__lastResponse and self.__lastResponse.isValid():
if callback:
callback(self.__lastResponse)
return
else:
self.__callback = callback
self.__requestID = self.__idsGen.next()
if timeout:
self.__loadTimeout(self.__requestID, self.__tokenType, max(timeout, 0.0))
repository = _getAccountRepository()
if repository:
repository.onTokenReceived += self.__onTokenReceived
requester(self.__requestID, self.__tokenType)
return
def __onTokenReceived(self, requestID, tokenType, data):
if self.__requestID != requestID or tokenType != self.__tokenType:
return
else:
repository = _getAccountRepository()
if repository:
repository.onTokenReceived -= self.__onTokenReceived
try:
self.__lastResponse = self.__wrapper(**cPickle.loads(data))
except TypeError:
LOG_CURRENT_EXCEPTION()
self.__requestID = 0
if self.__callback is not None:
self.__callback(self.__lastResponse)
self.__callback = None
return
def __clearTimeoutCb(self):
if self.__timeoutCbID is not None:
BigWorld.cancelCallback(self.__timeoutCbID)
self.__timeoutCbID = None
return
def __loadTimeout(self, requestID, tokenType, timeout):
self.__clearTimeoutCb()
self.__timeoutCbID = BigWorld.callback(timeout, partial(self.__onTimeout, requestID, tokenType))
def __onTimeout(self, requestID, tokenType):
self.__clearTimeoutCb()
self.__onTokenReceived(requestID, tokenType, cPickle.dumps({'error': 'TIMEOUT'}, -1))
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\utils\requesters\tokenrequester.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:46 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
0b85630a9123b498e5f50e15d65fb027b4057127
|
1c6b5d41cc84c103ddb2db3689f61f47eaa2c13b
|
/CV_ToolBox-master/VOC_2_COCO/xml_helper.py
|
c97bb05d81b946aa96ae1e1ee0c4209f0f9cc9a7
|
[] |
no_license
|
Asher-1/DataAugmentation
|
e543a93912239939ccf77c98d9156c8ed15e1090
|
c9c143e7cccf771341d2f18aa11daf8b9f817670
|
refs/heads/main
| 2023-07-01T22:49:10.908175
| 2021-08-13T10:01:56
| 2021-08-13T10:01:56
| 395,602,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,300
|
py
|
# -*- coding=utf-8 -*-
import os
import xml.etree.ElementTree as ET
import xml.dom.minidom as DOC
# Extract bounding box info from an xml file, formatted as [[x_min, y_min, x_max, y_max, name]]
def parse_xml(xml_path):
    '''
    Input:
        xml_path: path to the xml file
    Output:
        bounding box info extracted from the xml file, as [[x_min, y_min, x_max, y_max, name]]
    '''
tree = ET.parse(xml_path)
root = tree.getroot()
objs = root.findall('object')
coords = list()
for ix, obj in enumerate(objs):
name = obj.find('name').text
box = obj.find('bndbox')
x_min = int(box[0].text)
y_min = int(box[1].text)
x_max = int(box[2].text)
y_max = int(box[3].text)
coords.append([x_min, y_min, x_max, y_max, name])
return coords
# Write bounding box info into an xml file; bounding box format is [[x_min, y_min, x_max, y_max, name]]
def generate_xml(img_name, coords, img_size, out_root_path):
    '''
    Input:
        img_name: image file name, e.g. a.jpg
        coords: list of coordinates, formatted as [[x_min, y_min, x_max, y_max, name]], where name is the label of the annotation
        img_size: image size, formatted as [h, w, c]
        out_root_path: root path for the output xml files
    '''
doc = DOC.Document()  # create the DOM document object
annotation = doc.createElement('annotation')
doc.appendChild(annotation)
title = doc.createElement('folder')
title_text = doc.createTextNode('Tianchi')
title.appendChild(title_text)
annotation.appendChild(title)
title = doc.createElement('filename')
title_text = doc.createTextNode(img_name)
title.appendChild(title_text)
annotation.appendChild(title)
source = doc.createElement('source')
annotation.appendChild(source)
title = doc.createElement('database')
title_text = doc.createTextNode('The Tianchi Database')
title.appendChild(title_text)
source.appendChild(title)
title = doc.createElement('annotation')
title_text = doc.createTextNode('Tianchi')
title.appendChild(title_text)
source.appendChild(title)
size = doc.createElement('size')
annotation.appendChild(size)
title = doc.createElement('width')
title_text = doc.createTextNode(str(img_size[1]))
title.appendChild(title_text)
size.appendChild(title)
title = doc.createElement('height')
title_text = doc.createTextNode(str(img_size[0]))
title.appendChild(title_text)
size.appendChild(title)
title = doc.createElement('depth')
title_text = doc.createTextNode(str(img_size[2]))
title.appendChild(title_text)
size.appendChild(title)
for coord in coords:
object = doc.createElement('object')
annotation.appendChild(object)
title = doc.createElement('name')
title_text = doc.createTextNode(coord[4])
title.appendChild(title_text)
object.appendChild(title)
pose = doc.createElement('pose')
pose.appendChild(doc.createTextNode('Unspecified'))
object.appendChild(pose)
truncated = doc.createElement('truncated')
truncated.appendChild(doc.createTextNode('1'))
object.appendChild(truncated)
difficult = doc.createElement('difficult')
difficult.appendChild(doc.createTextNode('0'))
object.appendChild(difficult)
bndbox = doc.createElement('bndbox')
object.appendChild(bndbox)
title = doc.createElement('xmin')
title_text = doc.createTextNode(str(int(float(coord[0]))))
title.appendChild(title_text)
bndbox.appendChild(title)
title = doc.createElement('ymin')
title_text = doc.createTextNode(str(int(float(coord[1]))))
title.appendChild(title_text)
bndbox.appendChild(title)
title = doc.createElement('xmax')
title_text = doc.createTextNode(str(int(float(coord[2]))))
title.appendChild(title_text)
bndbox.appendChild(title)
title = doc.createElement('ymax')
title_text = doc.createTextNode(str(int(float(coord[3]))))
title.appendChild(title_text)
bndbox.appendChild(title)
# Write the DOM document object to a file
f = open(os.path.join(out_root_path, img_name[:-4] + '.xml'), 'w')
f.write(doc.toprettyxml(indent=''))
f.close()
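# A minimal usage sketch (the file names and output directory are hypothetical):
#
#   coords = parse_xml('annotations/img_0001.xml')
#   generate_xml('img_0001.jpg', coords, img_size=[512, 512, 3], out_root_path='voc_xml')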
|
[
"ludahai19@163.com"
] |
ludahai19@163.com
|
a504526e7afcb6817c2878fa279d32e1dfc65ac6
|
72f5adc4b6f79dd40e975c86abcdbd3d0ccada86
|
/venv/bin/pip3.7
|
3786525abb997c921a0c0979436550edefdc7960
|
[] |
no_license
|
katrek/flask_vacancy_parser
|
77101604ec5bfeb47c009b9d8329b42d9d30bf4a
|
bbea4ae860bb78f7264b05e92c6664f8e4c4b3cf
|
refs/heads/master
| 2023-01-11T11:58:09.275448
| 2019-08-29T06:36:53
| 2019-08-29T06:36:53
| 204,666,913
| 1
| 1
| null | 2023-01-03T12:19:03
| 2019-08-27T09:22:35
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
7
|
#!/Users/artemtkachev/PycharmProjects/flask_parser2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
|
[
"akatrek@gmail.com"
] |
akatrek@gmail.com
|
03771c28af243c41e09a09630addbf700d35abaa
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/tools/android/native_lib_memory/parse_smaps.py
|
c167a327bfe8764e6dce320d671e151267c9aba7
|
[
"Zlib",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"APSL-2.0",
"MIT",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034
| 2020-11-16T01:40:14
| 2020-11-16T01:40:14
| 209,262,645
| 18
| 21
|
BSD-3-Clause
| 2023-03-23T06:20:07
| 2019-09-18T08:52:07
| null |
UTF-8
|
Python
| false
| false
| 9,429
|
py
|
#!/usr/bin/python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses /proc/[pid]/smaps on a device and shows the total amount of swap used.
"""
from __future__ import print_function
import argparse
import collections
import logging
import os
import re
import sys
_SRC_PATH = os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
class Mapping(object):
"""A single entry (mapping) in /proc/[pid]/smaps."""
def __init__(self, start, end, permissions, offset, pathname):
"""Initializes an instance.
Args:
start: (str) Start address of the mapping.
end: (str) End address of the mapping.
permissions: (str) Permission string, e.g. r-wp.
offset: (str) Offset into the file or 0 if this is not a file mapping.
pathname: (str) Path name, or pseudo-path, e.g. [stack]
"""
self.start = int(start, 16)
self.end = int(end, 16)
self.permissions = permissions
self.offset = int(offset, 16)
self.pathname = pathname.strip()
self.fields = collections.OrderedDict()
def AddField(self, line):
"""Adds a field to an entry.
Args:
line: (str) As it appears in /proc/[pid]/smaps.
"""
assert ':' in line
split_index = line.index(':')
k, v = line[:split_index].strip(), line[split_index + 1:].strip()
assert k not in self.fields
if v.endswith('kB'):
v = int(v[:-2])
self.fields[k] = v
def ToString(self):
"""Returns a string representation of a mapping.
The returned string is similar (but not identical) to the /proc/[pid]/smaps
entry it was generated from.
"""
lines = []
lines.append('%x-%x %s %x %s' % (
self.start, self.end, self.permissions, self.offset, self.pathname))
for name in self.fields:
format_str = None
if isinstance(self.fields[name], int):
format_str = '%s: %d kB'
else:
format_str = '%s: %s'
lines.append(format_str % (name, self.fields[name]))
return '\n'.join(lines)
def _ParseProcSmapsLines(lines):
SMAPS_ENTRY_START_RE = (
# start-end
'^([0-9a-f]{1,16})-([0-9a-f]{1,16}) '
# Permissions
'([r\-][w\-][x\-][ps]) '
# Offset
'([0-9a-f]{1,16}) '
# Device
'([0-9a-f]{2,3}:[0-9a-f]{2,3}) '
# Inode
'([0-9]*) '
# Pathname
'(.*)')
assert re.search(SMAPS_ENTRY_START_RE,
'35b1800000-35b1820000 r-xp 00000000 08:02 135522 '
'/usr/lib64/ld-2.15.so')
entry_re = re.compile(SMAPS_ENTRY_START_RE)
mappings = []
for line in lines:
match = entry_re.search(line)
if match:
(start, end, perms, offset, _, _, pathname) = match.groups()
mappings.append(Mapping(start, end, perms, offset, pathname))
else:
mappings[-1].AddField(line)
return mappings
def ParseProcSmaps(device, pid, store_file=False):
"""Parses /proc/[pid]/smaps on a device, and returns a list of Mapping.
Args:
device: (device_utils.DeviceUtils) device to parse the file from.
pid: (int) PID of the process.
store_file: (bool) Whether to also write the file to disk.
Returns:
[Mapping] all the mappings in /proc/[pid]/smaps.
"""
command = ['cat', '/proc/%d/smaps' % pid]
lines = device.RunShellCommand(command, check_return=True)
if store_file:
with open('smaps-%d' % pid, 'w') as f:
f.write('\n'.join(lines))
return _ParseProcSmapsLines(lines)
def _GetPageTableFootprint(device, pid):
"""Returns the page table footprint for a process in kiB."""
command = ['cat', '/proc/%d/status' % pid]
lines = device.RunShellCommand(command, check_return=True)
for line in lines:
if line.startswith('VmPTE:'):
value = int(line[len('VmPTE: '):line.index('kB')])
return value
def _SummarizeMapping(mapping, metric):
return '%s %s %s: %d kB (Total Size: %d kB)' % (
hex(mapping.start),
mapping.pathname, mapping.permissions, metric,
(mapping.end - mapping.start) / 1024)
def _PrintMappingsMetric(mappings, field_name):
"""Shows a summary of mappings for a given metric.
For the given field, compute its aggregate value over all mappings, and
prints the mappings sorted by decreasing metric value.
Args:
mappings: ([Mapping]) all process mappings.
field_name: (str) Mapping field to process.
"""
total_kb = sum(m.fields[field_name] for m in mappings)
print('Total Size (kB) = %d' % total_kb)
sorted_by_metric = sorted(mappings,
key=lambda m: m.fields[field_name], reverse=True)
for mapping in sorted_by_metric:
metric = mapping.fields[field_name]
if not metric:
break
print(_SummarizeMapping(mapping, metric))
def _PrintSwapStats(mappings):
print('SWAP:')
_PrintMappingsMetric(mappings, 'Swap')
def _FootprintForAnonymousMapping(mapping):
assert mapping.pathname.startswith('[anon:')
if (mapping.pathname == '[anon:libc_malloc]'
and mapping.fields['Shared_Dirty'] != 0):
# libc_malloc mappings can come from the zygote. In this case, the shared
# dirty memory is likely dirty in the zygote, don't count it.
return mapping.fields['Rss']
else:
return mapping.fields['Private_Dirty']
def _PrintEstimatedFootprintStats(mappings, page_table_kb):
print('Private Dirty:')
_PrintMappingsMetric(mappings, 'Private_Dirty')
print('\n\nShared Dirty:')
_PrintMappingsMetric(mappings, 'Shared_Dirty')
print('\n\nPrivate Clean:')
_PrintMappingsMetric(mappings, 'Private_Clean')
print('\n\nShared Clean:')
_PrintMappingsMetric(mappings, 'Shared_Clean')
print('\n\nSwap PSS:')
_PrintMappingsMetric(mappings, 'SwapPss')
print('\n\nPage table = %d kiB' % page_table_kb)
def _ComputeEstimatedFootprint(mappings, page_table_kb):
"""Returns the estimated footprint in kiB.
Args:
mappings: ([Mapping]) all process mappings.
page_table_kb: (int) Sizeof the page tables in kiB.
"""
footprint = page_table_kb
for mapping in mappings:
# Chrome shared memory.
#
# Even though it is shared memory, it exists because the process exists, so
# account for its entirety.
if mapping.pathname.startswith('/dev/ashmem/shared_memory'):
footprint += mapping.fields['Rss']
elif mapping.pathname.startswith('[anon'):
footprint += _FootprintForAnonymousMapping(mapping)
# Mappings without a name are most likely Chrome's native memory allocators:
# v8, PartitionAlloc, Oilpan.
# All of it should be charged to our process.
elif mapping.pathname.strip() == '':
footprint += mapping.fields['Rss']
# Often inherited from the zygote, only count the private dirty part,
# especially as the swap part likely comes from the zygote.
elif mapping.pathname.startswith('['):
footprint += mapping.fields['Private_Dirty']
# File mappings. Can be a real file, and/or Dalvik/ART.
else:
footprint += mapping.fields['Private_Dirty']
return footprint
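# Hedged worked example (not from the original tool, numbers made up): for a
# process with 50 kB of page tables, a /dev/ashmem/shared_memory mapping with
# Rss = 300 kB, and an [anon:partition_alloc] mapping with Private_Dirty =
# 400 kB, the estimate above would come out to roughly 50 + 300 + 400 = 750 kB.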
def _ShowAllocatorFootprint(mappings, allocator):
"""Shows the total footprint from a specific allocator.
Args:
mappings: ([Mapping]) all process mappings.
allocator: (str) Allocator name.
"""
total_footprint = 0
pathname = '[anon:%s]' % allocator
for mapping in mappings:
if mapping.pathname == pathname:
total_footprint += _FootprintForAnonymousMapping(mapping)
print('\tFootprint from %s: %d kB' % (allocator, total_footprint))
def _CreateArgumentParser():
parser = argparse.ArgumentParser()
parser.add_argument('--pid', help='PID.', required=True, type=int)
parser.add_argument('--estimate-footprint',
                      help='Show the estimated memory footprint',
action='store_true')
parser.add_argument('--store-smaps', help='Store the smaps file locally',
action='store_true')
parser.add_argument('--show-allocator-footprint',
help='Show the footprint from a given allocator',
choices=['v8', 'libc_malloc', 'partition_alloc'],
nargs='+')
parser.add_argument(
'--device', help='Device to use', type=str, default='default')
return parser
def main():
parser = _CreateArgumentParser()
args = parser.parse_args()
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=args.device)
if not devices:
logging.error('No connected devices')
return
device = devices[0]
if not device.HasRoot():
device.EnableRoot()
# Enable logging after device handling as devil is noisy at INFO level.
logging.basicConfig(level=logging.INFO)
mappings = ParseProcSmaps(device, args.pid, args.store_smaps)
if args.estimate_footprint:
page_table_kb = _GetPageTableFootprint(device, args.pid)
_PrintEstimatedFootprintStats(mappings, page_table_kb)
footprint = _ComputeEstimatedFootprint(mappings, page_table_kb)
print('\n\nEstimated Footprint = %d kiB' % footprint)
else:
_PrintSwapStats(mappings)
if args.show_allocator_footprint:
print('\n\nMemory Allocators footprint:')
for allocator in args.show_allocator_footprint:
_ShowAllocatorFootprint(mappings, allocator)
if __name__ == '__main__':
main()
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
f0d8fc5a6739e6510b5819ce8a9f6155c79f922b
|
f8b5aafac15f408a48fabf853a918015c927e6fe
|
/backup/virtualenv/venv27/lib/python2.7/site-packages/openstackclient/identity/v3/role.py
|
0376070907d96274184e4a7d75690462833415c3
|
[] |
no_license
|
to30/tmp
|
bda1ac0ca3fc61e96c2a1c491367b698d7e97937
|
ec809683970af6787728c2c41f161f416155982a
|
refs/heads/master
| 2021-01-01T04:25:52.040770
| 2016-05-13T16:34:59
| 2016-05-13T16:34:59
| 58,756,087
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,740
|
py
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 Role action implementations"""
import logging
import six
import sys
from cliff import command
from cliff import lister
from cliff import show
from keystoneclient import exceptions as ksc_exc
from openstackclient.common import utils
from openstackclient.i18n import _ # noqa
class AddRole(command.Command):
"""Adds a role to a user or group on a domain or project"""
log = logging.getLogger(__name__ + '.AddRole')
def get_parser(self, prog_name):
parser = super(AddRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to add to <user> (name or ID)',
)
domain_or_project = parser.add_mutually_exclusive_group()
domain_or_project.add_argument(
'--domain',
metavar='<domain>',
help='Include <domain> (name or ID)',
)
domain_or_project.add_argument(
'--project',
metavar='<project>',
            help='Include <project> (name or ID)',
)
user_or_group = parser.add_mutually_exclusive_group()
user_or_group.add_argument(
'--user',
metavar='<user>',
help='Include <user> (name or ID)',
)
user_or_group.add_argument(
'--group',
metavar='<group>',
help='Include <group> (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if (not parsed_args.user and not parsed_args.domain
and not parsed_args.group and not parsed_args.project):
return
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
if parsed_args.user and parsed_args.domain:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
domain = utils.find_resource(
identity_client.domains,
parsed_args.domain,
)
identity_client.roles.grant(
role.id,
user=user.id,
domain=domain.id,
)
elif parsed_args.user and parsed_args.project:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
)
identity_client.roles.grant(
role.id,
user=user.id,
project=project.id,
)
elif parsed_args.group and parsed_args.domain:
group = utils.find_resource(
identity_client.groups,
parsed_args.group,
)
domain = utils.find_resource(
identity_client.domains,
parsed_args.domain,
)
identity_client.roles.grant(
role.id,
group=group.id,
domain=domain.id,
)
elif parsed_args.group and parsed_args.project:
group = utils.find_resource(
identity_client.groups,
parsed_args.group,
)
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
)
identity_client.roles.grant(
role.id,
group=group.id,
project=project.id,
)
else:
sys.stderr.write("Role not added, incorrect set of arguments \
provided. See openstack --help for more details\n")
return
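# Hedged usage note (illustration only, not from this module): the AddRole view
# above roughly corresponds to CLI invocations such as
#   openstack role add --project <project> --user <user> <role>
# where exactly one of --user/--group and one of --domain/--project is given.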
class CreateRole(show.ShowOne):
"""Create new role"""
log = logging.getLogger(__name__ + '.CreateRole')
def get_parser(self, prog_name):
parser = super(CreateRole, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<role-name>',
help='New role name',
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing role'),
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
try:
role = identity_client.roles.create(name=parsed_args.name)
except ksc_exc.Conflict as e:
if parsed_args.or_show:
role = utils.find_resource(identity_client.roles,
parsed_args.name)
self.log.info('Returning existing role %s', role.name)
else:
raise e
role._info.pop('links')
return zip(*sorted(six.iteritems(role._info)))
class DeleteRole(command.Command):
"""Delete role(s)"""
log = logging.getLogger(__name__ + '.DeleteRole')
def get_parser(self, prog_name):
parser = super(DeleteRole, self).get_parser(prog_name)
parser.add_argument(
'roles',
metavar='<role>',
nargs="+",
help='Role(s) to delete (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
for role in parsed_args.roles:
role_obj = utils.find_resource(
identity_client.roles,
role,
)
identity_client.roles.delete(role_obj.id)
return
class ListRole(lister.Lister):
"""List roles"""
log = logging.getLogger(__name__ + '.ListRole')
def get_parser(self, prog_name):
parser = super(ListRole, self).get_parser(prog_name)
domain_or_project = parser.add_mutually_exclusive_group()
domain_or_project.add_argument(
'--domain',
metavar='<domain>',
help='Filter roles by <domain> (name or ID)',
)
domain_or_project.add_argument(
'--project',
metavar='<project>',
help='Filter roles by <project> (name or ID)',
)
user_or_group = parser.add_mutually_exclusive_group()
user_or_group.add_argument(
'--user',
metavar='<user>',
help='Filter roles by <user> (name or ID)',
)
user_or_group.add_argument(
'--group',
metavar='<group>',
help='Filter roles by <group> (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
elif parsed_args.group:
group = utils.find_resource(
identity_client.groups,
parsed_args.group,
)
if parsed_args.domain:
domain = utils.find_resource(
identity_client.domains,
parsed_args.domain,
)
elif parsed_args.project:
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
)
# no user or group specified, list all roles in the system
if not parsed_args.user and not parsed_args.group:
columns = ('ID', 'Name')
data = identity_client.roles.list()
elif parsed_args.user and parsed_args.domain:
columns = ('ID', 'Name', 'Domain', 'User')
data = identity_client.roles.list(
user=user,
domain=domain,
)
for user_role in data:
user_role.user = user.name
user_role.domain = domain.name
elif parsed_args.user and parsed_args.project:
columns = ('ID', 'Name', 'Project', 'User')
data = identity_client.roles.list(
user=user,
project=project,
)
for user_role in data:
user_role.user = user.name
user_role.project = project.name
elif parsed_args.user:
columns = ('ID', 'Name')
data = identity_client.roles.list(
user=user,
domain='default',
)
elif parsed_args.group and parsed_args.domain:
columns = ('ID', 'Name', 'Domain', 'Group')
data = identity_client.roles.list(
group=group,
domain=domain,
)
for group_role in data:
group_role.group = group.name
group_role.domain = domain.name
elif parsed_args.group and parsed_args.project:
columns = ('ID', 'Name', 'Project', 'Group')
data = identity_client.roles.list(
group=group,
project=project,
)
for group_role in data:
group_role.group = group.name
group_role.project = project.name
else:
sys.stderr.write("Error: If a user or group is specified, either "
"--domain or --project must also be specified to "
"list role grants.\n")
return ([], [])
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class RemoveRole(command.Command):
"""Remove role from domain/project : user/group"""
log = logging.getLogger(__name__ + '.RemoveRole')
def get_parser(self, prog_name):
parser = super(RemoveRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to remove (name or ID)',
)
domain_or_project = parser.add_mutually_exclusive_group()
domain_or_project.add_argument(
'--domain',
metavar='<domain>',
help='Include <domain> (name or ID)',
)
domain_or_project.add_argument(
'--project',
metavar='<project>',
help='Include <project> (name or ID)',
)
user_or_group = parser.add_mutually_exclusive_group()
user_or_group.add_argument(
'--user',
metavar='<user>',
help='Include <user> (name or ID)',
)
user_or_group.add_argument(
'--group',
metavar='<group>',
help='Include <group> (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if (not parsed_args.user and not parsed_args.domain
and not parsed_args.group and not parsed_args.project):
return
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
if parsed_args.user and parsed_args.domain:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
domain = utils.find_resource(
identity_client.domains,
parsed_args.domain,
)
identity_client.roles.revoke(
role.id,
user=user.id,
domain=domain.id,
)
elif parsed_args.user and parsed_args.project:
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
)
identity_client.roles.revoke(
role.id,
user=user.id,
project=project.id,
)
elif parsed_args.group and parsed_args.domain:
group = utils.find_resource(
identity_client.groups,
parsed_args.group,
)
domain = utils.find_resource(
identity_client.domains,
parsed_args.domain,
)
identity_client.roles.revoke(
role.id,
group=group.id,
domain=domain.id,
)
elif parsed_args.group and parsed_args.project:
group = utils.find_resource(
identity_client.groups,
parsed_args.group,
)
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
)
identity_client.roles.revoke(
role.id,
group=group.id,
project=project.id,
)
else:
sys.stderr.write("Role not removed, incorrect set of arguments \
provided. See openstack --help for more details\n")
return
class SetRole(command.Command):
"""Set role properties"""
log = logging.getLogger(__name__ + '.SetRole')
def get_parser(self, prog_name):
parser = super(SetRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to modify (name or ID)',
)
parser.add_argument(
'--name',
metavar='<name>',
help='Set role name',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
if not parsed_args.name:
return
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
identity_client.roles.update(role.id, name=parsed_args.name)
return
class ShowRole(show.ShowOne):
"""Display role details"""
log = logging.getLogger(__name__ + '.ShowRole')
def get_parser(self, prog_name):
parser = super(ShowRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to display (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
identity_client = self.app.client_manager.identity
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
role._info.pop('links')
return zip(*sorted(six.iteritems(role._info)))
|
[
"tomonaga@mx2.mesh.ne.jp"
] |
tomonaga@mx2.mesh.ne.jp
|
48dd32f18373f2b389e32630ded0044734fd4b19
|
4d44674625100e62be2bb5033339fb641bd454ac
|
/snippet/example/python/project/project/db/sqlalchemy/models.py
|
782d92b417a09747274a173923da7001f80a4da4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
xgfone/snippet
|
8b9004a649d2575b493a376c4b4f3d4a7c56a4b0
|
b0b734dd35478b7ef3e6193623981f4f29b6748c
|
refs/heads/master
| 2022-03-18T12:41:09.033144
| 2022-02-20T15:26:35
| 2022-02-20T15:26:35
| 41,615,643
| 158
| 61
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
# coding: utf-8
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
from sqlalchemy.ext.declarative import declarative_base
from oslo_db.sqlalchemy import models
from sqlalchemy import create_engine
from sqlalchemy import Column, String, Integer, DateTime
from sqlalchemy.sql import func
LOG = logging.getLogger(__name__)
BASE = declarative_base()
class TestData(models.ModelBase, BASE):
__tablename__ = 'test_data'
id = Column(Integer, primary_key=True, autoincrement=True)
data = Column(String(256), nullable=False)
create_time = Column(DateTime, server_default=func.now(), nullable=False)
def __init__(self, *args, **kwargs):
super(TestData, self).__init__()
for k, v in kwargs.items():
setattr(self, k, v)
def create_tables(engine=None):
if not engine:
try:
import sys
engine = sys.argv[1]
except IndexError:
engine = "sqlite:///:memory:"
engine = create_engine(engine, echo=True)
BASE.metadata.create_all(engine)
if __name__ == '__main__':
create_tables("sqlite:///:memory:")
|
[
"xgfone@126.com"
] |
xgfone@126.com
|
a1f9f2880c5805d0642099f67fac1e61760b9185
|
c342d39a064441d7c83b94e896dfbac1dc155666
|
/setup.py
|
cc22030282c6d003af194c2c298389e898f5d44d
|
[
"MIT"
] |
permissive
|
arsho/generator
|
a67d876bf9dded9bacdbd50a9ab3999f90c81731
|
5dc346850ec99a47ca7c074e3e5dec0b5fff30e2
|
refs/heads/master
| 2021-01-01T16:54:41.955771
| 2017-07-21T14:37:34
| 2017-07-21T14:37:34
| 97,951,569
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
# -*- coding: utf-8 -*-
from setuptools import setup
from io import open  # so the encoding kwarg below also works on Python 2.7
def readme():
with open('README.rst', encoding='utf8') as f:
return f.read()
setup(name='generator',
version='0.0.1',
description='Generator is a package for generating strong password and check strength of user defined password.',
long_description=readme(),
install_requires=[],
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords='password generator strength pass',
url='http://github.com/arsho/generator',
author='Ahmedur Rahman Shovon',
author_email='shovon.sylhet@gmail.com',
license='MIT',
packages=['generator'],
include_package_data=True,
zip_safe=False
)
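# Hedged usage note (illustration only): from the project root this setup
# script can be used e.g. with "pip install ." or "python setup.py sdist" to
# build a source distribution.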
|
[
"shovon.sylhet@gmail.com"
] |
shovon.sylhet@gmail.com
|
8f09ee1c175eaa67db58c061ed1f27c69414af94
|
20ade86a0c0f0ca6be3fae251488f985c2a26241
|
/exp/analyze_5.py
|
d038d5fa9c073324d036a898b7df5cf86f573c6a
|
[] |
no_license
|
adysonmaia/phd-sp-static
|
69344fdd4edb4c216e4b88b0193308b33a30e72c
|
79038d165c19f90e1f54597f7049553720f34c74
|
refs/heads/master
| 2023-04-14T15:59:07.414873
| 2019-10-24T07:56:37
| 2019-10-24T07:56:37
| 355,110,847
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,099
|
py
|
import csv
import numpy as np
import scipy.stats as st
import matplotlib
import matplotlib.pyplot as plt
DPI = 100
Y_PARAM = {
'max_dv': {
'label': 'Deadline Violation - ms',
'limit': [0.0, 10.0]
},
'dsr': {
'label': 'Deadline Satisfaction - %',
'limit': [40.0, 100.0]
},
'avg_rt': {
'label': 'Response Time - ms',
'limit': [0.0, 18.0]
},
'cost': {
'label': 'Cost',
'limit': [1000.0, 1500.0]
},
'max_unavail': {
'label': 'Availability - %',
'limit': [70.0, 100.0]
},
'avg_unavail': {
'label': 'Unavailability - %',
'limit': [0.0, 10.0]
},
'avg_avail': {
'label': 'Availability - %',
'limit': [0.0, 100.0]
},
'time': {
'label': 'Execution Time - s',
'limit': [0.0, 300.0]
},
}
X_PARAM = {
'probability': {
'label': 'Elite Probability',
'limit': [10, 90],
},
'stop_threshold': {
'label': 'Stop Threshold',
'limit': [0, 1],
}
}
def get_data_from_file(filename):
results = []
with open(filename) as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count > 0:
results.append(row)
line_count += 1
return results
def filter_data(data, **kwargs):
def to_string_values(values):
str_values = []
if not isinstance(values, list):
values = [values]
for value in values:
str_values.append(str(value))
return str_values
def in_filter(row):
for key, value in row.items():
if key in f_values and value not in f_values[key]:
return False
return True
f_values = {k: to_string_values(v) for k, v in kwargs.items()}
return list(filter(lambda row: in_filter(row), data))
def format_metric(value, metric):
value = float(value)
if metric == 'max_unavail':
value = 100.0 * (1.0 - value)
elif metric == 'avg_unavail':
value = 100.0 * value
elif metric == 'avg_avail':
value = 100.0 * value
elif metric == 'dsr':
value = 100.0 * value
return value
def format_field(value, field):
value = float(value)
if field == 'stop_threshold':
value = round(value, 2)
return value
def calc_stats(values):
nb_runs = len(values)
mean = np.mean(values)
sem = st.sem(values)
if sem > 0.0:
# Calc confidence interval, return [mean - e, mean + e]
error = st.t.interval(0.95, nb_runs - 1, loc=mean, scale=sem)
error = error[1] - mean
else:
error = 0.0
return mean, error
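# Hedged usage sketch (not part of the original analysis): calc_stats returns
# the sample mean and a 95% confidence half-width, so error bars can be drawn
# as mean +/- error. The values below are made up for illustration.
def _demo_calc_stats():
    sample_values = [10.2, 9.8, 10.5, 10.1, 9.9]
    mean, error = calc_stats(sample_values)
    print('mean = %.2f +/- %.2f (95%% CI)' % (mean, error))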
def gen_figure(data, metric, x, x_field, data_filter, filename=None):
plt.clf()
matplotlib.rcParams.update({'font.size': 20})
filtered = filter_data(data, **data_filter)
y = []
y_errors = []
for x_value in x:
x_filter = {x_field: x_value}
x_data = filter_data(filtered, **x_filter)
values = list(map(lambda r: format_metric(r[metric], metric), x_data))
mean, error = calc_stats(values)
y.append(mean)
y_errors.append(error)
print("{} x={:.1f}, y={:.1f}".format(metric, x_value, mean))
x = [format_field(i, x_field) for i in x]
plt.errorbar(x, y, yerr=y_errors, markersize=10, fmt='-o')
plt.subplots_adjust(bottom=0.2, top=0.97, left=0.12, right=0.96)
x_param = X_PARAM[x_field]
y_param = Y_PARAM[metric]
plt.xlabel(x_param['label'])
plt.ylabel(y_param['label'])
plt.ylim(*y_param['limit'])
# plt.xlim(*x_param['limit'])
plt.xticks(x)
plt.grid(True)
if not filename:
plt.show()
else:
plt.savefig(filename, dpi=DPI, bbox_inches='tight', pad_inches=0.05)
def run():
data = get_data_from_file('exp/output/exp_5.csv')
all_solutions = [
('moga', 'preferred'),
]
metric_solutions = {
'max_dv': all_solutions,
# 'dsr': all_solutions,
# 'avg_rt': all_solutions,
'cost': all_solutions,
'avg_unavail': all_solutions,
'time': all_solutions
}
params = [
{
'title': 'st',
'filter': {},
'x_field': 'stop_threshold',
'x_values': np.arange(0.0, 0.6, 0.1)
},
]
for param in params:
for metric, solutions in metric_solutions.items():
for solution, sol_version in solutions:
fig_title = param['title']
filter = param['filter']
filter['solution'] = solution
filter['version'] = sol_version
x = param['x_values']
x_field = param['x_field']
filename = "exp/figs/exp_5/fig_{}_{}_{}_{}.png".format(
fig_title, metric, solution, sol_version
)
gen_figure(data, metric, x, x_field, filter, filename)
if __name__ == '__main__':
print("Execute as 'python3 analyze.py exp_5'")
|
[
"adyson.maia@gmail.com"
] |
adyson.maia@gmail.com
|
17d235e0928561692d73711efe48d58fd5d371fa
|
06aa3ec3262f6dd6866ea194ed6385f8e53509bf
|
/manuscript_codes/AML211DiffALL/remove_nonexistent_fromAnnotatedcsv.py
|
409adfbaa7c37d20329ae26f43f38331d13472ce
|
[] |
no_license
|
KuehLabUW/UPSIDE
|
95ce078382792d1beb0574c3b19c04e467befa58
|
3c90de9677f24e258800cb95bce6cb528f4ad4ac
|
refs/heads/master
| 2023-07-13T15:58:07.963672
| 2021-08-30T21:14:48
| 2021-08-30T21:14:48
| 329,134,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 16:21:25 2019
this script drops rows whose image file (the dirname column) no longer exists
on disk and writes a trimmed copy of the annotated csv file
@author: phnguyen
"""
import pandas as pd
import os
csvs_dirname = '/media/phnguyen/Data2/Imaging/CellMorph/data/AML211DiffALL/csvs/'
os.chdir(csvs_dirname)
filename = 'AML211DiffALL_LargeMask_Annotated.csv'
df = pd.read_csv(filename)
print(len(df))
pos = []
for i in range(len(df.index)):
if os.path.isfile(df.dirname[i]) == False:
pos.append(i)
print(i)
df.drop(df.index[pos], inplace=True)
#save the combined dataframe
df.to_csv(csvs_dirname+'AML211DiffALL_LargeMask_Annotated_trimmed.csv', sep=',')
|
[
"kuehlab@uw.edu"
] |
kuehlab@uw.edu
|
b39f7d7bc5979960cc3a326e3a5e41d319fc3636
|
16c5a7c5f45a6faa5f66f71e043ce8999cb85d80
|
/app/honor/student/listen_everyday/object_page/history_page.py
|
014714a71b529b852af33e51e693c88f7b3b6757
|
[] |
no_license
|
vectorhuztt/test_android_copy
|
ca497301b27f49b2aa18870cfb0fd8b4640973e5
|
f70ab6b1bc2f69d40299760f91870b61e012992e
|
refs/heads/master
| 2021-04-03T19:26:48.009105
| 2020-06-05T01:29:51
| 2020-06-05T01:29:51
| 248,389,861
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,494
|
py
|
# coding: utf-8
# -------------------------------------------
# Author: Vector
# Date: 2018/12/17 16:11
# -------------------------------------------
from selenium.webdriver.common.by import By
from app.honor.student.login.object_page.home_page import HomePage
from conf.base_page import BasePage
from conf.decorator import teststep
from utils.wait_element import WaitElement
class HistoryPage(BasePage):
wait = WaitElement()
home = HomePage()
@teststep
def wait_check_history_page(self):
locator = (By.XPATH, "//android.widget.TextView[@text='历史推荐']")
return self.wait.wait_check_element(locator)
@teststep
def wait_check_clear_button_page(self):
locator = (By.ID, self.id_type() + 'clear')
return self.wait.wait_check_element(locator, timeout=5)
@teststep
def wait_check_red_hint_page(self):
locator = (By.ID, self.id_type() + 'tv_hint')
return self.wait.wait_check_element(locator, timeout=5)
@teststep
def wait_check_img_page(self):
locator = (By.ID, self.id_type() + 'img')
return self.wait.wait_check_element(locator, timeout=5)
@teststep
def wait_check_tips_page(self):
locator = (By.ID, self.id_type() + 'md_content')
return self.wait.wait_check_element(locator, timeout=5)
@teststep
def game_name(self):
locator = (By.ID, self.id_type() + 'game_name')
return self.wait.wait_find_elements(locator)
@teststep
def right_rate(self, game_name):
locator = (By.XPATH, '//android.widget.TextView[contains(@text,"{0}")]/../following-sibling::android.widget.'
'TextView[contains(@resource-id, "{1}right_rate")]'.format(game_name, self.id_type()))
return self.wait.wait_find_element(locator)
@teststep
def game_date(self, game_name):
locator = (By.XPATH, '//android.widget.TextView[contains(@text,"{0}")]/../following-sibling::'
'android.widget.TextView[contains(@resource-id,"time")]'.format(game_name))
return self.wait.wait_find_element(locator)
@teststep
def tips_operate_commit(self):
"""温馨提示 页面信息 -- 确定"""
if self.wait_check_tips_page(): # 温馨提示 页面
self.home.tips_content()
self.home.commit_button() # 确定按钮
@teststep
def history_page_operate(self):
        print('Handling the listening history page')
game_names = self.game_name()
game_num = len(game_names) if len(game_names) < 10 else len(game_names) - 1
        print('Number of games:', game_num)
for i in range(game_num):
if self.wait_check_history_page():
name = game_names[i].text
right_rate = self.right_rate(name).text
game_date = self.game_date(name).text
print(name)
print(right_rate)
print(game_date)
if i == 3 or i == 5 or i == 7:
if name == '听音连句':
game_names[i].click()
if not self.wait_check_clear_button_page():
                            self.base_assert.except_error('Error-- clear button not found for 听音连句')
                        else:
                            print('Entered the 听音连句 game page')
self.home.click_back_up_button()
self.tips_operate_commit()
if name == '听后选择':
game_names[i].click()
if not self.wait_check_red_hint_page():
                            self.base_assert.except_error('Error-- red hint not found for 听后选择')
                        else:
                            print('Entered the 听后选择 game page')
self.home.click_back_up_button()
self.tips_operate_commit()
if name == '听音选图':
game_names[i].click()
if not self.wait_check_img_page():
                            self.base_assert.except_error('Error-- image not found for 听音选图')
                        else:
                            print('Entered the 听音选图 game page')
self.home.click_back_up_button()
self.tips_operate_commit()
print('-'*30, '\n')
self.home.click_back_up_button()
|
[
"vectorztt@163.com"
] |
vectorztt@163.com
|
737f7c4f3db32fbbc32c0d5f8ed335fc3f63e82b
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20200504/example_egoist/walker.py
|
519c662fb68e00489ebc5b0bbaa8f85170fd985e
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 42
|
py
|
../../20200503/example_metashape/walker.py
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
a9f60f3ed1fe3f516a90a7101d86cf5d08986545
|
3b80ec0a14124c4e9a53985d1fa0099f7fd8ad72
|
/realestate/urls.py
|
11e290ebf235d7ae4d3ce6986f61c81f4176ded0
|
[] |
no_license
|
aayushgupta97/RealEstate_Django_TTN
|
ec4dde7aa3a1bcfa4d88adb5ea7ebb20127e7489
|
9af7c26c85c46ac5b0e3b3fad4a7b1067df20c47
|
refs/heads/master
| 2020-05-04T08:09:03.917026
| 2019-04-18T08:30:05
| 2019-04-18T08:30:05
| 179,041,202
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from pages import views as page_views
urlpatterns = [
path('properties/', include('properties.urls')),
path('', include('pages.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('contacts/', include('contacts.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
handler404 = page_views.handler404
# handler500 = page_views.handler500
|
[
"aayushgupta2097@gmail.com"
] |
aayushgupta2097@gmail.com
|
fc2f5b4eaf1d9c7e2539b1ef43e5b12ba9fbe924
|
38fecea29fa82eb203fd964acd54ffacc7e4c388
|
/chapter03/page048_colored_grid.py
|
9a62621c8c535c213b8b8c6e2da4ef4c1286ade9
|
[] |
no_license
|
mjgpy3/mfp-python3-examples
|
3c74f09c6155e9fbf35bd8ec104bdfe4429b9f4b
|
09547141d25859fe93a6a0e70c828877ee93f736
|
refs/heads/master
| 2020-12-03T18:38:30.411800
| 2020-01-18T20:42:20
| 2020-01-18T20:42:20
| 231,431,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#!/usr/bin/env python3
from page040_grid import Grid
class ColoredGrid(Grid):
    # Explicit setter method used here rather than a property setter
def set_distances(self, distances):
self.distances = distances
farthest, self.maximum = distances.max()
def background_color_for(self, cell):
distance = self.distances[cell]
if not distance:
return (255, 255, 255)
intensity = float(self.maximum - distance) / self.maximum
dark = round(255 * intensity)
bright = 128 + round(127 * intensity)
return (dark, bright, dark)
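# Hedged worked example (not from the original code, numbers made up): with
# self.maximum = 10, a cell at distance 5 has intensity (10 - 5) / 10 = 0.5,
# giving (dark, bright, dark) = (128, 192, 128), a mid green; a cell at
# distance 0 returns plain white via the early return above.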
|
[
"mjg.py3@gmail.com"
] |
mjg.py3@gmail.com
|
4aaf7f9daeeb93706d4bbb8c3bd8d49f690c0c93
|
d9b3289354d8f75ae8dd9988a89b08596bd4cae9
|
/pgadmin/pgadmin/browser/server_groups/servers/resource_groups/__init__.py
|
336fe7d01d25a73d9bfd68a2da083098d2be10c2
|
[] |
no_license
|
DataCraft-AI/pgdevops
|
8827ab8fb2f60d97a22c03317903b71a12a49611
|
f489bfb22b5b17255f85517cb1443846133dc378
|
refs/heads/master
| 2023-02-10T05:44:00.117387
| 2020-01-22T13:40:58
| 2020-01-22T13:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,471
|
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements Resource Groups for PPAS 9.4 and above"""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers as servers
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.utils import NodeView
from pgadmin.utils.ajax import make_json_response, \
make_response as ajax_response, internal_server_error, gone
from pgadmin.utils.ajax import precondition_required
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
from pgadmin.utils import IS_PY2
# If we are in Python3
if not IS_PY2:
unicode = str
class ResourceGroupModule(CollectionNodeModule):
"""
class ResourceGroupModule(CollectionNodeModule)
A module class for Resource Group node derived from CollectionNodeModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the ResourceGroupModule and it's base module.
* BackendSupported(manager, **kwargs)
- This function is used to check the database server type and version.
Resource Group only supported in PPAS 9.4 and above.
* get_nodes(gid, sid, did)
- Method is used to generate the browser collection node.
* node_inode()
- Method is overridden from its base class to make the node as leaf node.
* script_load()
- Load the module script for resource group, when any of the server node is
initialized.
"""
NODE_TYPE = 'resource_group'
COLLECTION_LABEL = gettext("Resource Groups")
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the ResourceGroupModule and it's base module.
Args:
*args:
**kwargs:
"""
super(ResourceGroupModule, self).__init__(*args, **kwargs)
self.min_ver = 90400
self.max_ver = None
self.server_type = ['ppas']
def get_nodes(self, gid, sid):
"""
Method is used to generate the browser collection node
Args:
gid: Server Group ID
sid: Server ID
"""
yield self.generate_browser_collection_node(sid)
@property
def node_inode(self):
"""
Override this property to make the node as leaf node.
Returns: False as this is the leaf node
"""
return False
@property
def script_load(self):
"""
Load the module script for resource group, when any of the server node is initialized.
Returns: node type of the server module.
"""
return servers.ServerModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
blueprint = ResourceGroupModule(__name__)
class ResourceGroupView(NodeView):
"""
class ResourceGroupView(NodeView)
A view class for resource group node derived from NodeView. This class is
responsible for all the stuff related to view like create/update/delete resource group,
showing properties of resource group node, showing sql in sql pane.
Methods:
-------
* __init__(**kwargs)
- Method is used to initialize the ResourceGroupView and it's base view.
* module_js()
- This property defines (if javascript) exists for this node.
Override this property for your own logic
* check_precondition()
- This function will behave as a decorator which will checks
database connection before running view, it will also attaches
manager,conn & template_path properties to self
* list()
- This function is used to list all the resource group nodes within that collection.
* nodes()
- This function will used to create all the child node within that collection.
Here it will create all the resource group node.
* properties(gid, sid, did, rg_id)
- This function will show the properties of the selected resource group node
* create(gid, sid, did, rg_id)
- This function will create the new resource group object
* update(gid, sid, did, rg_id)
- This function will update the data for the selected resource group node
* delete(self, gid, sid, rg_id):
- This function will drop the resource group object
* msql(gid, sid, did, rg_id)
- This function is used to return modified SQL for the selected resource group node
* get_sql(data, rg_id)
- This function will generate sql from model data
* sql(gid, sid, did, rg_id):
- This function will generate sql to show it in sql pane for the selected resource group node.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'}
]
ids = [
{'type': 'int', 'id': 'rg_id'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}]
})
def __init__(self, **kwargs):
"""
Method is used to initialize the ResourceGroupView and it's base view.
Also initialize all the variables create/used dynamically like conn, template_path.
Args:
**kwargs:
"""
self.conn = None
self.template_path = None
super(ResourceGroupView, self).__init__(**kwargs)
def module_js(self):
"""
This property defines (if javascript) exists for this node.
Override this property for your own logic.
"""
return make_response(
render_template(
"resource_groups/js/resource_groups.js",
_=gettext
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
This function will behave as a decorator which will checks
database connection before running view, it will also attaches
manager,conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = self.driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection()
# If DB not connected then return error to browser
if not self.conn.connected():
return precondition_required(
gettext(
"Connection to the server has been lost."
)
)
self.template_path = 'resource_groups/sql'
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid):
"""
This function is used to list all the resource group nodes within that collection.
Args:
gid: Server Group ID
sid: Server ID
"""
sql = render_template("/".join([self.template_path, 'properties.sql']))
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def node(self, gid, sid, rg_id):
"""
This function will used to create all the child node within that collection.
Here it will create all the resource group node.
Args:
gid: Server Group ID
sid: Server ID
"""
sql = render_template("/".join([self.template_path, 'nodes.sql']),
rgid=rg_id)
status, result = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=result)
if len(result['rows']) == 0:
return gone(gettext("""Could not find the resource group."""))
res = self.blueprint.generate_browser_node(
result['rows'][0]['oid'],
sid,
result['rows'][0]['name'],
icon="icon-resource_group"
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid):
"""
This function will used to create all the child node within that collection.
Here it will create all the resource group node.
Args:
gid: Server Group ID
sid: Server ID
"""
res = []
sql = render_template("/".join([self.template_path, 'nodes.sql']))
status, result = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=result)
for row in result['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
sid,
row['name'],
icon="icon-resource_group"
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def properties(self, gid, sid, rg_id):
"""
This function will show the properties of the selected resource group node.
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the resource group."""))
return ajax_response(
response=res['rows'][0],
status=200
)
@check_precondition
def create(self, gid, sid):
"""
This function will create the new resource group object
Args:
gid: Server Group ID
sid: Server ID
"""
required_args = [
'name'
]
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for arg in required_args:
if arg not in data:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
"Could not find the required parameter (%s)." % arg
)
)
try:
# Below logic will create new resource group
sql = render_template("/".join([self.template_path, 'create.sql']), rgname=data['name'], conn=self.conn)
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
# Below logic will update the cpu_rate_limit and dirty_rate_limit for resource group
# we need to add this logic because in resource group you can't run multiple commands in one transaction.
sql = render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
# Checking if we are not executing empty query
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
# Below logic is used to fetch the oid of the newly created resource group
sql = render_template("/".join([self.template_path, 'getoid.sql']), rgname=data['name'])
# Checking if we are not executing empty query
rg_id = 0
if sql and sql.strip('\n') and sql.strip(' '):
status, rg_id = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=rg_id)
return jsonify(
node=self.blueprint.generate_browser_node(
rg_id,
sid,
data['name'],
icon="icon-resource_group"
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, rg_id):
"""
This function will update the data for the selected resource group node
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
required_args = [
'name', 'cpu_rate_limit', 'dirty_rate_limit'
]
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
old_data = res['rows'][0]
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
if data['name'] != old_data['name']:
sql = render_template("/".join([self.template_path, 'update.sql']),
oldname=old_data['name'], newname=data['name'], conn=self.conn)
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
# Below logic will update the cpu_rate_limit and dirty_rate_limit for resource group
# we need to add this logic because in resource group you can't run multiple commands
# in one transaction.
if (data['cpu_rate_limit'] != old_data['cpu_rate_limit']) \
or (data['dirty_rate_limit'] != old_data['dirty_rate_limit']):
sql = render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
return jsonify(
node=self.blueprint.generate_browser_node(
rg_id,
sid,
data['name'],
icon="icon-%s" % self.node_type
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def delete(self, gid, sid, rg_id):
"""
This function will drop the resource group object
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
try:
# Get name for resource group from rg_id
sql = render_template("/".join([self.template_path, 'delete.sql']), rgid=rg_id, conn=self.conn)
status, rgname = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=rgname)
if rgname is None:
return make_json_response(
success=0,
errormsg=gettext(
'Error: Object not found.'
),
info=gettext(
'The specified resource group could not be found.\n'
)
)
# drop resource group
sql = render_template("/".join([self.template_path, 'delete.sql']), rgname=rgname, conn=self.conn)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Resource Group dropped"),
data={
'id': rg_id,
'sid': sid,
'gid': gid,
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def msql(self, gid, sid, rg_id=None):
"""
This function is used to return modified SQL for the selected resource group node
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
data = dict()
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
sql, name = self.get_sql(data, rg_id)
# Most probably this is due to error
if not isinstance(sql, (str, unicode)):
return sql
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
def get_sql(self, data, rg_id=None):
"""
This function will generate sql from model data
Args:
data: Contains the value of name, cpu_rate_limit, dirty_rate_limit
rg_id: Resource Group Id
"""
required_args = [
'name', 'cpu_rate_limit', 'dirty_rate_limit'
]
if rg_id is not None:
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("The specified resource group could not be found.")
)
old_data = res['rows'][0]
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
sql = ''
name_changed = False
if data['name'] != old_data['name']:
name_changed = True
sql = render_template("/".join([self.template_path, 'update.sql']),
oldname=old_data['name'], newname=data['name'], conn=self.conn)
if (data['cpu_rate_limit'] != old_data['cpu_rate_limit']) \
or data['dirty_rate_limit'] != old_data['dirty_rate_limit']:
if name_changed:
sql += "\n-- Following query will be executed in a separate transaction\n"
sql += render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
else:
sql = render_template("/".join([self.template_path, 'create.sql']), rgname=data['name'], conn=self.conn)
if ('cpu_rate_limit' in data and data['cpu_rate_limit'] > 0) \
or ('dirty_rate_limit' in data and data['dirty_rate_limit'] > 0):
sql += "\n-- Following query will be executed in a separate transaction\n"
sql += render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
return sql, data['name'] if 'name' in data else old_data['name']
@check_precondition
def sql(self, gid, sid, rg_id):
"""
This function will generate sql for sql pane
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("The specified resource group could not be found.")
)
# Making copy of output for future use
old_data = dict(res['rows'][0])
sql = render_template("/".join([self.template_path, 'create.sql']), display_comments=True,
rgname=old_data['name'], conn=self.conn)
sql += "\n"
sql += render_template("/".join([self.template_path, 'update.sql']), data=old_data, conn=self.conn)
return ajax_response(response=sql)
ResourceGroupView.register_node_view(blueprint)
|
[
"denis@lussier.io"
] |
denis@lussier.io
|
de0beb1610545ee78ac1dcc707d7fc40e2c1a0fb
|
748bbab674d1a5ae6a59bfd4ac22efcb4355e82a
|
/Prog-II/Back_Front/back/modelo.py
|
500e048c4dda6a3d2bb759c389dc9ab5b947b11b
|
[] |
no_license
|
Lima001/Tecnico-IFC
|
8819114a35080eb914a2d836a0accbf79d3268d8
|
771fa39dd6046a9d92860fbde70c10dcecd975a3
|
refs/heads/master
| 2021-02-07T01:31:04.929420
| 2020-06-24T02:09:38
| 2020-06-24T02:09:38
| 243,967,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from peewee import *
arq = "dados.db"
db = SqliteDatabase(arq)
class BaseModel(Model):
class Meta:
database = db
class Cachorro(BaseModel):
nome = CharField()
idade = IntegerField()
raca = CharField()
if __name__ == "__main__":
db.connect()
db.create_tables([Cachorro])
dog1 = Cachorro.create(nome="Rex",idade=1,raca="Pastor Alemao")
print(dog1.nome + "|" + str(dog1.idade) + "|" + dog1.raca)
|
[
"limaedugabriel@gmail.com"
] |
limaedugabriel@gmail.com
|
0dabd218576ed96dbe4a021fce762f03727b90ae
|
b4948c322401435a02370dd96708399fda4a48fc
|
/demo/simple_code/test_pass.py
|
453fa756db68194cdd14c29692c9fa5fb24807be
|
[] |
no_license
|
fengbingchun/Python_Test
|
413e2c9bb844a5b3641e6e6daf37df277589006e
|
eaedcd55dbc156b685fa891538e1120ea68fa343
|
refs/heads/master
| 2023-06-21T02:28:07.310364
| 2023-06-11T04:46:29
| 2023-06-11T04:46:29
| 99,814,791
| 7
| 6
| null | 2022-09-30T00:38:06
| 2017-08-09T14:01:48
|
C++
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Blog: https://blog.csdn.net/fengbingchun/article/details/125242357
# 1. empty function
def func():
pass # remember to implement this
func()
# 2. empty class
class fbc:
pass
fbc()
# 3. loop
num = 5
for i in range(num):
pass
# 4. conditional statement
a = 5
b = 10
if (a < b):
pass
else:
print("b<=a")
for letter in "Python3":
if letter == "h":
pass
else:
print("", letter, end="")
print("\ntest finish")
|
[
"fengbingchun@163.com"
] |
fengbingchun@163.com
|
fa791cbb653d2472098d0a3b028680e2bc9b6414
|
61432a6d3b25e5b3142fe1f154acf5764bc2d596
|
/custom_report/controllers/controllers.py
|
0d654793e8486cc3dde196ee71832650723dcae7
|
[] |
no_license
|
halltech-ci/tfc_agro
|
8c2c7911901e8c7bcf548fb05ca8f7891ab4ef51
|
a737dfdccfca51136cb01894a00f21f5365a771a
|
refs/heads/master_1
| 2020-12-22T08:59:40.507801
| 2020-08-17T21:20:18
| 2020-08-17T21:20:18
| 236,734,216
| 0
| 3
| null | 2020-05-09T23:19:24
| 2020-01-28T12:50:00
|
Python
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
# -*- coding: utf-8 -*-
from odoo import http
# class CustomReport(http.Controller):
# @http.route('/custom_report/custom_report/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/custom_report/custom_report/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('custom_report.listing', {
# 'root': '/custom_report/custom_report',
# 'objects': http.request.env['custom_report.custom_report'].search([]),
# })
# @http.route('/custom_report/custom_report/objects/<model("custom_report.custom_report"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('custom_report.object', {
# 'object': obj
# })
|
[
"maurice.atche@halltech-africa.com"
] |
maurice.atche@halltech-africa.com
|
1c76c0d73c6d00dda9f771fd4eb96c5024ac5792
|
0ab40aa11442ef5868438844ca193a88cc2ab0af
|
/Crosstalk/analyze_cross_talk.py
|
10c923b7c414428f9d01a510ade08e2d6b0559f8
|
[] |
no_license
|
nischalmishra/TEMPO_python
|
2d85b0a401e776e4a1ae65920bd7553a3896170a
|
643a9577fd6686ec32d85205b5988ec757eec4c8
|
refs/heads/master
| 2020-07-20T10:20:40.333931
| 2019-09-05T14:26:36
| 2019-09-05T14:26:36
| 206,623,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,238
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 10:38:50 2017
@author: nmishra
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.io.idl import readsav
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
def read_outlier_mask():
    """This function reads the outlier_mask"""
outlier_mask= np.genfromtxt(r'C:\Users\nmishra\Workspace\TEMPO\outlier_mask\final_outlier_mask_2_sigma.csv', delimiter=',')
quad_A = outlier_mask[0:1024, 0:1024]
quad_B = outlier_mask[1024:, 0:1024]
quad_C = outlier_mask[1024:, 1024:]
quad_D = outlier_mask[0:1024:, 1024:]
outlier_mask_final = [quad_A, quad_B, quad_C, quad_D]
return outlier_mask_final
"""
This function reads the outlier_mask
"""
def filter_outlier_median(quads):
if np.array(quads).ndim ==3:
ndims, nx_quad, ny_quad = quads.shape
elif np.array(quads).ndim ==2:
ndims=1
nx_quad, ny_quad = quads.shape
else:
nx_quad= 1
ndims=1
ny_quad = len(quads)
hist_data = np.reshape(quads,(ndims*nx_quad*ny_quad, 1))
diff = abs(hist_data - np.median(hist_data)) # find the distance to the median
median_diff = np.median(diff) # find the median of this distance
measured_threshold = diff/median_diff if median_diff else 0.
outlier_filtered_data = hist_data[measured_threshold < 6.]
#print(outlier_filtered_data)
return outlier_filtered_data
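# Hedged usage sketch (illustration only, not from the original analysis):
# filter_outlier_median flattens a quad and keeps only the pixels whose
# distance from the median is under ~6 median-absolute-deviations.
def demo_filter_outlier_median():
    demo_quad = np.array([[100., 101., 99.], [100., 5000., 98.]])
    kept = filter_outlier_median(demo_quad)
    print('kept %d of %d pixels' % (len(kept), demo_quad.size))  # 5 of 6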
def perform_bias_subtraction_ave (active_quad, trailing_overclocks):
# sepearate out even and odd detectors for both the active quads and trailing overclocks
# The trailing overclocks are averaged and the average offset is subtracted
# from the active quad. This is done for both, ping and pong
""" Remove offset from active quads. Take care of ping-pong by breaking
Quads and overclocks into even and odd
"""
# sepearate out even and odd detectors
nx_quad,ny_quad = active_quad.shape
bias_subtracted_quad = np.array([[0]*ny_quad]*nx_quad)
even_detector_bias = trailing_overclocks[ :, ::2]
# remove outliers
# First 4 hot lines in even and odd
# last odd lne in odd
even_detector_bias = even_detector_bias[:, 4:]
avg_bias_even = np.mean(even_detector_bias, axis=1)
odd_detector_bias = trailing_overclocks[:, 1::2]
odd_samples = odd_detector_bias[:, 4:]
rows, cols = odd_samples.shape
odd_detector_bias = odd_samples[:, 0:cols-1]
avg_bias_odd = np.mean(odd_detector_bias, axis=1)
even_detector_active_quad = active_quad[:, ::2]
odd_detector_active_quad = active_quad[:, 1::2]
bias_subtracted_quad_even = even_detector_active_quad - avg_bias_even[:, None]
bias_subtracted_quad_odd = odd_detector_active_quad - avg_bias_odd[:, None]
bias_subtracted_quad = np.reshape(bias_subtracted_quad, (nx_quad, ny_quad))
bias_subtracted_quad[:, ::2] = bias_subtracted_quad_even
bias_subtracted_quad[:, 1::2] = bias_subtracted_quad_odd
return bias_subtracted_quad
def perform_bias_subtraction_ave_sto (active_quad, trailing_overclocks):
# sepearate out even and odd detectors for both the active quads and trailing overclocks
# The trailing overclocks are averaged and the average offset is subtracted
# from the active quad. This is done for both, ping and pong
""" Remove offset from active quads. Take care of ping-pong by breaking
Quads and overclocks into even and odd
"""
bias_subtracted_quad = np.zeros((1,1024))
even_detector_bias = trailing_overclocks[:, ::2]
even_detector_bias = even_detector_bias[:, 1:]
avg_bias_even = np.mean(even_detector_bias)
#print(np.mean(avg_bias_even))
odd_detector_bias = trailing_overclocks[:, 1::2]
odd_detector_bias = odd_detector_bias[:, 1:10 ]
avg_bias_odd = np.mean(odd_detector_bias)
# plt.plot(np.mean(even_detector_bias, axis=0).T,'.', color='blue')
# plt.plot(np.mean(odd_detector_bias, axis=0).T,'.', color='black')
# plt.show()
# cc
#
#print(np.mean(avg_bias_odd))
even_detector_active_quad = active_quad[::2]
odd_detector_active_quad = active_quad[1::2]
bias_subtracted_quad_even = even_detector_active_quad - avg_bias_even
bias_subtracted_quad_odd = odd_detector_active_quad - avg_bias_odd
bias_subtracted_quad[:, ::2] = np.array(bias_subtracted_quad_even)
bias_subtracted_quad[:, 1::2] = np.array(bias_subtracted_quad_odd)
#print(avg_bias_even, avg_bias_odd, np.mean(bias_subtracted_quad_even), np.mean(bias_subtracted_quad_odd))
return bias_subtracted_quad
def perform_smear_subtraction(active_quad, int_time):
# the underlying assumption in smear subtraction is that the dark current
#in the storage region is really small and hence neglected from the analysis.
#typically, Csmear = tFT / (ti+ tFT) * (AVG[C(w)] - DCStor * tRO
# tft = 8ms
tFT = 8.3333*10**(3)
ti = int_time
smear_factor = (tFT / (ti+ tFT))* np.mean(active_quad, axis=0)
#print(smear_factor.shape)
#cc
smear_subtracted_quad = active_quad - smear_factor[None, :]
return smear_subtracted_quad
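# Hedged worked example (not from the original test code): with tFT = 8333.3
# (microseconds, as above) and an integration time ti of, say, 93000 in the
# same units, the smear factor is 8333.3 / (93000 + 8333.3) ~ 0.082, i.e.
# roughly 8% of each column's mean signal is subtracted from every pixel in
# that column.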
def perform_Dark_removal(data_file, i):
# calculate dark current
IDL_variable = readsav(data_file)
all_full_frame = IDL_variable.q
quad_full_frame = all_full_frame[:, i , :, :]
avg_quad = np.mean(quad_full_frame[:, :, :], axis=0)
active_quad = avg_quad[4:1028, 10:1034]
tsoc = avg_quad[4:1028, 1034:1056]
dark_current = perform_bias_subtraction_ave(active_quad, tsoc)
return dark_current
def create_image(image_data, title, figure_name, spot):
plt.figure()
ax = plt.gca()
if spot==2:
image = ax.imshow(image_data[720:860, 720:860], cmap='nipy_spectral', origin='lower')
elif spot==1:
image = ax.imshow(image_data[185:325, 170:310], cmap='nipy_spectral', origin='lower')
plt.title(title)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(image, cax= cax)
plt.grid(False)
plt.savefig(figure_name,dpi=95,bbox_inches="tight")
#plt.show()
plt.close('all')
def create_hist(image, title, figure_name, COLOR) :
if np.array(image).ndim ==2:
nx_quad, ny_quad = image.shape
else:
nx_quad= 1
ny_quad = len(image)
#print(ny_quad)
#cc
label = 'Mean = '+ str(round(np.mean(image), 2))
plt.figure(figsize=(8, 5))
plt.hist(np.reshape(image, (nx_quad* ny_quad, 1)),10, facecolor=COLOR, label=label)
plt.grid(True, linestyle=':')
legend = plt.legend(loc='best', ncol=3, shadow=True,
prop={'size':10}, numpoints=1)
legend.get_frame().set_edgecolor('wheat')
legend.get_frame().set_linewidth(2.0)
#plt.xlim(-10, 10)
#plt.ylim(0, 40000)
plt.ylabel('Frequency (# of pixels)', fontsize=12,
fontweight="bold")
plt.xlabel(' Dark current (DN) ', fontsize=12,
fontweight="bold")
plt.title(title)
#plt.savefig(figure_name, dpi=100, bbox_inches="tight")
#plt.show()
plt.close('all')
def plot_row_avg(row_avg, title, figure_name, COLOR, xlabel):
# let's take the mean tsoc for 100 frames
nrows = 1
ncols = 1
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7,5))
fig.subplots_adjust(left=0.125, right=0.95, bottom=0.1, top=0.9,
wspace=0.3, hspace=.25)
ax.plot(row_avg, '.', color=COLOR)
ax.grid(True, linestyle=':')
ax.set_title(title, fontsize=12, fontweight='bold')
ax.set_ylabel('Signal - Offset (DN)', fontsize=12, fontweight='bold')
ax.set_xlabel(xlabel, fontsize=12, fontweight='bold')
#ax.set_ylim(ylim[0], ylim[1])
#plt.savefig(figure_name, dpi=100, bbox_inches="tight")
plt.show()
plt.close('all')
def main():
"""
    The main function
"""
#nx_quad = 1056 # For Tempo
#ny_quad = 1046 # For Tempo
#nlat = nx_quad*2
#nspec = ny_quad*2
file_path = r'F:\TEMPO\Data\GroundTest\FPS\Crosstalk'
file_path_dark = r'F:\TEMPO\Data\GroundTest\FPS\Crosstalk'
save_file_path = r'C:\Users\nmishra\Workspace\TEMPO\Cross_Talk_Test'
outlier_mask = read_outlier_mask()
temp_files = os.listdir(file_path)
for files in range(0, 4):
dframe1 = []
dframe2 = []
rows_max_A = [ ]
cols_max_A = [ ]
rows_max_B = [ ]
cols_max_B = [ ]
rows_max_C = [ ]
cols_max_C = [ ]
rows_max_D = [ ]
cols_max_D = [ ]
save_dir = os.path.join(save_file_path, temp_files[files])
if not os.path.exists(save_dir):
os.makedirs(save_dir)
saved_data_files = os.path.join(file_path, temp_files[files],'Script_Data','saved_quads')
saved_dark_files = os.path.join(file_path_dark, temp_files[files],'Script_Data','saved_quads','Dark')
all_int_files = [each for each in os.listdir(saved_data_files) \
if each.endswith('dat.sav')]
        all_dark_files = [each for each in os.listdir(saved_dark_files) \
                          if each.endswith('dat.sav')]
for data_files in all_int_files:
data_file = os.path.join(saved_data_files, data_files)
print(data_file)
IDL_variable = readsav(data_file)
data_path_name_split = data_files.split('_')
int_time = int(data_path_name_split[-1].split('.')[0])
quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']
color = ['Blue','Green','Red','Orange']
all_full_frame = IDL_variable.q
ylim1= [0, 16000]
ylim2 = [-6, 6]
for i in range(0, 4):
quad_full_frame = all_full_frame[:, i, :, :]
avg_quad = np.mean(quad_full_frame[:, :, :], axis=0)
active_quad = avg_quad[4:1028, 10:1034]
tsoc = avg_quad[4:1028, 1034:1056]
#------perform bias subtraction using trailing overclocks and save the dark current image----------
#bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)
# mask out the outliers
cross_talk_array = avg_quad
nx1, ny1 = cross_talk_array.shape
# let's reshape the array to 1-D so we can work with single loop
#cross_talk_array = np.reshape(cross_talk_array, (nx1*ny1, 1))
if(temp_files[files] in("Channel_A", "Channel_C")) :
if len(data_path_name_split)>9:
spot = 2
input_signal = (data_path_name_split[-5])
quad_illuminated = data_path_name_split[-6]
else:
spot = 1
input_signal = (data_path_name_split[-4])
quad_illuminated = data_path_name_split[-5]
if spot==1:
dark_data_file= os.path.join(saved_dark_files, all_dark_files[0])
elif spot==2:
dark_data_file= os.path.join(saved_dark_files, all_dark_files[1])
# subtract off the dark current
cross_talk_array = cross_talk_array
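                    # NOTE: the assignment above is a no-op, so the dark-current
                    # subtraction mentioned in the comment is effectively disabled;
                    # dark_data_file computed above is not used further in this loop.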
row_average = np.mean(cross_talk_array, axis=1)
column_average = np.mean(cross_talk_array, axis=0)
string1 = quad_illuminated[0:4]+' '+quad_illuminated[4]+ ' Illuminated'
string2 = 'Input Signal = '+ input_signal
string3 = 'spot'+ str(spot)
title1 = quads[i]+' Image\n ' + string1+' @'+string3+', '+string2
title2 = quads[i]+' Row Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
title3 = quads[i]+' Column Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
title4 = quads[i]+' Image Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
elif(temp_files[files] in("Channel_D")) :
if len(data_path_name_split)>9:
spot = 2
input_signal = (data_path_name_split[-5])
quad_illuminated = data_path_name_split[-6]
else:
spot = 1
quad_illuminated = data_path_name_split[-4]
input_signal ='Not Given'
string1 = quad_illuminated[0:4]+' '+quad_illuminated[4]+ ' Illuminated'
string2 = 'Input Signal = '+ input_signal
string3 = 'spot'+ str(spot)
print(string1)
print(string2)
print(string3)
title1 = quads[i]+' Image\n ' + '('+ string1+' @'+string3+', '+string2+')'
title2 = quads[i]+' Row Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
title3 = quads[i]+' Column Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
title4 = quads[i]+' ImageProfile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
else:
if len(data_path_name_split)>8:
spot = 2
quad_illuminated = data_path_name_split[-5]
else:
spot=1
quad_illuminated = data_path_name_split[-4]
string1 = quad_illuminated[0:4]+' '+quad_illuminated[4]+ ' Illuminated'
string3 = 'Spot'+ str(spot)
title1 = quads[i]+' Image\n ' + string1+' @'+string3
title2 = quads[i]+' Row Average Profile \n ' + string1+' @'+string3
title3 = quads[i]+' Column Average Profile \n ' + string1+' @'+string3
title4 = quads[i]+' Image Profile \n ' + string1+' @'+string3
if quad_illuminated.lower() == quads[i].replace(" ","").lower():
ylim = ylim1
#print(ylim)
else:
                    # the original line indexed smear_subtracted_quad, which is undefined in
                    # this scope; clamp the array actually in use so the script can run
                    cross_talk_array[cross_talk_array > 1500] = np.mean(cross_talk_array)
ylim = ylim2
if spot == 1:
#rows, cols = np.reshape()
if i == 0:
rows_max_A.append(cross_talk_array[185:325, 170:310])
elif i == 1:
rows_max_B.append(cross_talk_array[185:325, 170:310])
elif i == 2:
rows_max_C.append(cross_talk_array[185:325, 170:310])
elif i == 3:
rows_max_D.append(cross_talk_array[185:325, 170:310])
elif spot==2:
if i==0:
rows_max_A.append(cross_talk_array[720:860, 720:860])
elif i==1:
rows_max_B.append(cross_talk_array[720:860, 720:860])
elif i==2:
rows_max_C.append(cross_talk_array[720:860, 720:860])
elif i==3:
rows_max_D.append(cross_talk_array[720:860, 720:860])
quad_save = 'Cross_Talk_Image_Ghost'
save_dir_image = os.path.join(save_dir, quads[i], quad_save)
if not os.path.exists(save_dir_image):
os.makedirs(save_dir_image)
figure_name = save_dir_image + '/'+ data_files + '.png'
create_image(cross_talk_array, title1, figure_name, spot)
#save_plot = 'plot_row_average'
save_plot = 'plot_all_data'
save_dir_plot = os.path.join(save_dir, quads[i], save_plot)
if not os.path.exists(save_dir_plot):
os.makedirs(save_dir_plot)
figure_name = save_dir_plot + '/'+ data_files + '.png'
xlabel = 'Pixel Indices (#)'
plot_row_avg(cross_talk_array, title4, figure_name, color[i], xlabel)
save_plot = 'plot_column_average'
save_dir_plot = os.path.join(save_dir, quads[i], save_plot)
if not os.path.exists(save_dir_plot):
os.makedirs(save_dir_plot)
figure_name = save_dir_plot + '/'+ data_files + '.png'
xlabel = 'Spatial Pixel Indices (#)'
#plot_row_avg(column_average, title3, figure_name, color[i], xlabel)
save_plot = 'plot_row_average'
save_dir_plot = os.path.join(save_dir, quads[i], save_plot)
if not os.path.exists(save_dir_plot):
os.makedirs(save_dir_plot)
figure_name = save_dir_plot + '/'+ data_files + '.png'
xlabel = 'Spectral Pixel Indices (#)'
#plot_row_avg(row_average, title2, figure_name, color[i], xlabel)
#cc
# dframe1 = pd.DataFrame(
# {'Quad_A_rows' : rows_max_A,
# 'Quad_B_rows' : rows_max_B,
# 'Quad_C_rows' : rows_max_C,
# 'Quad_D_rows': rows_max_D,
# })
# dframe2 = pd.DataFrame(
# {'Quad_A_cols' : cols_max_A,
# 'Quad_B_cols' : cols_max_B,
# 'Quad_C_cols' : cols_max_C,
# 'Quad_D_cols': cols_max_D,
# })
ndims, row_s,col_s = np.array(rows_max_A).shape
rows_max_A = np.reshape(np.array(rows_max_A), (ndims*row_s*col_s, 1))
rows_max_B = np.reshape(np.array(rows_max_B), (ndims* row_s*col_s,1 ))
rows_max_C = np.reshape(np.array(rows_max_C), (ndims*row_s*col_s, 1))
rows_max_D = np.reshape(np.array(rows_max_D), (ndims*row_s*col_s, 1))
csv_name_A = save_dir+'/'+temp_files[files]+'_cross_talk_A.csv'
csv_name_B = save_dir+'/'+temp_files[files]+'_cross_talk_B.csv'
csv_name_C = save_dir+'/'+temp_files[files]+'_cross_talk_C.csv'
csv_name_D = save_dir+'/'+temp_files[files]+'_cross_talk_D.csv'
np.savetxt(csv_name_A, np.asarray(rows_max_A), delimiter=',', fmt='%1.2f')
np.savetxt(csv_name_B, np.asarray(rows_max_B), delimiter=',', fmt='%1.2f')
np.savetxt(csv_name_C, np.asarray(rows_max_C), delimiter=',', fmt='%1.2f')
np.savetxt(csv_name_D, np.asarray(rows_max_D), delimiter=',', fmt='%1.2f')
#csv_name_cols = save_dir+'/'+temp_files[files]+'_cols_mean.csv'
#dframe1.to_csv(csv_name_rows, header=True, columns=['Quad_A_rows','Quad_B_rows','Quad_C_rows','Quad_D_rows'])
#dframe2.to_csv(csv_name_cols, header=True, columns=['Quad_A_cols','Quad_B_cols','Quad_C_cols','Quad_D_cols'])
#cc
if __name__ == "__main__":
main()
|
[
"nischal.mishra@gmail.com"
] |
nischal.mishra@gmail.com
|
a58b76fab4d8fa60abf11ac71cab242a7beccad6
|
c5a1c95e9d8ce937f71caf8340cf11fe98e64f56
|
/day9/problem5/[이재형] 하샤드 수.py
|
ff36e229a9d9bb46a7cac28263c8e782cc36fcf6
|
[] |
no_license
|
Boot-Camp-Coding-Test/Programmers
|
963e5ceeaa331d99fbc7465f7b129bd68e96eae3
|
83a4b62ba2268a47859a6ce88ae1819bc96dcd85
|
refs/heads/main
| 2023-05-23T08:21:57.398594
| 2021-06-12T16:39:21
| 2021-06-12T16:39:21
| 366,589,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
def solution(x):
a = []
for i in range(len(str(x))):
a.append(int(str(x)[i]))
if x % sum(a) == 0:
return True
else :
return False
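# Example usage (illustrative): solution(18) returns True because 1 + 8 = 9 divides 18,
# while solution(19) returns False because 1 + 9 = 10 does not divide 19.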
|
[
"noreply@github.com"
] |
Boot-Camp-Coding-Test.noreply@github.com
|
1e1a220013ea65a97547f55b52bf0e6e8ba7ee32
|
4b742f57981b3db902e7048fe05faf263ff52138
|
/base/migrations/0010_pgpkey_passphrase.py
|
174c1f9c2f96097e66f55808d6348a2d55d10933
|
[
"MIT"
] |
permissive
|
erichuang2015/Hiren-MailBox
|
eace0c90b5815f3e4a660dfda75910256704db96
|
ff4cad0998007e8c9a2a200af3a2e05a3d947d12
|
refs/heads/master
| 2020-04-02T01:31:55.680288
| 2018-09-13T15:21:46
| 2018-09-13T15:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# Generated by Django 2.0.4 on 2018-05-22 04:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0009_auto_20180504_0501'),
]
operations = [
migrations.AddField(
model_name='pgpkey',
name='passphrase',
field=models.TextField(default=''),
preserve_default=False,
),
]
|
[
"git.pyprism@gmail.com"
] |
git.pyprism@gmail.com
|
f8b32217c9daae58faab52a87b96758125de8793
|
4fe52c6f01afb05ac787a361a239466ceac69964
|
/pyjournal2/build_util.py
|
9acc2f6977346f32e542ec3806689de1074d6201
|
[
"BSD-3-Clause"
] |
permissive
|
cmsquared/pyjournal2
|
85beec6e3a0423d0ee873d189c3a879dd9a7db7c
|
cfa67529033c5fd7bcd5c60b87c8122ef8c22425
|
refs/heads/master
| 2020-04-03T18:30:15.119923
| 2018-10-31T00:41:07
| 2018-10-31T00:41:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,934
|
py
|
"""This module controls building the journal from the entry sources"""
import os
import sys
import webbrowser
import pyjournal2.shell_util as shell_util
def get_source_dir(defs):
"""return the directory where we put the sources"""
return "{}/journal-{}/source/".format(defs["working_path"], defs["nickname"])
def get_topics(defs):
"""return a list of the currently known topics"""
source_dir = get_source_dir(defs)
topics = []
# get the list of directories in source/ -- these are the topics
for d in os.listdir(source_dir):
if os.path.isdir(os.path.join(source_dir, d)) and not d.startswith("_"):
topics.append(d)
return topics
def create_topic(topic, defs):
"""create a new topic directory"""
source_dir = get_source_dir(defs)
try:
os.mkdir(os.path.join(source_dir, topic))
    except OSError:
        # sys.error() does not exist; exit with an error message instead
        sys.exit("unable to create a new topic")
def build(defs, show=0):
"""build the journal. This entails writing the TOC files that link to
the individual entries and then running the Sphinx make command
"""
source_dir = get_source_dir(defs)
topics = get_topics(defs)
# for each topic, we want to create a "topic.rst" that then has
# things subdivided by year-month, and that a
# "topic-year-month.rst" that includes the individual entries
for topic in topics:
tdir = os.path.join(source_dir, topic)
os.chdir(tdir)
# look over the directories here, they will be in the form YYYY-MM-DD
years = []
entries = []
for d in os.listdir(tdir):
if os.path.isdir(os.path.join(tdir, d)):
y, _, _ = d.split("-")
if y not in years:
years.append(y)
entries.append(d)
years.sort()
entries.sort()
# we need to create ReST files of the form YYYY.rst. These
# will each then contain the links to the entries for that
# year
for y in years:
y_entries = [q for q in entries if q.startswith(y)]
with open("{}.rst".format(y), "w") as yf:
yf.write("****\n")
yf.write("{}\n".format(y))
yf.write("****\n\n")
yf.write(".. toctree::\n")
yf.write(" :maxdepth: 2\n")
yf.write(" :caption: Contents:\n\n")
for entry in y_entries:
yf.write(" {}/{}.rst\n".format(entry, entry))
# now write the topic.rst
with open("{}.rst".format(topic), "w") as tf:
tf.write(len(topic)*"*" + "\n")
tf.write("{}\n".format(topic))
tf.write(len(topic)*"*" + "\n")
tf.write(".. toctree::\n")
tf.write(" :maxdepth: 2\n")
tf.write(" :caption: Contents:\n\n")
for y in years:
tf.write(" {}.rst\n".format(y))
# now write the index.rst
os.chdir(source_dir)
with open("index.rst", "w") as mf:
mf.write("Research Journal\n")
mf.write("================\n\n")
mf.write(".. toctree::\n")
mf.write(" :maxdepth: 2\n")
mf.write(" :caption: Contents:\n\n")
for topic in sorted(topics):
mf.write(" {}/{}\n".format(topic, topic))
mf.write("\n")
mf.write("Indices and tables\n")
mf.write("==================\n\n")
mf.write("* :ref:`genindex`\n")
mf.write("* :ref:`modindex`\n")
mf.write("* :ref:`search`\n")
# now do the building
build_dir = "{}/journal-{}/".format(defs["working_path"], defs["nickname"])
os.chdir(build_dir)
_, _, rc = shell_util.run("make html")
if rc != 0:
print("build may have been unsuccessful")
index = os.path.join(build_dir, "build/html/index.html")
# use webbrowser module
if show == 1:
webbrowser.open_new_tab(index)
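# Minimal usage sketch (illustrative, not executed): "defs" is the configuration
# dictionary this module reads; the path and nickname below are hypothetical.
#
#   defs = {"working_path": "/home/user/journals", "nickname": "research"}
#   build(defs, show=1)   # rebuild the Sphinx HTML and open index.html in a browser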
|
[
"michael.zingale@stonybrook.edu"
] |
michael.zingale@stonybrook.edu
|
30a77a5b2a326c40c06e455066908091bac0870a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_113/ch44_2020_09_30_10_47_17_987015.py
|
f078ba9a9524ee5b166c21af69a4c0e35a23748f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#x=True
lista = ['Janeiro','Fevereiro','Março','Abril','Maio','Junho','Julho','Agosto','Setembro','Outubro','Novembro','Dezembro']
#while x==True:
# the month comes in as text; convert it to an int and shift to a 0-based list index
mes = int(input('Qual o mês? '))
print(lista[mes - 1])
|
[
"you@example.com"
] |
you@example.com
|
224efd07081d700cef2f4bff2f9f658dcccc15e2
|
256efb0e9ff8b7420b412c260e6c05cd7c52c5ce
|
/B/resolve.py
|
5e0f2bb0fd1bde1c3ffc1f155dfc45171749a311
|
[
"MIT"
] |
permissive
|
staguchi0703/ABC176
|
37a85f6d83570967696712a98dd39e1f1a08b04b
|
16f2f188ef5c73f85d08b028f14cd963b33d55af
|
refs/heads/master
| 2022-12-07T18:15:02.659948
| 2020-08-24T15:00:29
| 2020-08-24T15:00:29
| 289,476,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
def resolve():
    '''
    Read an integer as a string and print 'Yes' if the sum of its digits is
    divisible by 9 (equivalently, if the number itself is divisible by 9),
    otherwise print 'No'.
    '''
N = input()
sum_num = 0
for item in N:
sum_num += int(item)
if sum_num % 9 == 0:
print('Yes')
else:
print('No')
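# Note: a number and its digit sum leave the same remainder modulo 9, so the loop
# above is equivalent to checking int(N) % 9 == 0 directly.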
|
[
"s.taguchi0703@gmail.com"
] |
s.taguchi0703@gmail.com
|
611a492f714cd96b2ba9c94b3644617e50c8c6ce
|
86294539ffa65b34a862b200c84ee068187dc743
|
/do2things/manage.py
|
78b2063220ba03afa6e0bd0a501b0280f45ed107
|
[
"MIT"
] |
permissive
|
tlake/do2things
|
6acb4f43990b0d0e4a9b80090e21246c1d39398a
|
4e83bea1fc579006200e9ca3a627c1bc04a6a53b
|
refs/heads/master
| 2021-01-21T04:24:57.108087
| 2016-08-22T08:56:11
| 2016-08-22T08:56:11
| 39,576,039
| 0
| 0
| null | 2015-08-27T01:28:15
| 2015-07-23T15:40:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "do2things.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"tanner.lake@gmail.com"
] |
tanner.lake@gmail.com
|
3d2281ceea099e3636a2d5593f4e69d3ab66ddbf
|
c7846ee0828539c2a2019928c1cbf3abd35665bf
|
/1226.py
|
e40445bed21211b32f058a29fb64d1cef368c25a
|
[] |
no_license
|
whiteblue0/sw_problems
|
10476601c8d6d68d42e2f30af87fcde1e5dbbcc5
|
1cefc6236cccc20477bf4eadb458a0fd06b09126
|
refs/heads/master
| 2020-06-20T10:44:57.463275
| 2020-05-03T07:27:57
| 2020-05-03T07:27:57
| 197,098,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
import sys
sys.stdin = open('1226.txt')
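# Grid encoding inferred from the checks in ispass()/DFS() below:
# 1 = wall, 2 = start cell, 3 = goal cell, any other value = open passage.
# Each 16x16 board prints 1 if the goal is reachable from the start, else 0.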
def ispass(y,x):
if 0<=y<L and 0<=x<L and data[y][x] != 1 and visited[y][x] == 0:
return True
else:
return False
def DFS(sy,sx):
global end
visited[sy][sx] = 1
if data[sy][sx] == 3:
end = 1
for i in range(4):
ny = sy + dy[i]
nx = sx + dx[i]
if ispass(ny, nx):
visited[ny][nx] = 1
DFS(ny,nx)
# 우하좌상
dy = [0,1,0,-1]
dx = [1,0,-1,0]
T = 10
for tc in range(1,T+1):
N = int(input())
L = 16
data = [list(map(int, input())) for _ in range(L)]
visited = [[0]*L for _ in range(L)]
for i in range(L):
for j in range(L):
if data[i][j] == 2:
start = (i,j)
end = 0
DFS(start[0],start[1])
# for i in range(L):
# print(visited[i])
print('#{} {}'.format(tc,end))
|
[
"21port@naver.com"
] |
21port@naver.com
|
0296d247cff0d46ffe781196db159f2dc53ad9a7
|
0dc3e9b70da8ccd056e0a0fab2b1d8f850c3d470
|
/lantern/django/django_celery/src/apps/dealers/models.py
|
bc779127742bd20a2e9942ebdd4779103f3156e4
|
[] |
no_license
|
ArturYefriemov/green_lantern
|
28e7150af7b9d2281a107ad80026828ad77af62a
|
2841b647e1bfae4a7505e91e8a8695d03f35a3a2
|
refs/heads/master
| 2021-03-01T16:54:58.881835
| 2020-11-17T19:42:23
| 2020-11-17T19:42:23
| 245,799,969
| 0
| 0
| null | 2020-07-14T18:51:13
| 2020-03-08T11:13:32
|
Python
|
UTF-8
|
Python
| false
| false
| 735
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class Country(models.Model):
name = models.CharField(max_length=32, unique=True)
class City(models.Model):
name = models.CharField(max_length=32, db_index=True)
country = models.ForeignKey(to='Country', on_delete=models.CASCADE, null=True)
class Address(models.Model):
address1 = models.CharField(max_length=128)
address2 = models.CharField(max_length=128, blank=True)
zip_code = models.PositiveSmallIntegerField()
city = models.ForeignKey(to='City', on_delete=models.CASCADE)
class Dealer(AbstractUser):
address = models.ForeignKey(to='Address', on_delete=models.CASCADE, null=True)
|
[
"odarchenko@ex.ua"
] |
odarchenko@ex.ua
|
0f12e75f326736ce1da7a7a6b1fb5297088bafd5
|
5bfbf31332a5c4750ab57d305f400aa5e20bf6bd
|
/contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_utah_zip.py
|
5c11f0c756f7c389107ebd4a9b7e6f5e7f2270a7
|
[
"Apache-2.0"
] |
permissive
|
alexsherstinsky/great_expectations
|
9d4ae4c06546c5ab2ee0d04fb7840e3515c25677
|
2fc4bb36a5b3791c8ada97c5364531cd7510d4ed
|
refs/heads/develop
| 2023-08-04T13:13:38.978967
| 2023-07-24T18:29:46
| 2023-07-24T18:29:46
| 203,888,556
| 1
| 0
|
Apache-2.0
| 2020-07-27T09:12:21
| 2019-08-22T23:31:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,481
|
py
|
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_utah_zip(zip: str):
list_of_dicts_of_utah_zips = zipcodes.filter_by(state="UT")
list_of_utah_zips = [d["zip_code"] for d in list_of_dicts_of_utah_zips]
    # check the type first so that len() is never called on a non-string value
    if not isinstance(zip, str):
        return False
    elif len(zip) > 10:
        return False
    elif zip in list_of_utah_zips:
        return True
    else:
        return False
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidUtahZip(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_utah_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_utah_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidUtahZip(ColumnMapExpectation):
"""Expect values in this column to be valid Utah zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_utah_zip": ["84001", "84320", "84713", "84791"],
"invalid_utah_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_utah_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_utah_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_utah_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidUtahZip().print_diagnostic_checklist()
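# Quick illustrative check of the helper, using values from the examples above:
#   is_valid_utah_zip("84001")  -> True   (a Utah ZIP according to the zipcodes package)
#   is_valid_utah_zip("99999")  -> False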
|
[
"noreply@github.com"
] |
alexsherstinsky.noreply@github.com
|
ec61f2c11c142888f2e43279e15779776f084d75
|
b75b3bb6a2c6dd8b4a5b89718eb83d6451000cd4
|
/hackbright.py
|
715553d9b927069928d6bfc85808ce5824d2e0b2
|
[] |
no_license
|
CodeHotPink/project-tracking-flask
|
22efebeaddf83d2746ba9137f1b478da8c34b1a9
|
bdd58b17034406f28d5ceaa0c834eb0d6ad06be3
|
refs/heads/master
| 2020-04-03T18:46:04.010020
| 2018-10-31T04:02:38
| 2018-10-31T04:02:38
| 155,496,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,034
|
py
|
"""Hackbright Project Tracker.
A front-end for a database that allows users to work with students, class
projects, and the grades students receive in class projects.
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy()
def connect_to_db(app):
"""Connect the database to our Flask app."""
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///hackbright'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
def get_student_by_github(github):
"""Given a GitHub account name, print info about the matching student."""
QUERY = """
SELECT first_name, last_name, github
FROM students
WHERE github = :github
"""
db_cursor = db.session.execute(QUERY, {'github': github})
row = db_cursor.fetchone()
print(row)
print("Student: {} {}\nGitHub account: {}".format(row[0], row[1], row[2]))
return row
def make_new_student(first_name, last_name, github):
"""Add a new student and print confirmation.
Given a first name, last name, and GitHub account, add student to the
database and print a confirmation message.
"""
QUERY = """
INSERT INTO students (first_name, last_name, github)
VALUES (:first_name, :last_name, :github)
"""
db.session.execute(QUERY, {'first_name': first_name,
'last_name': last_name,
'github': github,})
db.session.commit()
    print(f"Successfully added student: {first_name} {last_name}")
def get_project_by_title(title):
"""Given a project title, print information about the project."""
QUERY = """
SELECT title, description, max_grade
FROM projects
WHERE title = :title
"""
db_cursor = db.session.execute(QUERY, {'title': title})
row = db_cursor.fetchone()
print(f"Title: {row[0]}\nDescription: {row[1]}\nMaximum Grade: {row[2]}")
def get_grade_by_github_title(github, title):
"""Print grade student received for a project."""
QUERY = """
SELECT student_github, project_title, grade
FROM grades
WHERE student_github = :github AND project_title = :title
"""
db_cursor = db.session.execute(QUERY, {'github': github,
'title': title})
row = db_cursor.fetchone()
print(f"Github: {row[0]}\nProject Title: {row[1]}\nGrade: {row[2]}")
def assign_grade(github, title, grade):
"""Assign a student a grade on an assignment and print a confirmation."""
QUERY = """
INSERT INTO grades (student_github, project_title, grade)
VALUES (:github, :title, :grade)
"""
db.session.execute(QUERY,{'github': github,
'title': title,
'grade': grade})
db.session.commit()
print(f"Successfully added {github}'s grade for {title}")
def add_project(title, description, max_grade):
"""Creates new project in projects table in Hackbright database. Will print confirmation."""
QUERY = """
INSERT INTO projects (title, description, max_grade)
VALUES (:title,:description, :max_grade)
"""
db.session.execute(QUERY,{'title': title,
'description': description,
'max_grade': max_grade})
    db.session.commit()
    print(f"Successfully added {title}.")
def handle_input():
"""Main loop.
Repeatedly prompt for commands, performing them, until 'quit' is received
as a command.
"""
command = None
while command != "quit":
input_string = input("HBA Database> ")
tokens = input_string.split()
command = tokens[0]
args = tokens[1:]
print(args)
if command == "student":
github = args[0]
get_student_by_github(github)
elif command == "new_student":
first_name, last_name, github = args # unpack!
make_new_student(first_name, last_name, github)
elif command == "project":
title = args[0]
get_project_by_title(title)
elif command == "github_grade":
github, title = args # unpack!
get_grade_by_github_title(github, title)
elif command == "assign_grade":
github, title, grade = args # unpack!
assign_grade(github, title, grade)
elif command == "add_project":
title = args[0]
            project_desc = " ".join(args[1:-1])  # join the multi-word description back into a single string
print(type(project_desc))
grade_max = args[-1]
add_project(title, project_desc, grade_max)
else:
if command != "quit":
print("Invalid Entry. Try again.")
if __name__ == "__main__":
connect_to_db(app)
handle_input()
# To be tidy, we close our database connection -- though,
# since this is where our program ends, we'd quit anyway.
db.session.close()
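# Example session (illustrative; the GitHub handle and project title are hypothetical
# and must already exist in the hackbright database):
#
#   HBA Database> student jhacks
#   HBA Database> project Markov
#   HBA Database> assign_grade jhacks Markov 90
#   HBA Database> quit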
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
23c2de5fd645c39cbadd4ecdb4a8572487884ba8
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/urlpatterns/path_same_name_urls.py
|
d7ea5431b1e2e70e97338b78591e99ba67df435e
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
from django.urls import path, re_path, register_converter
from . import converters, views
register_converter(converters.DynamicConverter, "to_url_value_error")
urlpatterns = [
# Different number of arguments.
path("number_of_args/0/", views.empty_view, name="number_of_args"),
path("number_of_args/1/<value>/", views.empty_view, name="number_of_args"),
# Different names of the keyword arguments.
path("kwargs_names/a/<a>/", views.empty_view, name="kwargs_names"),
path("kwargs_names/b/<b>/", views.empty_view, name="kwargs_names"),
# Different path converters.
path("converter/path/<path:value>/", views.empty_view, name="converter"),
path("converter/str/<str:value>/", views.empty_view, name="converter"),
path("converter/slug/<slug:value>/", views.empty_view, name="converter"),
path("converter/int/<int:value>/", views.empty_view, name="converter"),
path("converter/uuid/<uuid:value>/", views.empty_view, name="converter"),
# Different regular expressions.
re_path(r"^regex/uppercase/([A-Z]+)/", views.empty_view, name="regex"),
re_path(r"^regex/lowercase/([a-z]+)/", views.empty_view, name="regex"),
# converter.to_url() raises ValueError (no match).
path(
"converter_to_url/int/<value>/",
views.empty_view,
name="converter_to_url",
),
path(
"converter_to_url/tiny_int/<to_url_value_error:value>/",
views.empty_view,
name="converter_to_url",
),
]
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
c5673a94f94f72233370c9935ad7b182c58ba065
|
47a98fed42dc2e0b589e3f08ff9342a3d924c7ac
|
/pyblog/XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/epXVRECNA2MadRw60lZ38aCPhTXzoduilN38-UtKc2M=/python.sys.pyi
|
cb99aafec30445a55c1b4a04ac86042755257854
|
[] |
no_license
|
mehedi432/python
|
bd1c592edd622ae435c9f81c0771684048290e0a
|
725236e1b700ef41612ccf4f2aaccdf9bc1586d4
|
refs/heads/master
| 2020-06-06T22:31:44.548167
| 2019-06-28T07:15:27
| 2019-06-28T07:15:27
| 145,439,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,814
|
pyi
|
import _io as _mod__io
import builtins as _mod_builtins
import types as _mod_types
def __displayhook__():
'displayhook(object) -> None\n\nPrint an object to sys.stdout and also save it in builtins._\n'
pass
__doc__ = "This module provides access to some objects used or maintained by the\ninterpreter and to functions that interact strongly with the interpreter.\n\nDynamic objects:\n\nargv -- command line arguments; argv[0] is the script pathname if known\npath -- module search path; path[0] is the script directory, else ''\nmodules -- dictionary of loaded modules\n\ndisplayhook -- called to show results in an interactive session\nexcepthook -- called to handle any uncaught exception other than SystemExit\n To customize printing in an interactive session or to install a custom\n top-level exception handler, assign other functions to replace these.\n\nstdin -- standard input file object; used by input()\nstdout -- standard output file object; used by print()\nstderr -- standard error object; used for error messages\n By assigning other file objects (or objects that behave like files)\n to these, it is possible to redirect all of the interpreter's I/O.\n\nlast_type -- type of last uncaught exception\nlast_value -- value of last uncaught exception\nlast_traceback -- traceback of last uncaught exception\n These three are only available in an interactive session after a\n traceback has been printed.\n\nStatic objects:\n\nbuiltin_module_names -- tuple of module names built into this interpreter\ncopyright -- copyright notice pertaining to this interpreter\nexec_prefix -- prefix used to find the machine-specific Python library\nexecutable -- absolute path of the executable binary of the Python interpreter\nfloat_info -- a struct sequence with information about the float implementation.\nfloat_repr_style -- string indicating the style of repr() output for floats\nhash_info -- a struct sequence with information about the hash algorithm.\nhexversion -- version information encoded as a single integer\nimplementation -- Python implementation information.\nint_info -- a struct sequence with information about the int implementation.\nmaxsize -- the largest supported length of containers.\nmaxunicode -- the value of the largest Unicode code point\nplatform -- platform identifier\nprefix -- prefix used to find the Python library\nthread_info -- a struct sequence with information about the thread implementation.\nversion -- the version of this interpreter as a string\nversion_info -- version information as a named tuple\n__stdin__ -- the original stdin; don't touch!\n__stdout__ -- the original stdout; don't touch!\n__stderr__ -- the original stderr; don't touch!\n__displayhook__ -- the original displayhook; don't touch!\n__excepthook__ -- the original excepthook; don't touch!\n\nFunctions:\n\ndisplayhook() -- print an object to the screen, and save it in builtins._\nexcepthook() -- print an exception and its traceback to sys.stderr\nexc_info() -- return thread-safe information about the current exception\nexit() -- exit the interpreter by raising SystemExit\ngetdlopenflags() -- returns flags to be used for dlopen() calls\ngetprofile() -- get the global profiling function\ngetrefcount() -- return the reference count for an object (plus one :-)\ngetrecursionlimit() -- return the max recursion depth for the interpreter\ngetsizeof() -- return the size of an object in bytes\ngettrace() -- get the global debug tracing function\nsetcheckinterval() -- control how often the interpreter checks for events\nsetdlopenflags() -- set the flags to be used for dlopen() calls\nsetprofile() -- set the global profiling function\nsetrecursionlimit() -- set the max recursion depth for the interpreter\nsettrace() -- set the global 
debug tracing function\n"
def __excepthook__():
'excepthook(exctype, value, traceback) -> None\n\nHandle an exception by displaying it with a traceback on sys.stderr.\n'
pass
def __interactivehook__():
pass
__name__ = 'sys'
__package__ = ''
__stderr__ = _mod__io.TextIOWrapper()
__stdin__ = _mod__io.TextIOWrapper()
__stdout__ = _mod__io.TextIOWrapper()
def _clear_type_cache():
'_clear_type_cache() -> None\nClear the internal type lookup cache.'
pass
def _current_frames():
"_current_frames() -> dictionary\n\nReturn a dictionary mapping each current thread T's thread id to T's\ncurrent stack frame.\n\nThis function should be used for specialized purposes only."
return dict()
def _debugmallocstats():
"_debugmallocstats()\n\nPrint summary info to stderr about the state of\npymalloc's structures.\n\nIn Py_DEBUG mode, also perform some expensive internal consistency\nchecks.\n"
pass
def _getframe(depth=None):
'_getframe([depth]) -> frameobject\n\nReturn a frame object from the call stack. If optional integer depth is\ngiven, return the frame object that many calls below the top of the stack.\nIf that is deeper than the call stack, ValueError is raised. The default\nfor depth is zero, returning the frame at the top of the call stack.\n\nThis function should be used for internal and specialized\npurposes only.'
pass
_git = _mod_builtins.tuple()
_home = '/usr/bin'
_xoptions = _mod_builtins.dict()
abiflags = 'm'
api_version = 1013
argv = _mod_builtins.list()
base_exec_prefix = '/usr'
base_prefix = '/usr'
builtin_module_names = _mod_builtins.tuple()
byteorder = 'little'
def call_tracing(func, args):
'call_tracing(func, args) -> object\n\nCall func(*args), while tracing is enabled. The tracing state is\nsaved, and restored afterwards. This is intended to be called from\na debugger from a checkpoint, to recursively debug some other code.'
pass
def callstats():
'callstats() -> tuple of integers\n\nReturn a tuple of function call statistics, if CALL_PROFILE was defined\nwhen Python was built. Otherwise, return None.\n\nWhen enabled, this function returns detailed, implementation-specific\ndetails about the number of function calls executed. The return value is\na 11-tuple where the entries in the tuple are counts of:\n0. all function calls\n1. calls to PyFunction_Type objects\n2. PyFunction calls that do not create an argument tuple\n3. PyFunction calls that do not create an argument tuple\n and bypass PyEval_EvalCodeEx()\n4. PyMethod calls\n5. PyMethod calls on bound methods\n6. PyType calls\n7. PyCFunction calls\n8. generator calls\n9. All other calls\n10. Number of stack pops performed by call_function()'
pass
copyright = 'Copyright (c) 2001-2018 Python Software Foundation.\nAll Rights Reserved.\n\nCopyright (c) 2000 BeOpen.com.\nAll Rights Reserved.\n\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.\nAll Rights Reserved.\n\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\nAll Rights Reserved.'
def displayhook(object):
'displayhook(object) -> None\n\nPrint an object to sys.stdout and also save it in builtins._\n'
pass
dont_write_bytecode = True
def exc_info():
'exc_info() -> (type, value, traceback)\n\nReturn information about the most recent exception caught by an except\nclause in the current stack frame or in an older stack frame.'
return tuple()
def excepthook(exctype, value, traceback):
'excepthook(exctype, value, traceback) -> None\n\nHandle an exception by displaying it with a traceback on sys.stderr.\n'
pass
exec_prefix = '/home/mehedi/python/blog/venv'
executable = '/home/mehedi/python/blog/venv/bin/python'
def exit(status=None):
'exit([status])\n\nExit the interpreter by raising SystemExit(status).\nIf the status is omitted or None, it defaults to zero (i.e., success).\nIf the status is an integer, it will be used as the system exit status.\nIf it is another kind of object, it will be printed and the system\nexit status will be one (i.e., failure).'
pass
class flags(_mod_builtins.tuple):
'sys.flags\n\nFlags provided through command line arguments or environment vars.'
@staticmethod
def __add__(self, value):
'Return self+value.'
return __T__()
__class__ = flags
@staticmethod
def __contains__(self, key):
'Return key in self.'
return False
@staticmethod
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
@staticmethod
def __dir__(self):
'__dir__() -> list\ndefault dir() implementation'
return ['']
@staticmethod
def __eq__(self, value):
'Return self==value.'
return False
@staticmethod
def __format__(self, format_spec):
'default object formatter'
return ''
@staticmethod
def __ge__(self, value):
'Return self>=value.'
return False
@staticmethod
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
@staticmethod
def __getitem__(self, key):
'Return self[key].'
pass
@staticmethod
def __getnewargs__(self):
return ()
@staticmethod
def __gt__(self, value):
'Return self>value.'
return False
@staticmethod
def __hash__(self):
'Return hash(self).'
return 0
@staticmethod
def __init__(self, *args, **kwargs):
'sys.flags\n\nFlags provided through command line arguments or environment vars.'
pass
@staticmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@staticmethod
def __iter__(self):
'Implement iter(self).'
return __T__()
@staticmethod
def __le__(self, value):
'Return self<=value.'
return False
@staticmethod
def __len__(self):
'Return len(self).'
return 0
@staticmethod
def __lt__(self, value):
'Return self<value.'
return False
@staticmethod
def __mul__(self, value):
'Return self*value.'
return __T__()
@staticmethod
def __ne__(self, value):
'Return self!=value.'
return False
@staticmethod
def __reduce__(self):
return ''; return ()
@staticmethod
def __reduce_ex__(self, protocol):
'helper for pickle'
return ''; return ()
@staticmethod
def __repr__(self):
'Return repr(self).'
return ''
@staticmethod
def __rmul__(self, value):
'Return value*self.'
return __T__()
@staticmethod
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@staticmethod
def __sizeof__(self):
'__sizeof__() -> int\nsize of object in memory, in bytes'
return 0
@staticmethod
def __str__(self):
'Return str(self).'
return ''
@staticmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
bytes_warning = 0
@staticmethod
def count():
'T.count(value) -> integer -- return number of occurrences of value'
return 1
debug = 0
dont_write_bytecode = 1
hash_randomization = 1
ignore_environment = 1
@staticmethod
def index():
'T.index(value, [start, [stop]]) -> integer -- return first index of value.\nRaises ValueError if the value is not present.'
return 1
inspect = 0
interactive = 0
isolated = 0
n_fields = 13
n_sequence_fields = 13
n_unnamed_fields = 0
no_site = 0
no_user_site = 0
optimize = 0
quiet = 0
verbose = 0
class __float_info(_mod_builtins.tuple):
"sys.float_info\n\nA structseq holding information about the float type. It contains low level\ninformation about the precision and internal representation. Please study\nyour system's :file:`float.h` for more information."
def __add__(self, value):
'Return self+value.'
return __float_info()
__class__ = __float_info
def __contains__(self, key):
'Return key in self.'
return False
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
@classmethod
def __dir__(self):
'__dir__() -> list\ndefault dir() implementation'
return ['']
def __eq__(self, value):
'Return self==value.'
return False
@classmethod
def __format__(self, format_spec):
'default object formatter'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
@classmethod
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
"sys.float_info\n\nA structseq holding information about the float type. It contains low level\ninformation about the precision and internal representation. Please study\nyour system's :file:`float.h` for more information."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __float_info()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return __float_info()
def __ne__(self, value):
'Return self!=value.'
return False
@classmethod
def __reduce__(self):
return ''; return ()
@classmethod
def __reduce_ex__(self, protocol):
'helper for pickle'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __rmul__(self, value):
'Return value*self.'
return __float_info()
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@classmethod
def __sizeof__(self):
'__sizeof__() -> int\nsize of object in memory, in bytes'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@classmethod
def count(cls):
'T.count(value) -> integer -- return number of occurrences of value'
return 1
dig = 15
epsilon = 2.220446049250313e-16
@classmethod
def index(cls):
'T.index(value, [start, [stop]]) -> integer -- return first index of value.\nRaises ValueError if the value is not present.'
return 1
mant_dig = 53
max = 1.7976931348623157e+308
max_10_exp = 308
max_exp = 1024
min = 2.2250738585072014e-308
min_10_exp = -307
min_exp = -1021
n_fields = 11
n_sequence_fields = 11
n_unnamed_fields = 0
radix = 2
rounds = 1
float_repr_style = 'short'
def get_asyncgen_hooks():
'get_asyncgen_hooks()\n\nReturn a namedtuple of installed asynchronous generators hooks (firstiter, finalizer).'
pass
def get_coroutine_wrapper():
'get_coroutine_wrapper()\n\nReturn the wrapper for coroutine objects set by sys.set_coroutine_wrapper.'
pass
def getallocatedblocks():
'getallocatedblocks() -> integer\n\nReturn the number of memory blocks currently allocated, regardless of their\nsize.'
return 1
def getcheckinterval():
'getcheckinterval() -> current check interval; see setcheckinterval().'
pass
def getdefaultencoding():
'getdefaultencoding() -> string\n\nReturn the current default string encoding used by the Unicode \nimplementation.'
return ''
def getdlopenflags():
'getdlopenflags() -> int\n\nReturn the current value of the flags that are used for dlopen calls.\nThe flag constants are defined in the os module.'
return 1
def getfilesystemencodeerrors():
'getfilesystemencodeerrors() -> string\n\nReturn the error mode used to convert Unicode filenames in\noperating system filenames.'
return ''
def getfilesystemencoding():
'getfilesystemencoding() -> string\n\nReturn the encoding used to convert Unicode filenames in\noperating system filenames.'
return ''
def getprofile():
'getprofile()\n\nReturn the profiling function set with sys.setprofile.\nSee the profiler chapter in the library manual.'
pass
def getrecursionlimit():
'getrecursionlimit()\n\nReturn the current value of the recursion limit, the maximum depth\nof the Python interpreter stack. This limit prevents infinite\nrecursion from causing an overflow of the C stack and crashing Python.'
pass
def getrefcount(object):
'getrefcount(object) -> integer\n\nReturn the reference count of object. The count returned is generally\none higher than you might expect, because it includes the (temporary)\nreference as an argument to getrefcount().'
return 1
def getsizeof(object, default):
'getsizeof(object, default) -> int\n\nReturn the size of object in bytes.'
return 1
def getswitchinterval():
'getswitchinterval() -> current thread switch interval; see setswitchinterval().'
pass
def gettrace():
'gettrace()\n\nReturn the global debug tracing function set with sys.settrace.\nSee the debugger chapter in the library manual.'
pass
class __hash_info(_mod_builtins.tuple):
'hash_info\n\nA struct sequence providing parameters used for computing\nhashes. The attributes are read only.'
def __add__(self, value):
'Return self+value.'
return __hash_info()
__class__ = __hash_info
def __contains__(self, key):
'Return key in self.'
return False
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
@classmethod
def __dir__(self):
'__dir__() -> list\ndefault dir() implementation'
return ['']
def __eq__(self, value):
'Return self==value.'
return False
@classmethod
def __format__(self, format_spec):
'default object formatter'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
@classmethod
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'hash_info\n\nA struct sequence providing parameters used for computing\nhashes. The attributes are read only.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __hash_info()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return __hash_info()
def __ne__(self, value):
'Return self!=value.'
return False
@classmethod
def __reduce__(self):
return ''; return ()
@classmethod
def __reduce_ex__(self, protocol):
'helper for pickle'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __rmul__(self, value):
'Return value*self.'
return __hash_info()
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@classmethod
def __sizeof__(self):
'__sizeof__() -> int\nsize of object in memory, in bytes'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
algorithm = 'siphash24'
@classmethod
def count(cls):
'T.count(value) -> integer -- return number of occurrences of value'
return 1
cutoff = 0
hash_bits = 64
imag = 1000003
@classmethod
def index(cls):
'T.index(value, [start, [stop]]) -> integer -- return first index of value.\nRaises ValueError if the value is not present.'
return 1
inf = 314159
modulus = 2305843009213693951
n_fields = 9
n_sequence_fields = 9
n_unnamed_fields = 0
nan = 0
seed_bits = 128
width = 64
hexversion = 50727152
implementation = _mod_types.SimpleNamespace()
class __int_info(_mod_builtins.tuple):
"sys.int_info\n\nA struct sequence that holds information about Python's\ninternal representation of integers. The attributes are read only."
def __add__(self, value):
'Return self+value.'
return __int_info()
__class__ = __int_info
def __contains__(self, key):
'Return key in self.'
return False
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
@classmethod
def __dir__(self):
'__dir__() -> list\ndefault dir() implementation'
return ['']
def __eq__(self, value):
'Return self==value.'
return False
@classmethod
def __format__(self, format_spec):
'default object formatter'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
@classmethod
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
"sys.int_info\n\nA struct sequence that holds information about Python's\ninternal representation of integers. The attributes are read only."
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __int_info()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return __int_info()
def __ne__(self, value):
'Return self!=value.'
return False
@classmethod
def __reduce__(self):
return ''; return ()
@classmethod
def __reduce_ex__(self, protocol):
'helper for pickle'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __rmul__(self, value):
'Return value*self.'
return __int_info()
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@classmethod
def __sizeof__(self):
'__sizeof__() -> int\nsize of object in memory, in bytes'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
bits_per_digit = 30
@classmethod
def count(cls):
'T.count(value) -> integer -- return number of occurrences of value'
return 1
@classmethod
def index(cls):
'T.index(value, [start, [stop]]) -> integer -- return first index of value.\nRaises ValueError if the value is not present.'
return 1
n_fields = 2
n_sequence_fields = 2
n_unnamed_fields = 0
sizeof_digit = 4
def intern(string):
"intern(string) -> string\n\n``Intern'' the given string. This enters the string in the (global)\ntable of interned strings whose purpose is to speed up dictionary lookups.\nReturn the string itself or the previously interned string object with the\nsame value."
return ''
def is_finalizing():
'is_finalizing()\nReturn True if Python is exiting.'
pass
maxsize = 9223372036854775807
maxunicode = 1114111
meta_path = _mod_builtins.list()
modules = _mod_builtins.dict()
path = _mod_builtins.list()
path_hooks = _mod_builtins.list()
path_importer_cache = _mod_builtins.dict()
platform = 'linux'
prefix = '/home/mehedi/python/blog/venv'
def set_asyncgen_hooks(*, firstiter=None, finalizer=None):
'set_asyncgen_hooks(*, firstiter=None, finalizer=None)\n\nSet a finalizer for async generators objects.'
pass
def set_coroutine_wrapper(wrapper):
'set_coroutine_wrapper(wrapper)\n\nSet a wrapper for coroutine objects.'
pass
def setcheckinterval(n):
'setcheckinterval(n)\n\nTell the Python interpreter to check for asynchronous events every\nn instructions. This also affects how often thread switches occur.'
pass
def setdlopenflags(n):
'setdlopenflags(n) -> None\n\nSet the flags used by the interpreter for dlopen calls, such as when the\ninterpreter loads extension modules. Among other things, this will enable\na lazy resolving of symbols when importing a module, if called as\nsys.setdlopenflags(0). To share symbols across extension modules, call as\nsys.setdlopenflags(os.RTLD_GLOBAL). Symbolic names for the flag modules\ncan be found in the os module (RTLD_xxx constants, e.g. os.RTLD_LAZY).'
pass
def setprofile(function):
'setprofile(function)\n\nSet the profiling function. It will be called on each function call\nand return. See the profiler chapter in the library manual.'
pass
def setrecursionlimit(n):
'setrecursionlimit(n)\n\nSet the maximum depth of the Python interpreter stack to n. This\nlimit prevents infinite recursion from causing an overflow of the C\nstack and crashing Python. The highest possible limit is platform-\ndependent.'
pass
def setswitchinterval(n):
'setswitchinterval(n)\n\nSet the ideal thread switching delay inside the Python interpreter\nThe actual frequency of switching threads can be lower if the\ninterpreter executes long sequences of uninterruptible code\n(this is implementation-specific and workload-dependent).\n\nThe parameter must represent the desired switching delay in seconds\nA typical value is 0.005 (5 milliseconds).'
pass
def settrace(function):
'settrace(function)\n\nSet the global debug tracing function. It will be called on each\nfunction call. See the debugger chapter in the library manual.'
pass
stderr = _mod__io.TextIOWrapper()
stdin = _mod__io.TextIOWrapper()
stdout = _mod__io.TextIOWrapper()
class __thread_info(_mod_builtins.tuple):
'sys.thread_info\n\nA struct sequence holding information about the thread implementation.'
def __add__(self, value):
'Return self+value.'
return __thread_info()
__class__ = __thread_info
def __contains__(self, key):
'Return key in self.'
return False
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
@classmethod
def __dir__(self):
'__dir__() -> list\ndefault dir() implementation'
return ['']
def __eq__(self, value):
'Return self==value.'
return False
@classmethod
def __format__(self, format_spec):
'default object formatter'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
@classmethod
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'sys.thread_info\n\nA struct sequence holding information about the thread implementation.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __thread_info()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return __thread_info()
def __ne__(self, value):
'Return self!=value.'
return False
@classmethod
def __reduce__(self):
return ''; return ()
@classmethod
def __reduce_ex__(self, protocol):
'helper for pickle'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __rmul__(self, value):
'Return value*self.'
return __thread_info()
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@classmethod
def __sizeof__(self):
'__sizeof__() -> int\nsize of object in memory, in bytes'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@classmethod
def count(cls):
'T.count(value) -> integer -- return number of occurrences of value'
return 1
@classmethod
def index(cls):
'T.index(value, [start, [stop]]) -> integer -- return first index of value.\nRaises ValueError if the value is not present.'
return 1
lock = 'semaphore'
n_fields = 3
n_sequence_fields = 3
n_unnamed_fields = 0
name = 'pthread'
version = 'NPTL 2.28'
version = '3.6.8 (default, Apr 9 2019, 04:59:38) \n[GCC 8.3.0]'
class __version_info(_mod_builtins.tuple):
'sys.version_info\n\nVersion information as a named tuple.'
def __add__(self, value):
'Return self+value.'
return __version_info()
__class__ = __version_info
def __contains__(self, key):
'Return key in self.'
return False
def __delattr__(self, name):
'Implement delattr(self, name).'
return None
@classmethod
def __dir__(self):
'__dir__() -> list\ndefault dir() implementation'
return ['']
def __eq__(self, value):
'Return self==value.'
return False
@classmethod
def __format__(self, format_spec):
'default object formatter'
return ''
def __ge__(self, value):
'Return self>=value.'
return False
def __getattribute__(self, name):
'Return getattr(self, name).'
pass
def __getitem__(self, key):
'Return self[key].'
pass
@classmethod
def __getnewargs__(self):
return ()
def __gt__(self, value):
'Return self>value.'
return False
def __hash__(self):
'Return hash(self).'
return 0
def __init__(self, *args, **kwargs):
'sys.version_info\n\nVersion information as a named tuple.'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
def __iter__(self):
'Implement iter(self).'
return __version_info()
def __le__(self, value):
'Return self<=value.'
return False
def __len__(self):
'Return len(self).'
return 0
def __lt__(self, value):
'Return self<value.'
return False
def __mul__(self, value):
'Return self*value.'
return __version_info()
def __ne__(self, value):
'Return self!=value.'
return False
@classmethod
def __reduce__(self):
return ''; return ()
@classmethod
def __reduce_ex__(self, protocol):
'helper for pickle'
return ''; return ()
def __repr__(self):
'Return repr(self).'
return ''
def __rmul__(self, value):
'Return value*self.'
return __version_info()
def __setattr__(self, name, value):
'Implement setattr(self, name, value).'
return None
@classmethod
def __sizeof__(self):
'__sizeof__() -> int\nsize of object in memory, in bytes'
return 0
def __str__(self):
'Return str(self).'
return ''
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
@classmethod
def count(cls):
'T.count(value) -> integer -- return number of occurrences of value'
return 1
@classmethod
def index(cls):
'T.index(value, [start, [stop]]) -> integer -- return first index of value.\nRaises ValueError if the value is not present.'
return 1
major = 3
micro = 8
minor = 6
n_fields = 5
n_sequence_fields = 5
n_unnamed_fields = 0
releaselevel = 'final'
serial = 0
warnoptions = _mod_builtins.list()
float_info = __float_info()
hash_info = __hash_info()
int_info = __int_info()
thread_info = __thread_info()
version_info = __version_info()
|
[
"aamehedi93@gmail.com"
] |
aamehedi93@gmail.com
|
cb4b16dd237ab801af0b21ca00cf08970de29bf8
|
e8c82271070e33bb6b181616a0a518d8f8fc6158
|
/fce/numpy/distutils/tests/f2py_ext/tests/PaxHeader/test_fib2.py
|
a56021af7be6b185d62870db000c6c9d53082297
|
[] |
no_license
|
DataRozhlas/profil-volice-share
|
aafa0a93b26de0773fa6bf2b7d513a5ec856ce38
|
b4424527fe36e0cd613f7bde8033feeecb7e2e94
|
refs/heads/master
| 2020-03-18T01:44:26.136999
| 2018-05-20T12:19:24
| 2018-05-20T12:19:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
17 gid=713727123
15 uid=3629613
20 ctime=1458667064
20 atime=1458667064
23 SCHILY.dev=16777220
23 SCHILY.ino=31296593
18 SCHILY.nlink=1
|
[
"honza@datastory.cz"
] |
honza@datastory.cz
|
00b42fcbfbde767ac076c1bdd0d7fb34c5b3382c
|
67b0379a12a60e9f26232b81047de3470c4a9ff9
|
/comments/models.py
|
27fec916d93089637204f146a37c7c27c5e70df4
|
[] |
no_license
|
vintkor/whitemandarin
|
8ea9022b889fac718e0858873a07c586cf8da729
|
5afcfc5eef1bb1cc2febf519b04a4819a7b9648f
|
refs/heads/master
| 2021-05-06T03:35:09.367375
| 2017-12-20T15:43:08
| 2017-12-20T15:43:08
| 114,904,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,969
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from tinymce import models as tinymce_model
import datetime
class Comments(MPTTModel):
prod_name = models.CharField(max_length=250, blank=True, db_index=True, verbose_name="Название")
paket = models.CharField(max_length=250, db_index=True, verbose_name="Пакет")
item_model = models.CharField(max_length=250, db_index=True, verbose_name="Модель")
item_id = models.IntegerField(db_index=True, null=True, verbose_name="id")
published_in_category = models.BooleanField(default=False, verbose_name='Показывать в категории')
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', verbose_name=u"Родитель")
name = models.CharField(max_length=250, verbose_name="Название")
text = tinymce_model.HTMLField(blank=True, verbose_name="Полное описание")
published = models.BooleanField(verbose_name="Опубликован")
date_add = models.DateTimeField(default=datetime.datetime.today ,verbose_name="Дата публикации")
vote = models.DecimalField(max_digits=2, decimal_places=1,db_index=True, null=True, verbose_name="Оценка")
positive = models.IntegerField(null=True, blank=True, default=0, verbose_name="Позитивных")
negative = models.IntegerField(null=True, blank=True, default=0, verbose_name="Негативных")
def save(self):
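        # Save first so this comment is included in the queries below, then try to
        # refresh the cached statistics (comment count, min/max/average rating) on the
        # related item, which is looked up dynamically from `paket` (app label) and
        # `item_model` (model name); any failure is silently swallowed by the bare except.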
super(Comments, self).save()
try:
paket = self.paket
item_model = self.item_model
id = self.item_id
count_comments = Comments.objects.filter(paket=paket, item_model=item_model, item_id=int(id), published = True).count()
# assert False, count_comments
exec "from %s.models import %s" % (paket, item_model)
p = eval("%s.objects.get(pk=%d)" % (item_model, int(id)))
p.comments_count = count_comments
min_vote = 5
max_vote = 0
all_reit = 0.0
prod_votes = Comments.objects.filter(paket=paket, item_model=item_model, item_id=int(id), published = True).values('vote')
for item in prod_votes:
if min_vote > item['vote']:
min_vote = item['vote']
if max_vote < item['vote']:
max_vote = item['vote']
all_reit = all_reit + float(item['vote'])
# assert False, min_vote
p.min_reit = min_vote
p.max_reit = max_vote
p.reit = all_reit / count_comments
p.save()
self.prod_name = p.name
except:
pass
super(Comments, self).save()
if not self.date_add:
self.date_add = datetime.datetime.today()
super(Comments, self).save()
def get_name(self):
paket = self.paket
item_model = self.item_model
id = self.item_id
# count_comments = Comments.objects.filter(paket=paket, item_model=item_model, item_id=int(id), published = True).count()
# assert False, count_comments
exec "from %s.models import %s" % (paket, item_model)
p = eval("%s.objects.get(pk=%d)" % (item_model, int(id)))
return p.name
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = "Коментарии "
verbose_name = "Коментарий"
ordering = ['-id']
class MPTTMeta:
order_insertion_by = ['name']
class Utility(models.Model):
comment = models.ForeignKey(Comments, blank=True, null=True, verbose_name="Коммент")
positive = models.BooleanField(verbose_name="Позитивная оценка")
def __unicode__(self):
return self.comment.name
class Meta:
verbose_name_plural = "Оценки"
verbose_name = "Оценка"
|
[
"alkv84@yandex.ru"
] |
alkv84@yandex.ru
|
372ffb8f05abddeea2704b81e3dfd8ba8d5fa88e
|
236332a967f8f02291b58cab7addfeabdfe7b9a2
|
/experiments/tests/testing_2_3.py
|
207477ec55fb3dd07e0cb74d22864d4061c012cc
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ConsumerAffairs/django-experiments
|
2dbf04b7f0e7ebdff6d5e7879afeb26f7fdb5150
|
4f1591c9b40390f7302f3777df231ffe3629f00d
|
refs/heads/master
| 2021-01-20T11:10:30.199586
| 2018-04-20T21:26:18
| 2018-04-20T21:26:18
| 101,666,220
| 0
| 10
|
MIT
| 2018-04-20T21:26:19
| 2017-08-28T16:56:16
|
Python
|
UTF-8
|
Python
| false
| false
| 218
|
py
|
# coding=utf-8
try:
from unittest import mock, skip
except ImportError:
import mock
class DummyLockTests(object):
@classmethod
def new(cls):
test_class = cls
return skip(test_class)
|
[
"fran.hrzenjak@gmail.com"
] |
fran.hrzenjak@gmail.com
|
f6bb5a74f05f10651cae3ee6b1e226e5f896c8de
|
65e0c11d690b32c832b943fb43a4206739ddf733
|
/bsdradius/tags/release20060404_v_0_4_0/bsdradius/Typecast.py
|
436a5e6ae0899419c20edf50298dd09eb597dcaf
|
[
"BSD-3-Clause"
] |
permissive
|
Cloudxtreme/bsdradius
|
b5100062ed75c3201d179e190fd89770d8934aee
|
69dba67e27215dce49875e94a7eedbbdf77bc784
|
refs/heads/master
| 2021-05-28T16:50:14.711056
| 2015-04-30T11:54:17
| 2015-04-30T11:54:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,761
|
py
|
## BSDRadius is released under BSD license.
## Copyright (c) 2006, DATA TECH LABS
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
## * Neither the name of the DATA TECH LABS nor the names of its contributors
## may be used to endorse or promote products derived from this software without
## specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Functions and methods for casting types from string to
other types.
Contains functions and class for inheritance in other classes.
Supported types: 'str', 'string', 'int', 'hex', 'oct', 'dec', 'bool', 'Template'
"""
# HeadURL $HeadURL: file:///Z:/backup/svn/bsdradius/tags/release20060404_v_0_4_0/bsdradius/Typecast.py $
# Author: $Author: valts $
# File version: $Revision: 201 $
# Last changes: $Date: 2006-04-04 17:22:11 +0300 (Ot, 04 Apr 2006) $
# for typecasting to Template
from string import Template
### functions ###
def getstr (input):
return str(input)
def getstring (input):
return getstr(input)
def getint (input):
return int(str(input))
def gethex (input):
return int(str(input), 16)
def getoct (input):
return int(str(input), 8)
def getdec (input):
return int(str(input), 10)
_booleanStates = {'1': True, 'yes': True, 'y': True, 'true': True, 'on': True,
'0': False, 'no': False, 'n': False, 'false': False, 'off': False}
def getbool (input):
inp = str(input)
if inp.lower() not in _booleanStates:
raise ValueError, 'Not a boolean: %s' % inp
return _booleanStates[inp.lower()]
def getTemplate (input):
return Template(str(input))
class Typecast:
"""Use this class as base class in your classes to
add typecasting functionality. This class defines
methods which are wrappers to functions in module
namespace.
You can override attribute "data" in derived classes.
Since self.data is dictionary (with multiple levels) you can
pass any number of keys to typecasting methods. They all use method
_getItem() which searches recursively in self.data for rightmost key value.
"""
_booleanStates = _booleanStates
def __init__(self):
self.data = {}
def _getItem(self, keys):
"""Search recursively for item by given keys
Input: (str) keys Example: t._getItem('key1', 'key2', 'key3')
Output: (mixed) value
"""
if not keys:
raise KeyError, 'No key specified'
tmp = None
for key in keys:
if tmp is None:
tmp = self.data[key]
else:
tmp = tmp[key]
return tmp
def getstr (self, *keys):
return getstr(self._getItem(keys))
def getstring (self, *keys):
return getstring(self._getItem(keys))
def getint (self, *keys):
return getint(self._getItem(keys))
def gethex (self, *keys):
return gethex(self._getItem(keys))
def getoct (self, *keys):
return getoct(self._getItem(keys))
def getdec (self, *keys):
return getdec(self._getItem(keys))
def getbool (self, *keys):
return getbool(self._getItem(keys))
def getTemplate (self, *keys):
return getTemplate(self._getItem(keys))
# holds references to all supported typecast methods
typecastMethods = {
'str' : getstr,
'string' : getstring,
'int' : getint,
'hex' : gethex,
'oct' : getoct,
'dec' : getdec,
'bool' : getbool,
'Template' : getTemplate,
}
# holds references to all supported typecast methods
Typecast.typecastMethods = {
'str' : Typecast.getstr,
'string' : Typecast.getstring,
'int' : Typecast.getint,
'hex' : Typecast.gethex,
'oct' : Typecast.getoct,
'dec' : Typecast.getdec,
'bool' : Typecast.getbool,
'Template' : Typecast.getTemplate,
}
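
# A minimal usage sketch: the module-level helpers convert single values, while the
# Typecast mixin resolves nested keys in self.data before converting. The Config
# class and its data layout below are hypothetical examples, not part of BSDRadius.
if __name__ == '__main__':
    class Config(Typecast):
        def __init__(self):
            Typecast.__init__(self)
            self.data = {'server': {'port': '1812', 'enabled': 'yes'}}

    cfg = Config()
    print getbool('yes')                    # True
    print gethex('ff')                      # 255
    print cfg.getint('server', 'port')      # 1812
    print cfg.getbool('server', 'enabled')  # True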
|
[
"valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef"
] |
valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef
|
77fdcf1dbfc3a529545552210737968c88bf404b
|
ffaeaf54e891c3dcca735347f27f1980f66b7a41
|
/python/1.POP/1.base/01.helloworld.py
|
015e87763fe1f07c2320ce7fe71f056ea13d317c
|
[
"Apache-2.0"
] |
permissive
|
dunitian/BaseCode
|
9804e3d8ff1cb6d4d8cca96978b20d168072e8bf
|
4855ef4c6dd7c95d7239d2048832d8acfe26e084
|
refs/heads/master
| 2020-04-13T09:51:02.465773
| 2018-12-24T13:26:32
| 2018-12-24T13:26:32
| 137,184,193
| 0
| 0
|
Apache-2.0
| 2018-06-13T08:13:38
| 2018-06-13T08:13:38
| null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
'''A multi-line comment using three single quotes:
print("Hello World!")
print("Hello World!")
print("Hello World!")'''
"""A multi-line comment using three double quotes:
print("Hello World!")
print("Hello World!")
print("Hello World!")"""
# Single-line comment; print output
print("Hello World!")
# Define a variable and print it
name = "小明"
print(name)
print("x" * 10)
print("dnt.dkill.net/now", end='')
print("带你走进中医经络")
print("dnt.dkill.net/now", end="")
print("带你走进中医经络")
# What if a string contains both ' and "? Escape them with the \ character
print("I\'m \"OK\"!")
# When many characters would need escaping you would have to add many \; to simplify this, Python also allows r'' raw strings, whose contents are not escaped
print(r'\\\t\\')
# When a string spans many lines, writing \n in one line is hard to read; Python allows the '''...''' form for multi-line content
print('''我请你吃饭吧~
晚上吃啥?
去厕所,你说呢?''')
|
[
"39723758+lotapp@users.noreply.github.com"
] |
39723758+lotapp@users.noreply.github.com
|
4e72b71582b2240f32ecf38474428072ef1b7413
|
9e7b9e91b8425061a5ad36e0dd630a799ec79f6f
|
/opencv_cookbook.py
|
49d0d5b8c67cdd2753d61353ec3e42ef9014ce83
|
[] |
no_license
|
OlgaBelitskaya/colab_notebooks
|
c27fad60f7e4ca35287e2561487b5d9d82efde43
|
d568149c8bcfb0025f7b09120ca44f639ac40efe
|
refs/heads/master
| 2023-07-07T23:02:49.289280
| 2021-08-14T08:16:38
| 2021-08-14T08:16:38
| 158,067,383
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,397
|
py
|
# -*- coding: utf-8 -*-
"""opencv_cookbook.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GD7Oi1LtFaEi8VOjiBM5cj5ayWpaejaf
# OpenCV Cookbook
## Libraries & Color Tools
"""
import warnings; warnings.filterwarnings('ignore')
import urllib,cv2
from skimage import io,transform
import numpy as np,pylab as pl
import seaborn as sb,scipy as sp
fpath='https://olgabelitskaya.github.io/'
pl.style.use('seaborn-whitegrid')
pl.rcParams['figure.figsize']=(7,7)
ColorFlags=[flag for flag in dir(cv2) if flag.startswith('COLOR')]
print (np.array(ColorFlags[:30]))
def get_image(original,flag,fpath=fpath):
input_file=urllib.request.urlopen(fpath+original)
output_file=open(original,'wb');
output_file.write(input_file.read())
output_file.close(); input_file.close()
img=cv2.imread(original)
return cv2.cvtColor(img,flag)
"""## Data"""
plist=[get_image('pattern0%s.jpeg'%(i+1),flag=cv2.COLOR_BGR2RGB)
for i in range(7)]
flower_img=get_image('flower.png',flag=cv2.COLOR_BGR2RGB)
cat_img=get_image('cat.png',flag=cv2.COLOR_BGR2RGB)
img=plist[2]
st='Image parameters: size - %s; shape - %s; type - %s'
print (st%(img.size,img.shape,img.dtype))
pl.imshow(img); pl.show()
"""## Simple Manipulations"""
img_inv=cv2.bitwise_not(img)
pl.imshow(img_inv); pl.show()
img_w2b=img.copy()
img_w2b[np.where((img_w2b==[255,255,255]).all(axis=2))]=[0,0,0]
pl.imshow(img_w2b); pl.show()
pl.rcParams['figure.figsize']=(14,7)
pl.figure(1); pl.subplot(121)
img_gray1=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
pl.imshow(img_gray1)
pl.subplot(122)
img_gray2=cv2.cvtColor(img_w2b,cv2.COLOR_RGB2GRAY)
pl.imshow(img_gray2); pl.show()
pl.rcParams['figure.figsize']=(8,8); N=50
# skimage & opencv - grayscale, resize, invert
img_skgray=io.imread('pattern03.jpeg',as_gray=True)
img_skgray_resized=transform.resize(img_skgray,(N,N))
img_skgray_resized2=cv2.bitwise_not(img_skgray_resized)
img_cvgray_resized=cv2.resize(img_gray1,(N,N),
interpolation=cv2.INTER_CUBIC)
img_cvgray_resized2=cv2.bitwise_not(img_cvgray_resized)
pl.figure(1); pl.subplot(221)
pl.imshow(img_skgray_resized,cmap=pl.cm.Greys)
pl.subplot(222)
pl.imshow(img_skgray_resized2)
pl.subplot(223)
pl.imshow(img_cvgray_resized,cmap=pl.cm.Greys)
pl.subplot(224)
pl.imshow(img_cvgray_resized); pl.show()
pl.rcParams['figure.figsize']=(8,8)
# split color channels
img0=plist[0]; b,g,r=cv2.split(img0)
# merge channels
img_merged=cv2.merge([b,g,r])
# display one of the channels
pl.figure(1); pl.subplot(231); pl.imshow(r,cmap=pl.cm.Reds_r)
pl.subplot(232); pl.imshow(g,cmap=pl.cm.Greens_r)
pl.subplot(233); pl.imshow(b,cmap=pl.cm.Blues_r)
# display merged image
pl.subplot(234); pl.imshow(img_merged); pl.show()
hsv_img=cv2.cvtColor(img0,cv2.COLOR_RGB2HSV_FULL)
lab_img=cv2.cvtColor(img0,cv2.COLOR_RGB2LAB)
pl.figure(1); pl.subplot(121); pl.imshow(hsv_img)
pl.subplot(122); pl.imshow(lab_img); pl.show()
pl.rcParams['figure.figsize']=(12,4)
# flip images
img_vertical_flipped=cv2.flip(img,0)
img_horizontal_flipped=cv2.flip(img,1)
img_transposed=cv2.transpose(img)
pl.figure(1); pl.subplot(131); pl.imshow(img_vertical_flipped)
pl.subplot(132); pl.imshow(img_horizontal_flipped)
pl.subplot(133); pl.imshow(img_transposed); pl.show()
"""## Advanced Transformations"""
# repeat the fragment
img_twice=img.copy()
img_fragment=img_twice[15:60,15:60]
img_twice[105:105+img_fragment.shape[0],105:105+\
img_fragment.shape[1]]=img_fragment
pl.imshow(img_twice); pl.show()
pl.rcParams['figure.figsize']=(8,4)
# perspective transformation
rows,cols,ch=img.shape
pts1=np.float32([[10,10],[140,40],[10,140],[140,100]])
pts2=np.float32([[0,0],[150,0],[0,150],[150,150]])
m=cv2.getPerspectiveTransform(pts1,pts2)
dst=cv2.warpPerspective(img,m,(150,150))
pl.subplot(121),pl.imshow(img),pl.title('Input')
pl.scatter(pts1[:,0],pts1[:,1],c='g')
pl.subplot(122),pl.imshow(dst),pl.title('Output')
pl.xlim(-5,155); pl.ylim(155,-5)
pl.scatter(pts2[:,0],pts2[:,1],c='g'); pl.show()
pl.rcParams['figure.figsize']=(16,8)
# gradient filters
img2=img0.copy()
img2_gray=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
laplacian=cv2.Laplacian(img2_gray,cv2.CV_64F,ksize=5)
sobel=cv2.Sobel(img2_gray,cv2.CV_64F,1,0,ksize=3)
scharr=cv2.Scharr(img2_gray,cv2.CV_64F,1,0)
pl.subplot(1,4,1),pl.imshow(img2)
pl.title('Original'),pl.xticks([]),pl.yticks([])
pl.subplot(1,4,2),pl.imshow(laplacian,cmap=pl.cm.bone)
pl.title('Laplacian'),pl.xticks([]),pl.yticks([])
pl.subplot(1,4,3),pl.imshow(sobel,cmap=pl.cm.bone)
pl.title('Sobel'),pl.xticks([]),pl.yticks([])
pl.subplot(1,4,4),pl.imshow(scharr,cmap=pl.cm.bone)
pl.title('Scharr'),pl.xticks([]),pl.yticks([]); pl.show()
pl.rcParams['figure.figsize']=(12,12)
# erosion
kernel=np.ones((3,3),np.uint8)
erosion=cv2.erode(img_gray1,kernel,iterations=1)
pl.subplot(2,2,1),pl.imshow(img_gray1)
pl.title('Original Gray'),pl.xticks([]),pl.yticks([])
pl.subplot(2,2,2),pl.imshow(erosion,cmap=pl.cm.bone)
pl.title('Erosion'),pl.xticks([]),pl.yticks([])
img_gray1_inv=cv2.bitwise_not(img_gray1)
erosion_inv=cv2.erode(img_gray1_inv,kernel,iterations=1)
pl.subplot(2,2,3),pl.imshow(img_gray1_inv)
pl.title('Inverted Gray'),pl.xticks([]),pl.yticks([])
pl.subplot(2,2,4),pl.imshow(erosion_inv,cmap=pl.cm.bone)
pl.title('Erosion for Inverted')
pl.xticks([]),pl.yticks([]); pl.show()
# morphological gradient
gradient=cv2.morphologyEx(img,cv2.MORPH_GRADIENT,kernel)
pl.subplot(1,2,1),pl.imshow(img)
pl.title('Original Gray'),pl.xticks([]),pl.yticks([])
pl.subplot(1,2,2),pl.imshow(gradient,cmap=pl.cm.bone)
pl.title('Morphological Gradient')
pl.xticks([]),pl.yticks([]); pl.show()
"""## Edges' Detection"""
pl.rcParams['figure.figsize']=(12,6)
img_gray0=cv2.cvtColor(img0,cv2.COLOR_RGB2GRAY)
edge_img=img.copy(); edge_img0=img0.copy()
edge=cv2.Canny(img_gray1,90,240)
edge_img[edge!=0]=(0,255,0)
edge0=cv2.Canny(img_gray0,90,240)
edge_img0[edge0!=0]=(0,255,0)
pl.figure(1); pl.subplot(121); pl.imshow(edge_img)
pl.subplot(122); pl.imshow(edge_img0); pl.show()
"""## Key Points"""
pl.rcParams['figure.figsize']=(16,6)
orb_img=flower_img.copy()
orb=cv2.ORB_create()
keypoints=orb.detect(orb_img,None)
keypoints,descriptors=orb.compute(orb_img,keypoints)
cv2.drawKeypoints(orb_img,keypoints,orb_img)
match_img=np.zeros(flower_img.shape,np.uint8)
center_img=flower_img[60:140,90:180]
match_img[60:140,100:180]=[0,0,0]
center_img=cv2.flip(center_img,0)
match_img[100:100+center_img.shape[0],
150:150+center_img.shape[1]]=center_img
pl.figure(1); pl.subplot(121); pl.imshow(orb_img)
pl.subplot(122); pl.imshow(match_img); pl.show()
pl.rcParams['figure.figsize']=(16,6)
match_keypoints=orb.detect(match_img,None)
match_keypoints,match_descriptors=\
orb.compute(match_img,match_keypoints)
brute_force=cv2.BFMatcher(cv2.NORM_HAMMING,crossCheck=True)
matches=brute_force.match(descriptors,match_descriptors)
matches=sorted(matches,key=lambda x:x.distance)
draw_matches=cv2.drawMatches(orb_img,keypoints,
match_img,match_keypoints,
matches[:9],orb_img)
pl.imshow(draw_matches); pl.show()
"""## Object Detection"""
pl.rcParams['figure.figsize']=(8,8)
url='haarcascade_frontalcatface.xml'
input_file=urllib.request.urlopen(fpath+url)
output_file=open(url,'wb')
output_file.write(input_file.read())
output_file.close(); input_file.close()
gray_cat_img=cv2.cvtColor(cat_img,cv2.COLOR_RGB2GRAY)
catface_img=cat_img.copy()
catface_cascade=cv2.CascadeClassifier(url)
catfaces=catface_cascade.detectMultiScale(gray_cat_img,1.095,6)
for (x,y,w,h) in catfaces:
cv2.rectangle(catface_img,(x,y),(x+w,y+h),(0,255,0),3)
pl.figure(1); pl.subplot(121); pl.imshow(cat_img)
pl.subplot(122); pl.imshow(catface_img); pl.show()
pl.rcParams['figure.figsize']=(12,12)
sport_img=get_image('sport.jpg',flag=cv2.COLOR_BGR2RGB)
gray_sport_img=cv2.cvtColor(sport_img,cv2.COLOR_RGB2GRAY)
face_img=sport_img.copy()
url='haarcascade_frontalface_default.xml'
input_file=urllib.request.urlopen(fpath+url)
output_file=open(url,'wb')
output_file.write(input_file.read())
output_file.close(); input_file.close()
face_cascade=cv2.CascadeClassifier(url)
faces=face_cascade.detectMultiScale(gray_sport_img,1.095,4)
for (x,y,w,h) in faces:
cv2.rectangle(face_img,(x,y),(x+w,y+h),(0,255,0),3)
pl.figure(1); pl.subplot(211); pl.imshow(sport_img)
pl.subplot(212); pl.imshow(face_img); pl.show()
|
[
"safuolga@gmail.com"
] |
safuolga@gmail.com
|
88ea9f503fbe4878090275c2480106a7648b48f2
|
f94e4955f9d16b61b7c9bff130b9d9ee43436bea
|
/labs/lab06/lab06.py
|
1c1dac20000f742c21337b538d3d8ac3b9563bc4
|
[] |
no_license
|
j2chu/dsc80-sp19
|
bd1dade66c19b920a54b0f8551fd999185449f86
|
dd48210a7cbadfb6470104b275f34085437e4766
|
refs/heads/master
| 2020-06-01T23:22:32.727488
| 2019-06-07T01:46:11
| 2019-06-07T01:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,275
|
py
|
import os
import pandas as pd
import numpy as np
import requests
import bs4
import json
# ---------------------------------------------------------------------
# Question # 1
# ---------------------------------------------------------------------
def question1():
"""
NOTE: You do NOT need to do anything with this function.
The function for this question makes sure you
have a correctly named HTML file in the right
place. Note: This does NOT check if the supplementary files
needed for your page are there!
>>> os.path.exists('lab06_1.html')
True
"""
# Don't change this function body!
# No python required; create the HTML file.
return
# ---------------------------------------------------------------------
# Question # 2
# ---------------------------------------------------------------------
def extract_book_links(text):
"""
:Example:
>>> fp = os.path.join('data', 'products.html')
>>> out = extract_book_links(open(fp, encoding='utf-8').read())
>>> url = 'scarlet-the-lunar-chronicles-2_218/index.html'
>>> out[0] == url
True
"""
return ...
def get_product_info(text):
"""
:Example:
>>> fp = os.path.join('data', 'Frankenstein.html')
>>> out = get_product_info(open(fp, encoding='utf-8').read())
>>> isinstance(out, dict)
True
>>> 'UPC' in out.keys()
True
>>> out['Rating']
'Two'
"""
return ...
def scrape_books(k):
"""
:param k: number of book-listing pages to scrape.
:returns: a dataframe of information on (certain) books
on the k pages (as described in the question).
:Example:
>>> out = scrape_books(1)
>>> out.shape
(1, 10)
>>> out['Rating'][0] == 'Five'
True
>>> out['UPC'][0] == 'ce6396b0f23f6ecc'
True
"""
return ...
# ---------------------------------------------------------------------
# Question 3
# ---------------------------------------------------------------------
def send_requests(apiKey, *args):
"""
:param apiKey: apiKey from newsapi website
:param args: number of languages as strings
:return: a list of dictionaries, where keys correspond to languages
and values correspond to Response objects
>>> responses = send_requests(os.environ['API_KEY'], "ru", "fr")
>>> isinstance(responses[0], dict)
True
>>> isinstance(responses[1], dict)
True
"""
return ...
def gather_info(resp):
"""
Finds some basic information from the obtained responses
:param resp: a list of dictionaries
:return: a list with the following items:
language that has the most number of news
most common base url for every language
>>> responses = send_requests(os.environ['API_KEY'], "ru", "fr")
>>> result = gather_info(responses)
>>> isinstance(result[0], str)
True
>>> len(result) == len(responses) + 1
True
"""
return ...
# ---------------------------------------------------------------------
# Question # 4
# ---------------------------------------------------------------------
def depth(comments):
"""
:Example:
>>> fp = os.path.join('data', 'comments.csv')
>>> comments = pd.read_csv(fp, sep='|')
>>> depth(comments).max() == 5
True
"""
return ...
# ---------------------------------------------------------------------
# DO NOT TOUCH BELOW THIS LINE
# IT'S FOR YOUR OWN BENEFIT!
# ---------------------------------------------------------------------
# Graded functions names! DO NOT CHANGE!
# This dictionary provides your doctests with
# a check that all of the questions being graded
# exist in your code!
GRADED_FUNCTIONS = {
'q01': ['question1'],
'q02': ['extract_book_links', 'get_product_info', 'scrape_books'],
'q03': ['send_requests', 'gather_info'],
'q04': ['depth']
}
def check_for_graded_elements():
"""
>>> check_for_graded_elements()
True
"""
for q, elts in GRADED_FUNCTIONS.items():
for elt in elts:
if elt not in globals():
stmt = "YOU CHANGED A QUESTION THAT SHOULDN'T CHANGE! \
In %s, part %s is missing" % (q, elt)
raise Exception(stmt)
return True
|
[
"aaron.fraenkel@gmail.com"
] |
aaron.fraenkel@gmail.com
|
e9eef0ae487bb90ae983c14902b33bc6d26c7a4f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/313/105928/submittedfiles/jogoDaVelha.py
|
6702e638a0b195b5ad2f3fe0c84a675088f19cc5
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
print (" BEM-VINDO AO JOGO DA VELHA ")
print("O primeiro jogador será o (X) e o segundo o (O) ")
posicao = """ posicoes do jogo
1 | 2 | 3
-----------
4 | 5 | 6
-----------
7 | 8 | 9
"""
print (posicao)
posicoes = [
(5,7),
(5,5),
(5,3),
(9,7),
(9,5),
(9,3),
(7,7),
(7,5),
(7,3),
]
ganhador = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[1, 4, 7],
[2, 5, 8],
[3, 6, 9],
[1, 5, 9],
[3, 5, 7],
]
jogo = []
for vertical in posicao.splitlines():
jogo.append(list(vertical))
jogador = "X"
jogando = True
jogadas = 0
while True:
if jogadas == 9:
print (" DEU VELHA!")
break
jogada = int(input(" digite a posicao de 1 à 9 (jogador %s): " % jogador ))
if jogada<1 or jogada>9:
print ("posicao fora das posicoes do jogo")
continue
if jogo[posicoes[jogada][0]][posicoes[jogada][1]] != " ":
print ("Essa posicao já foi ocupada")
continue
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
bc95461ad6fdc0ef95c4d671c174b643f840fd99
|
3354e6bdd4aeb2ddec84e6a8036c90cd24b6577a
|
/(구)자료구조와 알고리즘/(구)Quizes/backjoon/back_1002.py
|
b90ab7cd42413d530cb002dd703cd4c028e3c9d1
|
[] |
no_license
|
hchayan/Data-Structure-and-Algorithms
|
1125d7073b099d8c6aae4b14fbdb5e557dcb9412
|
be060447e42235e94f93a0b2f94f84d2fd560ffe
|
refs/heads/master
| 2023-01-05T10:15:02.862700
| 2020-11-04T08:16:56
| 2020-11-04T08:16:56
| 209,513,516
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
import sys
import math
n = int(sys.stdin.readline().rstrip())
def getAns():
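    # Number of points the two circles have in common:
    # -1 if they coincide (infinitely many), 0 if they are separate or one lies
    # strictly inside the other, 1 if they are tangent, 2 if they intersect.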
x1, y1, r1, x2, y2, r2 = map(int, sys.stdin.readline().rstrip().split())
leng = math.sqrt((x2-x1)**2+(y2-y1)**2)
if r1 == r2 and x1 == x2 and y1 == y2:
return -1
if r1 > r2:
r1, r2 = r2, r1
if leng > r1+r2:
return 0
elif leng == r1+r2:
return 1
else:
if leng+r1 == r2:
return 1
elif leng+r1 < r2:
return 0
return 2
for nn in range(n):
print(getAns())
|
[
"k852012@naver.com"
] |
k852012@naver.com
|
4b8450ca316cbf700f09c76a6c374e71ef1ce7c9
|
62b78ad6d3ec041ad20e4328d1730d815c9e35f1
|
/Twosteps.py
|
8c659c092569737b987696cb0d9e96f753e7dd19
|
[] |
no_license
|
Jamshid93/ForLoops2
|
7fe5b3240370d88a888f2d4609fb6d7787965a4d
|
296de0ccc51872001e45e20926b91a4528abd7a5
|
refs/heads/master
| 2022-01-26T05:22:18.206075
| 2019-07-20T04:12:37
| 2019-07-20T04:12:37
| 197,874,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
for item in range(3, 14, 2):  # prints 3, 5, 7, ..., 13: range starts at 3, stops before 14, and steps by 2
print(item)
|
[
"hamzayevjf@gmail.com"
] |
hamzayevjf@gmail.com
|
4acf925d2f474e88d0b195933e8e7df31a2aa765
|
9446feb2a94486ac16c585f712dbcbea7d112a9d
|
/src/taskmaster/cli/master.py
|
b78926059cf4a36ee7d184b223ba2326de9179e4
|
[
"Apache-2.0"
] |
permissive
|
jdunck/taskmaster
|
c16c879a546dd2ac383f804788e2d8ae2606abd1
|
04a03bf0853facf318ce98192db6389cdaaefe3c
|
refs/heads/master
| 2023-08-23T19:29:22.605052
| 2012-05-16T00:52:24
| 2012-05-16T00:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
"""
taskmaster.cli.master
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
def run(target, reset=False, size=10000, address='tcp://0.0.0.0:3050'):
from taskmaster.server import Server, Controller
server = Server(address, size=size)
controller = Controller(server, target)
if reset:
controller.reset()
controller.start()
def main():
import optparse
import sys
parser = optparse.OptionParser()
parser.add_option("--address", dest="address", default='tcp://127.0.0.1:3050')
parser.add_option("--size", dest="size", default='10000', type=int)
parser.add_option("--reset", dest="reset", default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) != 1:
print 'Usage: tm-master <callback>'
sys.exit(1)
sys.exit(run(args[0], **options.__dict__))
if __name__ == '__main__':
main()
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
8f4026d972f244c0391ff8f24625e881b3fc284a
|
1bf7f5fdfc5e7dbbc5ba14698ff488aa76b34d58
|
/virtual/bin/gunicorn_paster
|
3977588403c354347f27e635608040cf5dca8a00
|
[
"MIT"
] |
permissive
|
Brian23-eng/News-Highlight
|
ca86fab23ebfc429b31624c36ac7c3520d46966d
|
19c8816cbcf2980a381d01788ba604cc85c8ebaa
|
refs/heads/master
| 2021-06-25T02:05:01.848783
| 2019-10-17T08:09:19
| 2019-10-17T08:09:19
| 214,452,153
| 0
| 0
|
MIT
| 2021-03-20T01:54:54
| 2019-10-11T14:06:20
|
Python
|
UTF-8
|
Python
| false
| false
| 282
|
#!/home/brian/Documents/Core/Python/Flask/News-Highlight/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"b.odhiambo.bo@gmail.com"
] |
b.odhiambo.bo@gmail.com
|
|
92467aabe2d3e0851ea17a982715577fa57c6fde
|
4aa6b7c3a5ae3817007e09ad1289c1e9f7a355c0
|
/dynamic_programming/best-time-to-buy-and-sell-stock-iv.py
|
57dc30b64f80d1c90423152d8d4b8f9a47789989
|
[] |
no_license
|
liuhuipy/Algorithm-python
|
8f5143e06cf5fa2de2c178e3ba9e5fd12b9bcdf7
|
4e92a0b874f956d1df84d1493f870a5d1f06cde2
|
refs/heads/master
| 2021-06-03T04:19:01.946149
| 2021-01-08T07:44:40
| 2021-01-08T07:44:40
| 99,838,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,937
|
py
|
"""
Best Time to Buy and Sell Stock IV:
Given an array whose i-th element is the price of a given stock on day i,
design an algorithm to compute the maximum profit you can achieve. You may complete at most k transactions.
Note: you may not engage in multiple transactions at the same time (you must sell the stock before you buy again).
Example 1:
Input: [2,4,1], k = 2
Output: 2
Explanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4); the profit of this trade = 4-2 = 2.
Example 2:
Input: [3,2,6,5,0,3], k = 2
Output: 7
Explanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6); the profit of this trade = 6-2 = 4.
Then buy on day 5 (price = 0) and sell on day 6 (price = 3); the profit of this trade = 3-0 = 3.
"""
from typing import List
class Solution:
def maxProfit(self, k: int, prices: List[int]) -> int:
if not prices:
return 0
len_prices = len(prices)
if k >= len_prices / 2:
res = 0
for i in range(1, len_prices):
if prices[i] > prices[i - 1]:
res += prices[i] - prices[i - 1]
return res
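        # A hedged reading of the DP state used below:
        #   dp[i][0][j] = best cash on day i while holding a share, with j transactions completed
        #   dp[i][1][j] = best cash on day i while not holding, with j transactions completed
        # A transaction counts as completed on the sell, which is why the not-holding state
        # with j transactions is reached from dp[i - 1][0][j - 1] + prices[i].
        # The answer is the best not-holding value on the last day over all j.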
dp = [[[0 for _ in range(k + 1)], [0 for _ in range(k + 1)]] for _ in range(len_prices)]
for i in range(k + 1):
dp[0][0][i] = -prices[0]
for i in range(1, len_prices):
dp[i][0][0] = max(-prices[i], dp[i - 1][0][0])
for j in range(1, k + 1):
dp[i][0][j] = max(dp[i - 1][1][j] - prices[i], dp[i - 1][0][j])
dp[i][1][j] = max(dp[i - 1][0][j - 1] + prices[i], dp[i - 1][1][j])
print(dp)
return max(dp[len_prices - 1][1])
if __name__ == '__main__':
print(Solution().maxProfit(2, [2,1,4,5,2,9,7]))
|
[
"liuhui_py@163.com"
] |
liuhui_py@163.com
|
001acef57576b87eb38040f53889537d452e2f72
|
552865ae5daa143bc6a7dec46f7febe49f0a7226
|
/src/mr/cabot/kml.py
|
96d3de4531e1a03cd61c963cb5568f2f5a0be081
|
[] |
no_license
|
collective/mr.cabot
|
231a4a96c38e793356c4d06438d236d447e97bc8
|
3e905d80ed5eac52a258b74d19abf5ab182d49e2
|
refs/heads/master
| 2023-03-22T15:30:19.171188
| 2013-01-27T17:54:22
| 2013-01-27T18:32:03
| 6,816,996
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
import datetime
import urllib
import os
import simplekml
from mr.cabot.interfaces import IListing, IGeolocation
import sebastian
colors = {"commit": "ff00ff00", "mailing-list": "ffff0000", "answer": "ff00ffff"}
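# Note: KML colour strings are in aabbggrr order (alpha, blue, green, red), keyed by listing type.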
def join(objs):
kml = simplekml.Kml()
unique_locations = set()
for obj in objs:
loc = IGeolocation(obj).coords
if loc not in unique_locations:
unique_locations.add(loc)
add_point(kml, obj)
return kml.kml()
def add_point(kml, obj):
loc = IGeolocation(obj).coords
if not loc:
return ''
else:
lat, lon = loc
listing = IListing(obj)
listing_type = listing.__name__
summary = listing.summary
if isinstance(summary, str):
summary = listing.summary.decode("utf-8", "ignore")
summary = summary.encode("ascii","xmlcharrefreplace")
point = kml.newpoint(name=listing.__name__, description=summary, coords=[(lon, lat)])
point.style.iconstyle.color = colors[listing_type]
point.style.iconstyle.scale = 1
|
[
"git@matthewwilkes.name"
] |
git@matthewwilkes.name
|
9c1c1496d9e87ef0b64186d9951572487e4eec52
|
2d5648035b8bd32b4a6ded311e48975e5ea100d4
|
/runs/bro/100KB/src2-tgt1/ssl-par-ssl-iter00100.cfg.py
|
0df43d2c6562ed9dcdd7e02e3967e1cde40ca70a
|
[
"MIT"
] |
permissive
|
Largio/broeval
|
3975e54a1eaead6686c53e5e99250a00becbe1e0
|
89e831d07f066100afdd1a5b220f9f08f1c10b3d
|
refs/heads/master
| 2021-05-08T08:54:06.498264
| 2017-11-10T17:09:02
| 2017-11-10T17:09:02
| 92,508,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# Write results to this file
OUTFILE = 'runs/bro/100KB/src2-tgt1/ssl-par-ssl-iter00100.result.csv'
# Source computers for the request
SOURCE = ['10.0.0.1', '10.0.0.3']
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'ssl'
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 100
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ssl'
|
[
"larswiete@googlemail.com"
] |
larswiete@googlemail.com
|
3e9b63e9492405f039f1e350d73adff14fddf664
|
39ab815dfdbab9628ede8ec3b4aedb5da3fd456a
|
/aql/aql/options/aql_option_types.py
|
e93027919b544a7de53973e716f7a8f385c8a943
|
[
"MIT"
] |
permissive
|
menify/sandbox
|
c03b1bf24c1527b47eb473f1acc433f17bfb1d4f
|
32166c71044f0d5b414335b2b6559adc571f568c
|
refs/heads/master
| 2016-09-05T21:46:53.369065
| 2015-04-20T06:35:27
| 2015-04-20T06:35:27
| 25,891,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,600
|
py
|
#
# Copyright (c) 2011,2012 The developers of Aqualid project - http://aqualid.googlecode.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__all__ = (
'OptionType', 'StrOptionType', 'VersionOptionType', 'PathOptionType', 'BoolOptionType',
'EnumOptionType', 'RangeOptionType', 'ListOptionType', 'DictOptionType',
'autoOptionType',
'ErrorOptionTypeEnumAliasIsAlreadySet', 'ErrorOptionTypeEnumValueIsAlreadySet',
'ErrorOptionTypeUnableConvertValue', 'ErrorOptionTypeNoEnumValues',
)
from aql.util_types import String, AqlException, toString, toSequence, IgnoreCaseString, Version, FilePath, UniqueList, List, \
SplitListType, ValueListType, Dict, SplitDictType, ValueDictType
#//===========================================================================//
class ErrorOptionTypeEnumAliasIsAlreadySet( AqlException ):
def __init__( self, option, value, current_value, new_value ):
msg = "Alias '%s' of Enum Option '%s' can't be changed to '%s' from '%s'" % (value, option, new_value, current_value )
super(type(self), self).__init__( msg )
#//===========================================================================//
class ErrorOptionTypeEnumValueIsAlreadySet( AqlException ):
def __init__( self, option, value, new_value ):
msg = "Value '%s' of Enum Option '%s' can't be changed to alias to '%s'" % (value, option, new_value )
super(type(self), self).__init__( msg )
#//===========================================================================//
class ErrorOptionTypeUnableConvertValue( TypeError ):
def __init__( self, option_type, invalid_value ):
self.option_type = option_type
self.invalid_value = invalid_value
msg = "Unable to convert option value '%s (%s)' to '%s'" % (invalid_value, type(invalid_value), option_type.rangeHelp())
super(type(self), self).__init__( msg )
#//===========================================================================//
class ErrorOptionTypeNoEnumValues( TypeError ):
def __init__( self, option_type ):
msg = "Enum option type '%s' doesn't have any values." % (option_type,)
super(type(self), self).__init__( msg )
#//===========================================================================//
def autoOptionType( value ):
if isinstance( value, (UniqueList, list, tuple) ):
value_type = str
if value:
try:
value_type = type(value[0])
except IndexError:
pass
return ListOptionType( value_type = value_type )
if isinstance( value, dict ):
return DictOptionType()
if isinstance( value, bool ):
return BoolOptionType()
return OptionType( value_type = type(value), is_auto = True )
#//===========================================================================//
class OptionType (object):
__slots__ = (
'value_type',
'default',
'description',
'group',
'range_help',
'is_auto',
'is_tool_key',
)
#//-------------------------------------------------------//
def __init__( self, value_type = str, description = None, group = None, range_help = None, default = NotImplemented,
is_auto = False, is_tool_key = False ):
if issubclass( value_type, OptionType ):
value_type = value_type()
self.value_type = value_type
self.is_auto = is_auto
self.is_tool_key = is_tool_key
self.description = description
self.group = group
self.range_help = range_help
if default is NotImplemented:
self.default = NotImplemented
else:
self.default = value_type( default )
#//-------------------------------------------------------//
def __call__( self, value = NotImplemented ):
"""
Converts a value to options' value
"""
try:
if value is NotImplemented:
if self.default is NotImplemented:
return self.value_type()
return self.default
return self.value_type( value )
except (TypeError, ValueError):
raise ErrorOptionTypeUnableConvertValue( self, value )
def toStr( self, value ):
"""
Converts a value to options' value string
"""
return toString( value )
#//-------------------------------------------------------//
def rangeHelp( self ):
"""
Returns a description (list of strings) about range of allowed values
"""
if self.range_help:
return list(toSequence( self.range_help ))
return ["Value of type '%s'" % self.value_type.__name__]
#//===========================================================================//
#//===========================================================================//
class StrOptionType (OptionType):
def __init__( self, ignore_case = False, description = None, group = None, range_help = None, is_tool_key = False ):
value_type = IgnoreCaseString if ignore_case else String
super(StrOptionType, self).__init__( value_type, description, group, range_help, is_tool_key = is_tool_key )
#//===========================================================================//
#//===========================================================================//
class VersionOptionType (OptionType):
def __init__( self, description = None, group = None, range_help = None, is_tool_key = False ):
super(VersionOptionType, self).__init__( Version, description, group, range_help, is_tool_key = is_tool_key )
#//===========================================================================//
#//===========================================================================//
class PathOptionType (OptionType):
def __init__( self, description = None, group = None, range_help = None, is_tool_key = False ):
super(PathOptionType, self).__init__( FilePath, description, group, range_help, is_tool_key = is_tool_key )
#//===========================================================================//
#//===========================================================================//
class BoolOptionType (OptionType):
__slots__ = (
'true_value',
'false_value',
'true_values',
'false_values',
'aliases',
)
#//-------------------------------------------------------//
__true_values = ('yes', 'true', 'on', 'enabled', 'y', '1', 't' )
__false_values = ('no', 'false', 'off', 'disabled', 'n', '0', 'f' )
#//-------------------------------------------------------//
def __init__( self, description = None, group = None, style = None, true_values = None, false_values = None, default = False, is_tool_key = False ):
#noinspection PyTypeChecker
super(BoolOptionType,self).__init__( bool, description, group, default = default, is_tool_key = is_tool_key )
if style is None:
style = ('True', 'False')
else:
style = map(IgnoreCaseString, style)
if true_values is None:
true_values = self.__true_values
else:
true_values = toSequence( true_values )
if false_values is None:
false_values = self.__false_values
else:
false_values = toSequence( false_values )
self.true_value, self.false_value = style
self.true_values = set()
self.false_values = set()
self.addValues( true_values, false_values )
self.addValues( self.true_value, self.false_value )
#//-------------------------------------------------------//
def __call__( self, value = NotImplemented ):
if type(value) is bool:
return value
if value is NotImplemented:
value = self.default
value_str = IgnoreCaseString(value)
if value_str in self.true_values:
return True
if value_str in self.false_values:
return False
return True if value else False
#//-------------------------------------------------------//
def toStr( self, value ):
return self.true_value if value else self.false_value
#//-------------------------------------------------------//
def addValues( self, true_values, false_values ):
true_values = toSequence( true_values )
false_values = toSequence( false_values )
self.true_values.update( map( lambda v: IgnoreCaseString(v), true_values ) )
self.false_values.update( map( lambda v: IgnoreCaseString(v), false_values ) )
#//-------------------------------------------------------//
def rangeHelp( self ):
return [ ', '.join( sorted( self.true_values ) ),
', '.join( sorted( self.false_values ) ) ]
#//===========================================================================//
#//===========================================================================//
class EnumOptionType (OptionType):
__slots__ = (
'__values',
)
def __init__( self, values, description = None, group = None, value_type = IgnoreCaseString, default = NotImplemented, is_tool_key = False ):
super(EnumOptionType,self).__init__( value_type, description, group, default = default, is_tool_key = is_tool_key )
self.__values = {}
if default is not NotImplemented:
self.addValues( default )
self.addValues( values )
#//-------------------------------------------------------//
def addValues( self, values ):
try:
values = tuple( values.items() ) # convert dictionary to a sequence
except AttributeError:
pass
set_default_value = self.__values.setdefault
value_type = self.value_type
for value in toSequence(values):
it = iter( toSequence( value ) )
value = value_type( next( it ) )
value = set_default_value( value, value )
for alias in it:
alias = value_type(alias)
v = set_default_value( alias, value )
if v != value:
if alias == v:
raise ErrorOptionTypeEnumValueIsAlreadySet( self, alias, value )
else:
raise ErrorOptionTypeEnumAliasIsAlreadySet( self, alias, v, value )
#//-------------------------------------------------------//
def __call__( self, value = NotImplemented ):
try:
if value is NotImplemented:
value = self.default
if value is not NotImplemented:
return value
try:
value = next(iter(self.__values.values()))
return value
except StopIteration:
raise ErrorOptionTypeNoEnumValues( self )
value = self.__values[ self.value_type( value ) ]
return value
except (KeyError, TypeError):
raise ErrorOptionTypeUnableConvertValue( self, value )
#//-------------------------------------------------------//
def rangeHelp(self):
values = {}
for alias, value in self.__values.items():
if alias is value:
values.setdefault( alias, [] )
else:
values.setdefault( value, [] ).append( alias )
help_str = []
for value, aliases in values.items():
s = toString(value)
if aliases:
s += ' (or ' + ', '.join( map( toString, aliases ) ) + ')'
help_str.append( s )
return help_str
#//-------------------------------------------------------//
def range( self ):
values = []
for alias, value in self.__values.items():
if alias is value:
values.append( alias )
return values
#//===========================================================================//
#//===========================================================================//
#noinspection PyAttributeOutsideInit
class RangeOptionType (OptionType):
__slots__ = (
'min_value',
'max_value',
'auto_correct',
)
def __init__( self, min_value, max_value, description = None, group = None, value_type = int, auto_correct = True, default = NotImplemented, is_tool_key = False ):
#noinspection PyTypeChecker
super(RangeOptionType,self).__init__( value_type, description, group, default = default, is_tool_key = is_tool_key )
self.setRange( min_value, max_value, auto_correct )
if default is not NotImplemented:
self.default = self( default )
#//-------------------------------------------------------//
def setRange( self, min_value, max_value, auto_correct = True ):
if min_value is not None:
try:
min_value = self.value_type( min_value )
except (TypeError, ValueError):
raise ErrorOptionTypeUnableConvertValue( self, min_value )
else:
min_value = self.value_type()
if max_value is not None:
try:
max_value = self.value_type( max_value )
except (TypeError, ValueError):
raise ErrorOptionTypeUnableConvertValue( self, max_value )
else:
max_value = self.value_type()
self.min_value = min_value
self.max_value = max_value
if auto_correct is not None:
self.auto_correct = auto_correct
#//-------------------------------------------------------//
def __call__( self, value = NotImplemented):
try:
min_value = self.min_value
if value is NotImplemented:
if self.default is NotImplemented:
return min_value
value = self.default
value = self.value_type( value )
if value < min_value:
if self.auto_correct:
value = min_value
else:
raise TypeError()
max_value = self.max_value
if value > max_value:
if self.auto_correct:
value = max_value
else:
raise TypeError()
return value
except TypeError:
raise ErrorOptionTypeUnableConvertValue( self, value )
#//-------------------------------------------------------//
def rangeHelp(self):
return ["%s ... %s" % (self.min_value, self.max_value) ]
#//-------------------------------------------------------//
def range( self ):
return [self.min_value, self.max_value]
#//===========================================================================//
#//===========================================================================//
class ListOptionType (OptionType):
__slots__ = ('item_type',)
#//=======================================================//
def __init__( self, value_type = str, unique = False, separators = ', ', description = None, group = None, range_help = None, is_tool_key = False ):
if isinstance(value_type, OptionType):
if description is None:
description = value_type.description
if description:
description = "List of: " + description
if group is None:
group = value_type.group
if range_help is None:
range_help = value_type.range_help
if unique:
list_type = UniqueList
else:
list_type = List
list_type = ValueListType( list_type, value_type )
if separators:
list_type = SplitListType( list_type, separators )
super(ListOptionType,self).__init__( list_type, description, group, range_help, is_tool_key = is_tool_key )
self.item_type = value_type
#//-------------------------------------------------------//
def __call__( self, values = None ):
try:
if values is NotImplemented:
values = []
return self.value_type( values )
except (TypeError, ValueError):
raise ErrorOptionTypeUnableConvertValue( self, values )
#//-------------------------------------------------------//
def rangeHelp( self ):
if self.range_help:
return list(toSequence( self.range_help ))
if isinstance(self.item_type, OptionType):
return self.item_type.rangeHelp()
return ["List of type '%s'" % self.item_type.__name__]
#//===========================================================================//
class DictOptionType (OptionType):
#//=======================================================//
def __init__( self, key_type = str, value_type = None, separators = ', ', description = None, group = None, range_help = None, is_tool_key = False ):
if isinstance(value_type, OptionType):
if description is None:
description = value_type.description
if description:
description = "List of: " + description
if group is None:
group = value_type.group
if range_help is None:
range_help = value_type.range_help
dict_type = ValueDictType( Dict, key_type, value_type )
if separators:
dict_type = SplitDictType( dict_type, separators )
super(DictOptionType,self).__init__( dict_type, description, group, range_help, is_tool_key = is_tool_key )
#//-------------------------------------------------------//
def setValueType( self, key, value_type ):
if isinstance( value_type, OptionType ):
value_type = value_type.value_type
self.value_type.setValueType( key, value_type )
#//-------------------------------------------------------//
def __call__( self, values = None ):
try:
if values is NotImplemented:
values = None
return self.value_type( values )
except (TypeError, ValueError):
raise ErrorOptionTypeUnableConvertValue( self, values )
#//-------------------------------------------------------//
def rangeHelp( self ):
if self.range_help:
return list(toSequence( self.range_help ))
return ["Dictionary of values"]
|
[
"menify@a28edc5c-ec3e-0410-a3da-1b30b3a8704b"
] |
menify@a28edc5c-ec3e-0410-a3da-1b30b3a8704b
|
46f9074e93f7bef5beaa27844351f2b1ba6935da
|
3307766701d680af6d12a726a2d98df2cb1830e5
|
/jams/gcj/2013/1C/C/C.py
|
0660c807359fca4cfb396ebfa66c729d1b5b2f9e
|
[] |
no_license
|
dpaneda/code
|
c1a54037a275fa7044eb5c2d6079f052dd968615
|
7da1ede33a6a7cd19cbd0db517d91e7cccfbbfff
|
refs/heads/master
| 2023-01-07T18:41:00.816363
| 2022-12-30T09:24:22
| 2022-12-30T09:24:22
| 1,583,913
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,495
|
py
|
#!/usr/bin/python2
import sys
import bisect
def calculate_atacks(tribes):
# We calculate attacks day by day, until no tribe have any attacks left
attacks = {}
for tribe in tribes:
for i in xrange(0, tribe[1]):
d = tribe[0]
if d not in attacks:
attacks[d] = []
attacks[d].append((tribe[2], tribe[3], tribe[4]))
# Change tribe status
tribe[0] += tribe[5]
tribe[2] += tribe[6]
tribe[3] += tribe[6]
tribe[4] += tribe[7]
return attacks
def raise_wall(wall, wallh, w, e, s):
# print wall, wallh
# print w, e, s
a = bisect.bisect_right(wall, w)
if a > 0:
a -= 1
b = bisect.bisect_right(wall, e)
    # print a, b
insert = False
if wall[a] < w and wallh[a] < s:
wall.insert(a + 1, w)
wallh.insert(a + 1, s)
b += 1
insert = True
elif wall[a] == w and wallh[a] < s:
wallh[a] = s
insert = True
if insert:
if b >= len(wall):
wall.insert(a + 2, e)
wallh.insert(a + 2, 0)
elif wall[b] > e:
wall.insert(a + 2, e)
wallh.insert(a + 2, wall[b])
for i in xrange(a + 2, b):
if wallh[i] < s:
del(wall[i])
del(wallh[i])
# print wall, wallh
def wall_minimum_height(wall, wallh, w, e):
a = bisect.bisect_right(wall, w) - 1
if a < 0:
a = 0
b = bisect.bisect_right(wall, e)
if a == b:
return 0
return min(wallh[a:b])
def succeed(wall, wallh, w, e, s):
#print w, e, s
m = wall_minimum_height(wall, wallh, w, e)
return m < s
def simulate_attacks(attacks):
wall = [0]
wallh = [0]
s = 0
days = sorted(attacks.iterkeys())
for day in days:
for attack in attacks[day]:
if succeed(wall, wallh, attack[0], attack[1], attack[2]):
s += 1
for attack in attacks[day]:
raise_wall(wall, wallh, attack[0], attack[1], attack[2])
return s
def Solve():
ntribes = int(sys.stdin.readline().strip())
tribes = []
for i in xrange(0, ntribes):
d, n, w, e, s, di, pi, si = map(int, sys.stdin.readline().strip().split())
tribes.append([d, n, w, e, s, di, pi, si])
attacks = calculate_atacks(tribes)
return simulate_attacks(attacks)
num = int(sys.stdin.readline())
for case in range(1, num + 1):
print "Case #%d: %s " % (case, Solve())
|
[
"dpaneda@gmail.com"
] |
dpaneda@gmail.com
|
f85d432e037030d3e230472ed90ab71633bfd965
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/sieve-6.py
|
50bed63741a7b659fb9658ec148349d295ad58a5
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
# A resizable list of integers
class Vector(object):
$ClassBody
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
# Data
v:Vector = None
i:int = 0
# Crunch
v = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
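
# Illustrative sketch (plain CPython, not ChocoPy; not part of the benchmark):
# the same remove-multiples loop as sieve() above, written against an ordinary
# Python list instead of the Vector class.
def sieve_list(values):
    i = 0
    while i < len(values):
        k = values[i]
        j = i + 1
        while j < len(values):
            if values[j] % k == 0:
                del values[j]
            else:
                j = j + 1
        i = i + 1
    return values

print(sieve_list(list(range(2, 50))))   # the primes below 50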
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
7cb4c2732a9e0437ad2c3c1be8df7a72b03dab80
|
b8062e01860960131b37e27298b6b755b4191f5f
|
/python/level1_single_api/9_amct/amct_pytorch/resnet-101/src/resnet-101_calibration.py
|
1fb64a80ea43a7e08efa9490757866a88b3a89a4
|
[
"Apache-2.0"
] |
permissive
|
RomanGaraev/samples
|
4071fcbe6bf95cf274576665eb72588568d8bcf2
|
757aac75a0f3921c6d1b4d98599bd7d4ffda936b
|
refs/heads/master
| 2023-07-16T02:17:36.640036
| 2021-08-30T15:14:05
| 2021-08-30T15:14:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,051
|
py
|
"""
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
import argparse
import torch # pylint: disable=E0401
from PIL import Image # pylint: disable=E0401
from torchvision import transforms # pylint: disable=E0401
import onnxruntime as ort # pylint: disable=E0401
import amct_pytorch as amct # pylint: disable=E0401
from resnet import resnet101 # pylint: disable=E0401, C0415
PATH = os.path.realpath('./')
IMG_DIR = os.path.join(PATH, 'data/images')
LABEL_FILE = os.path.join(IMG_DIR, 'image_label.txt')
PARSER = argparse.ArgumentParser(description='whether use nuq')
PARSER.add_argument('--nuq', dest='nuq', action='store_true', help='whether use nuq')
ARGS = PARSER.parse_args()
if ARGS.nuq:
OUTPUTS = os.path.join(PATH, 'outputs/nuq')
else:
OUTPUTS = os.path.join(PATH, 'outputs/calibration')
TMP = os.path.join(OUTPUTS, 'tmp')
def get_labels_from_txt(label_file):
"""Read all images' name and label from label_file"""
images = []
labels = []
with open(label_file, 'r') as f:
lines = f.readlines()
for line in lines:
images.append(line.split(' ')[0])
labels.append(int(line.split(' ')[1]))
return images, labels
def prepare_image_input(images):
"""Read all images"""
input_tensor = torch.zeros(len(images), 3, 224, 224) # pylint: disable=E1101
preprocess = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
for index, image in enumerate(images):
input_image = Image.open(image).convert('RGB')
input_tensor[index, ...] = preprocess(input_image)
return input_tensor
def img_postprocess(probs, labels):
"""Do image post-process"""
# calculate top1 and top5 accuracy
top1_get = 0
top5_get = 0
prob_size = probs.shape[1]
for index, label in enumerate(labels):
top5_record = (probs[index, :].argsort())[prob_size - 5: prob_size]
if label == top5_record[-1]:
top1_get += 1
top5_get += 1
elif label in top5_record:
top5_get += 1
return float(top1_get) / len(labels), float(top5_get) / len(labels)
def model_forward(model, batch_size, iterations):
"""Do pytorch model forward"""
images, labels = get_labels_from_txt(LABEL_FILE)
images = [os.path.join(IMG_DIR, image) for image in images]
top1_total = 0
top5_total = 0
for i in range(iterations):
input_batch = prepare_image_input(images[i * batch_size: (i + 1) * batch_size])
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
input_batch = input_batch.to('cuda')
model.to('cuda')
with torch.no_grad():
output = model(input_batch)
top1, top5 = img_postprocess(output, labels[i * batch_size: (i + 1) * batch_size])
top1_total += top1
top5_total += top5
print('****************iteration:{}*****************'.format(i))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
return top1_total / iterations, top5_total / iterations
def onnx_forward(onnx_model, batch_size, iterations):
"""Do onnx model forward"""
ort_session = ort.InferenceSession(onnx_model)
images, labels = get_labels_from_txt(LABEL_FILE)
images = [os.path.join(IMG_DIR, image) for image in images]
top1_total = 0
top5_total = 0
for i in range(iterations):
input_batch = prepare_image_input(images[i * batch_size: (i + 1) * batch_size])
output = ort_session.run(None, {'input': input_batch.numpy()})
top1, top5 = img_postprocess(output[0], labels[i * batch_size: (i + 1) * batch_size])
top1_total += top1
top5_total += top5
print('****************iteration:{}*****************'.format(i))
print('top1_acc:{}'.format(top1))
print('top5_acc:{}'.format(top5))
print('******final top1:{}'.format(top1_total / iterations))
print('******final top5:{}'.format(top5_total / iterations))
return top1_total / iterations, top5_total / iterations
def main():
"""Sample main function"""
model = resnet101(pretrained=True)
model.eval()
ori_top1, ori_top5 = model_forward(model, batch_size=32, iterations=5)
# Quantize configurations
args_shape = [(1, 3, 224, 224)]
input_data = tuple([torch.randn(arg_shape) for arg_shape in args_shape]) # pylint: disable=E1101
if torch.cuda.is_available():
input_data = tuple([data.to('cuda') for data in input_data])
model.to('cuda')
config_json_file = os.path.join(TMP, 'config.json')
skip_layers = []
batch_num = 2
if ARGS.nuq:
config_defination = os.path.join(PATH, 'src/nuq_conf/nuq_quant.cfg')
amct.create_quant_config(
config_json_file, model, input_data, skip_layers, batch_num, config_defination=config_defination)
else:
amct.create_quant_config(config_json_file, model, input_data, skip_layers, batch_num)
# Phase1: do conv+bn fusion, weights calibration and generate
# calibration model
record_file = os.path.join(TMP, 'record.txt')
modified_model = os.path.join(TMP, 'modified_model.onnx')
calibration_model = amct.quantize_model(
config_json_file, modified_model, record_file, model, input_data, input_names=['input'],
output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
# Phase2: do calibration
model_forward(calibration_model, batch_size=32, iterations=batch_num)
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Phase3: save final model, one for onnx do fake quant test, one
# deploy model for ATC
result_path = os.path.join(OUTPUTS, 'resnet-101')
amct.save_model(modified_model, record_file, result_path)
# Phase4: run fake_quant model test
quant_top1, quant_top5 = onnx_forward(
'%s_%s' % (result_path, 'fake_quant_model.onnx'), batch_size=32, iterations=5)
print('[INFO] ResNet101 before quantize top1:{:>10} top5:{:>10}'.format(ori_top1, ori_top5))
print('[INFO] ResNet101 after quantize top1:{:>10} top5:{:>10}'.format(quant_top1, quant_top5))
if __name__ == '__main__':
main()
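
# Illustrative standalone check (not part of the calibration flow; all numbers
# are made up): the top-1/top-5 bookkeeping used by img_postprocess() above,
# run on a toy batch of 2 samples and 6 classes.
import numpy as np

toy_probs = np.array([[0.1, 0.2, 0.5, 0.05, 0.1, 0.05],   # argmax is class 2
                      [0.3, 0.1, 0.1, 0.2, 0.2, 0.1]])    # argmax is class 0
toy_labels = [2, 3]
top1 = top5 = 0
prob_size = toy_probs.shape[1]
for index, label in enumerate(toy_labels):
    top5_record = (toy_probs[index, :].argsort())[prob_size - 5: prob_size]
    if label == top5_record[-1]:
        top1 += 1
        top5 += 1
    elif label in top5_record:
        top5 += 1
print(top1 / len(toy_labels), top5 / len(toy_labels))      # 0.5 1.0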
|
[
"derek.qian.wang@huawei.com"
] |
derek.qian.wang@huawei.com
|
4af4f611f29d8399e7635e13af155fc04e99e0b9
|
9e1dcb4f71b7eda84bbf0855d574eb38719d21a9
|
/nested_loops_prime_number.py
|
09ead76ff7a45ba184bcf3f6b8ff47bf66b017c6
|
[] |
no_license
|
ayoubabounakif/edX-Python
|
689c2730458513151fc3b7a69f6a3e8b25462028
|
2449616fd6d9d8d8d74819cff24f3a54bff9dd4b
|
refs/heads/master
| 2020-12-30T03:46:10.271688
| 2020-02-07T05:28:09
| 2020-02-07T05:28:09
| 238,849,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
#ALGORITHM
'''
1. Select a number
2. Select a divisor and set it equal to 2.
3. Assume number is prime
4. If divisor is less than the number go to step 5 else go to step 8
5. If remainder of (number/divisor) is 0 then number is not prime (exit/stop)
6. Add one to the divisor
7. Go to step 4
8. Number is prime
'''
# A program that prints the prime numbers
#between x (start_number) and y (end_number)
#CODE (using while loop)
ask_user = int(input("Enter a value for x: "))
ask_user_2 = int(input("Enter a value for y: "))
x = ask_user
y = ask_user_2
current_number = x
while current_number <= y:
current_divisor = 2
current_number_prime = True
while (current_divisor < current_number):
if current_number % current_divisor == 0:
current_number_prime = False
break
current_divisor = current_divisor + 1
if current_number_prime:
print (current_number, "is prime")
current_number = current_number + 1
print ("DONE! These are all the prime numbers between your values!")
#CODE (using for loop)
ask_user = int(input("Enter a value for x: "))
ask_user_2 = int(input("Enter a value for y: "))
x = ask_user
y = ask_user_2
current_number = x
for current_number in range(x, y+1):
current_number_prime = True
for current_divisor in range (2, current_number):
if current_number % current_divisor == 0:
current_number_prime = False
break
if current_number_prime:
print (current_number, "is prime")
print ("DONE! These are all the prime numbers between your values!")
|
[
"noreply@github.com"
] |
ayoubabounakif.noreply@github.com
|
556a064c6aaa406e6208c1055530438590c6f151
|
9b2f4810b093639209b65bbcb5fa07125e17266f
|
/src/radical/pilot/umgr/staging_input/__init__.py
|
66be18b437eb382c61a394d1bf9f1abbbf8f82d4
|
[
"MIT"
] |
permissive
|
karahbit/radical.pilot
|
887d25d370d08e3455f19cd240677b62278ef67f
|
c611e1df781749deef899dcf5815728e1d8a962e
|
refs/heads/devel
| 2020-12-21T09:54:10.622036
| 2020-08-20T18:18:12
| 2020-08-20T18:18:12
| 254,967,331
| 0
| 0
|
NOASSERTION
| 2020-05-01T00:47:51
| 2020-04-11T22:37:20
| null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .base import UMGRStagingInputComponent as Input
|
[
"andre@merzky.net"
] |
andre@merzky.net
|
4285a06223ef406e7b6a8cfcba809f60b3d98731
|
57eb2354f8fba9d46c8edcfac60c13fc0468d950
|
/Lekhaka/deformer_noiser.py
|
af37dc110bc7fa9c610374b8ecf483f63c73effc
|
[] |
no_license
|
rakeshvar/Lekhaka
|
597e91e60c30c566e6f792af2d1378205f698087
|
1d2d31035fe8a29f002adb5a70d762669102a0f3
|
refs/heads/main
| 2023-06-16T11:18:30.121653
| 2021-07-09T08:35:56
| 2021-07-09T08:35:56
| 370,766,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,731
|
py
|
import numpy as np
from scipy import ndimage as nd
from scipy.special import cosdg, sindg
def _summary(mat, name):
print(f"{name}\tshape:{mat.shape}\tmax:{mat.max():.2f} min:{mat.min():.2f}")
pass
class Deformer:
def __init__(self, translation=0, zoom=0, elastic_magnitude=0, sigma=1, angle=0, nearest=False, debug=False):
self.translation = translation
self.zoom = zoom
self.elastic_magnitude = elastic_magnitude
self.sigma = sigma
self.angle = angle
self.nearest = nearest
# Build a gaussian filter for elastic distortion
if elastic_magnitude:
self.nrounds = 2
nsds = 2
sigma //= self.nrounds
filt = np.exp(-.5 * np.linspace(-nsds, nsds, int(2*nsds*sigma+1)) ** 2)
filt /= filt.sum()
if debug:
print(f"Gaussian Filter Range: {filt.max():.4f}-{filt.min():.4f} "
f"Ratio:{filt.max()/filt.min():.2f} Sum:{filt.sum()}")
self.filt = filt
self.summary = _summary if debug else lambda _, __: None
    def __str__(self):
        return 'Elastic Translation:{:} Zoom:{} Mag:{:d} Sig:{:d} Angle:{} Interpolation:{}'.format(
            self.translation, self.zoom, self.elastic_magnitude, self.sigma, self.angle,
            'Nearest' if self.nearest else 'Linear')
def __call__(self, inpt):
# Degenerate Case
if not (self.elastic_magnitude or self.translation or self.angle or self.zoom):
return inpt
b, h, w = inpt.shape
_hwidx = np.indices((h, w)).astype('float')
target = np.stack([_hwidx for _ in range(b)])
self.summary(target, "initial traget")
if self.elastic_magnitude:
# Elastic
elast = self.elastic_magnitude * np.random.normal(size=(b, 2, h, w))
for _ in range(self.nrounds):
for ax in (-1, -2):
nd.correlate1d(elast, self.filt, axis=ax, output=elast)
target += elast
self.summary(elast, "elastic")
# Zoom and Rotate
if self.zoom or self.angle:
# Center at 'about' half way
origin = np.random.uniform(.4, .6, size=(b, 2, 1, 1)) * np.array((h, w)).reshape((1, 2, 1, 1))
target -= origin
self.summary(origin, "origin")
# Zoom
if self.zoom:
zoomer = np.exp(self.zoom * np.random.uniform(-1, size=(b, 2, 1, 1)))
target *= zoomer
self.summary(zoomer, "zoom")
# Rotate
if self.angle:
theta = self.angle * np.random.uniform(-1, size=b)
c, s = cosdg(theta), sindg(theta)
rotate = np.array([[c, -s], [s, c]])
rotate = np.moveaxis(rotate, -1, 0) # b x 2 x 2
for i in range(b):
target[i] = np.tensordot(rotate[i], target[i], axes=(0, 0))
self.summary(rotate, "rotate")
# Uncenter
target += origin
# Make sure you do not go below zero along the width (vertical axis because of Transpose)
least_vert_disp = target[:, 0, 0].min(axis=-1)
self.summary(least_vert_disp[:, None, None], "least_vert_disp")
target[:, 0] -= least_vert_disp[:, None, None]
if self.translation:
transln = self.translation * np.random.uniform(-1, size=(b, 2, 1, 1))
transln[:, 0] = -2 * np.abs(transln[:, 0]) # Along slab width translation is (0, 2translation)
target += transln
self.summary(transln, "translation")
for i in range(b):
self.summary(target[i, 0], f"{i} final traget y")
self.summary(target[i, 1], f"{i} final traget x")
transy = np.clip(target[:, 0], 0, h - 1 - .001)
transx = np.clip(target[:, 1], 0, w - 1 - .001)
output = np.empty_like(inpt)
if self.nearest:
vert = np.rint(transy).astype(int)
horz = np.rint(transx).astype(int)
for i in range(b):
output[i] = inpt[i, vert[i], horz[i]]
else:
topp = np.floor(transy)
left = np.floor(transx)
fraction_y = transy - topp
fraction_x = transx - left
topp = topp.astype('int32')
left = left.astype('int32')
            for i in range(b):
                # index the per-sample maps with [i] so each bilinear blend is
                # (h, w), matching the nearest-neighbour branch above
                output[i] = inpt[i, topp[i], left[i]] * (1 - fraction_y[i]) * (1 - fraction_x[i]) + \
                            inpt[i, topp[i], left[i] + 1] * (1 - fraction_y[i]) * fraction_x[i] + \
                            inpt[i, topp[i] + 1, left[i]] * fraction_y[i] * (1 - fraction_x[i]) + \
                            inpt[i, topp[i] + 1, left[i] + 1] * fraction_y[i] * fraction_x[i]
self.summary(inpt, "input")
self.summary(output, "output")
return output
class Noiser:
def __init__(self, num_blots=0, erase_fraction=.5, minsize=0, maxsize=0):
self.num_blots = num_blots
self.erase_fraction = erase_fraction
self.minsize = minsize
self.maxsize = maxsize
def __call__(self, inpt):
batch_sz, h, w = inpt.shape
size = batch_sz, self.num_blots
colors = np.random.binomial(n=1, p=1-self.erase_fraction, size=size)
xs = np.random.randint(h, size=size)
dxs = np.random.randint(self.minsize, self.maxsize, size=size)
ys = np.random.randint(w, size=size)
dys = np.random.randint(self.minsize, self.maxsize, size=size)
for i in range(batch_sz):
for x, dx, y, dy, c in zip(xs[i], dxs[i], ys[i], dys[i], colors[i]):
inpt[i, x:(x+dx), y:(y+dy)] = c
return inpt
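
# Illustrative usage sketch (parameter values are made up, not from the repo):
# if appended to this module, it pushes a random batch through the two
# augmenters defined above using nearest-neighbour sampling.
if __name__ == '__main__':
    batch = np.random.rand(4, 32, 128).astype('float32')   # (batch, height, width)
    deform = Deformer(translation=2, zoom=0.1, elastic_magnitude=2, sigma=8,
                      angle=3, nearest=True)
    noise = Noiser(num_blots=5, erase_fraction=0.5, minsize=1, maxsize=4)
    out = noise(deform(batch))
    print(out.shape)                                        # (4, 32, 128)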
|
[
"rakeshvar@gmail.com"
] |
rakeshvar@gmail.com
|
c6984060bdb66e9297a30262564f0ec5543acd5e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03146/s790644084.py
|
7e0cb3ce3d0317c1b444b17f7e0a4ff736bda753
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
s = int(input())
a = s
prev = set()
for i in range(1, 1500000):
if a in prev:
print(i)
exit()
prev.add(a)
if a % 2 == 0:
a //= 2
else:
a = 3 * a + 1
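
# Illustrative restatement (not part of the submission): the loop above as a
# function that returns the first 1-based index at which the sequence value
# repeats.
def first_repeat_step(start):
    seen, a = set(), start
    for i in range(1, 1500000):
        if a in seen:
            return i
        seen.add(a)
        a = a // 2 if a % 2 == 0 else 3 * a + 1

print(first_repeat_step(8))   # 5, since the sequence 8, 4, 2, 1, 4 repeats at step 5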
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
29f193740bef122fbd90749abed73ecb40569459
|
b3a2beaab1ac676c96e93a48d4f35ff6ed6799d0
|
/anyex/async/btcchina.py
|
1adec6030b7a567203d310d05d8ea27f4920560c
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
ttwishing/anyex
|
40c06cf34e4d8f96bb968e8b7be3d2da5e6023f8
|
cfd1f2f04ab992b790add4843aafff91e5773cbf
|
refs/heads/master
| 2020-05-23T12:07:58.615432
| 2019-05-15T05:09:46
| 2019-05-15T05:09:46
| 186,751,745
| 0
| 0
|
MIT
| 2019-05-15T04:57:08
| 2019-05-15T04:57:08
| null |
UTF-8
|
Python
| false
| false
| 11,676
|
py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/anyex/anyex/blob/master/CONTRIBUTING.md#how-to-contribute-code
from anyex.async.base.exchange import Exchange
import base64
import hashlib
class btcchina (Exchange):
def describe(self):
return self.deep_extend(super(btcchina, self).describe(), {
'id': 'btcchina',
'name': 'BTCChina',
'countries': 'CN',
'rateLimit': 1500,
'version': 'v1',
'has': {
'CORS': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766368-465b3286-5ed6-11e7-9a11-0f6467e1d82b.jpg',
'api': {
'plus': 'https://plus-api.btcchina.com/market',
'public': 'https://data.btcchina.com/data',
'private': 'https://api.btcchina.com/api_trade_v1.php',
},
'www': 'https://www.btcchina.com',
'doc': 'https://www.btcchina.com/apidocs',
},
'api': {
'plus': {
'get': [
'orderbook',
'ticker',
'trade',
],
},
'public': {
'get': [
'historydata',
'orderbook',
'ticker',
'trades',
],
},
'private': {
'post': [
'BuyIcebergOrder',
'BuyOrder',
'BuyOrder2',
'BuyStopOrder',
'CancelIcebergOrder',
'CancelOrder',
'CancelStopOrder',
'GetAccountInfo',
'getArchivedOrder',
'getArchivedOrders',
'GetDeposits',
'GetIcebergOrder',
'GetIcebergOrders',
'GetMarketDepth',
'GetMarketDepth2',
'GetOrder',
'GetOrders',
'GetStopOrder',
'GetStopOrders',
'GetTransactions',
'GetWithdrawal',
'GetWithdrawals',
'RequestWithdrawal',
'SellIcebergOrder',
'SellOrder',
'SellOrder2',
'SellStopOrder',
],
},
},
'markets': {
'BTC/CNY': {'id': 'btccny', 'symbol': 'BTC/CNY', 'base': 'BTC', 'quote': 'CNY', 'api': 'public', 'plus': False},
'LTC/CNY': {'id': 'ltccny', 'symbol': 'LTC/CNY', 'base': 'LTC', 'quote': 'CNY', 'api': 'public', 'plus': False},
'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'api': 'public', 'plus': False},
'BCH/CNY': {'id': 'bcccny', 'symbol': 'BCH/CNY', 'base': 'BCH', 'quote': 'CNY', 'api': 'plus', 'plus': True},
'ETH/CNY': {'id': 'ethcny', 'symbol': 'ETH/CNY', 'base': 'ETH', 'quote': 'CNY', 'api': 'plus', 'plus': True},
},
})
async def fetch_markets(self):
markets = await self.publicGetTicker({
'market': 'all',
})
result = []
keys = list(markets.keys())
for p in range(0, len(keys)):
key = keys[p]
market = markets[key]
parts = key.split('_')
id = parts[1]
base = id[0:3]
quote = id[3:6]
base = base.upper()
quote = quote.upper()
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostGetAccountInfo()
balances = response['result']
result = {'info': balances}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercase = currency.lower()
account = self.account()
if lowercase in balances['balance']:
account['total'] = float(balances['balance'][lowercase]['amount'])
if lowercase in balances['frozen']:
account['used'] = float(balances['frozen'][lowercase]['amount'])
account['free'] = account['total'] - account['used']
result[currency] = account
return self.parse_balance(result)
def create_market_request(self, market):
request = {}
field = 'symbol' if (market['plus']) else 'market'
request[field] = market['id']
return request
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetOrderbook'
request = self.create_market_request(market)
orderbook = await getattr(self, method)(self.extend(request, params))
timestamp = orderbook['date'] * 1000
return self.parse_order_book(orderbook, timestamp)
def parse_ticker(self, ticker, market):
timestamp = ticker['date'] * 1000
last = float(ticker['last'])
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['buy']),
'ask': float(ticker['sell']),
'vwap': float(ticker['vwap']),
'open': float(ticker['open']),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['vol']),
'quoteVolume': None,
'info': ticker,
}
def parse_ticker_plus(self, ticker, market):
timestamp = ticker['Timestamp']
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['High']),
'low': float(ticker['Low']),
'bid': float(ticker['BidPrice']),
'ask': float(ticker['AskPrice']),
'vwap': None,
'open': float(ticker['Open']),
'last': float(ticker['Last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['Volume24H']),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetTicker'
request = self.create_market_request(market)
tickers = await getattr(self, method)(self.extend(request, params))
ticker = tickers['ticker']
if market['plus']:
return self.parse_ticker_plus(ticker, market)
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = int(trade['date']) * 1000
return {
'id': trade['tid'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': None,
'price': trade['price'],
'amount': trade['amount'],
}
def parse_trade_plus(self, trade, market):
timestamp = self.parse8601(trade['timestamp'])
return {
'id': None,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['side'].lower(),
'price': trade['price'],
'amount': trade['size'],
}
def parse_trades_plus(self, trades, market=None):
result = []
for i in range(0, len(trades)):
result.append(self.parse_trade_plus(trades[i], market))
return result
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetTrade'
request = self.create_market_request(market)
if market['plus']:
now = self.milliseconds()
request['start_time'] = now - 86400 * 1000
request['end_time'] = now
else:
method += 's' # trades vs trade
response = await getattr(self, method)(self.extend(request, params))
if market['plus']:
return self.parse_trades_plus(response['trades'], market)
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'privatePost' + self.capitalize(side) + 'Order2'
order = {}
id = market['id'].upper()
if type == 'market':
order['params'] = [None, amount, id]
else:
order['params'] = [price, amount, id]
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = params['market'] # TODO fixme
return await self.privatePostCancelOrder(self.extend({
'params': [id, market],
}, params))
def nonce(self):
return self.microseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + path
if api == 'private':
self.check_required_credentials()
p = []
if 'params' in params:
p = params['params']
nonce = self.nonce()
request = {
'method': path,
'id': nonce,
'params': p,
}
p = ','.join(p)
body = self.json(request)
query = (
'tonce=' + nonce +
'&accesskey=' + self.apiKey +
'&requestmethod=' + method.lower() +
'&id=' + nonce +
'&method=' + path +
                '&params=' + p
)
signature = self.hmac(self.encode(query), self.encode(self.secret), hashlib.sha1)
auth = self.encode(self.apiKey + ':' + signature)
headers = {
'Authorization': 'Basic ' + base64.b64encode(auth),
'Json-Rpc-Tonce': nonce,
}
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
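
# Illustrative standalone sketch (values are made up, not wired into the class
# above): the signing scheme assembled in sign() -- an HMAC-SHA1 hex digest of
# the query string, sent via HTTP Basic auth together with the Json-Rpc-Tonce
# header.
import base64, hashlib, hmac

demo_key, demo_secret, demo_tonce = 'KEY', b'SECRET', '1700000000000000'
demo_query = ('tonce=' + demo_tonce + '&accesskey=' + demo_key +
              '&requestmethod=post&id=' + demo_tonce +
              '&method=getAccountInfo&params=')
demo_signature = hmac.new(demo_secret, demo_query.encode(), hashlib.sha1).hexdigest()
demo_auth = base64.b64encode((demo_key + ':' + demo_signature).encode()).decode()
demo_headers = {'Authorization': 'Basic ' + demo_auth, 'Json-Rpc-Tonce': demo_tonce}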
|
[
"yong2452@gmail.com"
] |
yong2452@gmail.com
|
287b2dea5d50e568064505e8ecdad813d1967f06
|
e966e08e69df8f6669034c1d8a2ed57293a48ef7
|
/www/main.py
|
a8c620ef841d4f5469289bfa7a8cbc2b5c224f3a
|
[] |
no_license
|
adrianPerez/notify-io
|
c9d06f5fb2a40d25a9399bb72319225e60ffa142
|
20eeafa5edfe2455d4b154733283aa8ce2969dbb
|
refs/heads/master
| 2021-01-18T12:14:50.622242
| 2009-11-12T06:13:36
| 2009-11-12T06:13:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,257
|
py
|
import wsgiref.handlers
import hashlib, time, os
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import login_required
from django.utils import simplejson
try:
is_dev = os.environ['SERVER_SOFTWARE'].startswith('Dev')
except:
is_dev = False
API_VERSION = 'v1'
if is_dev:
API_HOST = 'localhost:8191'
WWW_HOST = 'localhost:8091'
else:
API_HOST = 'api.notify.io'
WWW_HOST = 'www.notify.io'
def baseN(num,b,numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((num == 0) and "0" ) or (baseN(num // b, b).lstrip("0") + numerals[num % b])
class Account(db.Model):
user = db.UserProperty(auto_current_user_add=True)
hash = db.StringProperty()
api_key = db.StringProperty()
source_enabled = db.BooleanProperty()
source_name = db.StringProperty()
source_icon = db.StringProperty()
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
#def __init__(self, *args, **kwargs):
# super(Account, self).__init__(*args, **kwargs)
@classmethod
def get_by_user(cls, user):
return cls.all().filter('user =', user).get()
@classmethod
def get_by_hash(cls, hash):
return cls.all().filter('hash = ', hash).get()
def set_hash_and_key(self):
self.hash = hashlib.md5(self.user.email()).hexdigest()
self.api_key = ''.join([baseN(abs(hash(time.time())), 36), baseN(abs(hash(self.hash)), 36)])
class Channel(db.Model):
target = db.ReferenceProperty(Account, required=True, collection_name='channels_as_target')
source = db.ReferenceProperty(Account, required=True, collection_name='channels_as_source')
status = db.StringProperty(required=True, default='pending')
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
@classmethod
def get_all_by_target(cls, account):
return cls.all().filter('target =', account)
@classmethod
def get_all_by_source(cls, account):
return cls.all().filter('source =', account)
@classmethod
def get_by_source_and_target(cls, source, target):
return cls.all().filter('source =', source).filter('target =', target).get()
def delete(self):
notices = Notification.all().filter('channel =', self)
for n in notices:
n.channel = None
n.put()
super(Channel, self).delete()
def get_approval_notice(self):
notice = Notification(channel=self, target=self.target, text="%s wants to send you notifications. Click here to approve/deny this request." % self.source.source_name)
notice.title = "New Notification Source"
notice.link = "http://%s/dashboard/sources" % WWW_HOST
notice.icon = self.source.source_icon
notice.sticky = 'true'
return notice
class Notification(db.Model):
channel = db.ReferenceProperty(Channel)
target = db.ReferenceProperty(Account, collection_name='target_notifications')
source = db.ReferenceProperty(Account, collection_name='source_notifications')
title = db.StringProperty()
text = db.TextProperty(required=True)
link = db.StringProperty()
icon = db.StringProperty()
sticky = db.StringProperty()
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
def __init__(self, *args, **kwargs):
channel = kwargs.get('channel')
if channel and isinstance(channel, Channel):
kwargs['source'] = channel.source
kwargs['target'] = channel.target
super(Notification, self).__init__(*args, **kwargs)
def to_json(self):
o = {'text': self.text}
for arg in ['title', 'link', 'icon', 'sticky']:
value = getattr(self, arg)
if value:
o[arg] = value
o['source'] = self.source.source_name
return simplejson.dumps(o)
class MainHandler(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
self.redirect('/dashboard')
return
else:
login_url = users.create_login_url('/')
self.response.out.write(template.render('templates/main.html', locals()))#file('templates/main.html').read())#
class NotificationHandler(webapp.RequestHandler):
def post(self):
target = Account.all().filter('hash =', self.request.get('hash')).get()
source = Account.all().filter('api_key =', self.request.get('api_key')).get()
replay = self.request.get('replay', None)
if replay:
self.replay(replay, target, source)
else:
self.notify(target, source)
def replay(self, replay, target, source):
notice = Notification.get_by_id(int(replay))
channel = notice.channel
# Can only replay if hash == notification target AND (api_key == notification source OR notification target)
authz = channel.target.key() == target.key() and (channel.source.key() == source.key() or source.key() == channel.target.key())
if notice and channel.status == 'enabled' and authz:
self.response.out.write(notice.to_json())
else:
self.error(404)
def notify(self, target, source):
channel = Channel.all().filter('target =', target).filter('source =', source).get()
approval_notice = None
if not channel and source and target:
channel = Channel(target=target, source=source)
channel.put()
approval_notice = channel.get_approval_notice()
if channel:
notice = Notification(channel=channel, text=self.request.get('text'), icon=source.source_icon)
for arg in ['title', 'link', 'icon', 'sticky']:
value = self.request.get(arg, None)
if value:
setattr(notice, arg, value)
notice.put()
if channel.status == 'enabled':
self.response.out.write(notice.to_json())
elif channel.status == 'pending':
self.response.set_status(202)
if approval_notice:
self.response.out.write(approval_notice.to_json())
else:
self.response.out.write("202 Pending approval")
elif channel.status == 'disabled':
self.response.set_status(202)
self.response.out.write("202 Accepted but disabled")
else:
self.error(404)
self.response.out.write("404 Target or source not found")
class DownloadHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
account = Account.all().filter('user =', user).get()
host = API_HOST
hash = account.hash
api_key = account.api_key
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(template.render('templates/client.py', locals()))
class ListenAuthHandler(webapp.RequestHandler):
def get(self):
api_key = self.request.get('api_key')
userhash = self.request.get('hash')
account = Account.all().filter('hash =', userhash).filter('api_key =', api_key).get()
if account:
self.response.out.write("ok")
else:
self.error(403)
class IntroHandler(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if not user:
login_url = users.create_login_url('/')
self.response.out.write(template.render('templates/getstarted.html', locals()))
def main():
application = webapp.WSGIApplication([
('/', MainHandler),
('/notification', NotificationHandler),
('/download/notifyio-client.py', DownloadHandler),
('/auth', ListenAuthHandler),
('/getstarted', IntroHandler),
], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
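
# Illustrative client-side sketch (separate from the app; the URL and
# credential values are placeholders): POST a notification to the
# /notification handler above with the parameters NotificationHandler reads.
import urllib, urllib2

demo_params = urllib.urlencode({
    'hash': 'TARGET_HASH',        # Account.hash of the receiving user
    'api_key': 'SOURCE_API_KEY',  # Account.api_key of the sending source
    'text': 'Build finished',
    'title': 'CI',
    'sticky': 'true',
})
print(urllib2.urlopen('http://www.notify.io/notification', demo_params).read())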
|
[
"progrium@gmail.com"
] |
progrium@gmail.com
|
db7db6a90704df75b54f315bea68f0ad7f3365c1
|
649bd422025e421d86025743eac324c9b882a2e8
|
/exam/1_three-dimensional_atomic_system/dump/phasetrans/temp101_6000.py
|
ca5589222090cec51eb4387d80afbfad76d2ebaa
|
[] |
no_license
|
scheuclu/atom_class
|
36ddee1f6a5995872e858add151c5942c109847c
|
0c9a8c63d9b38898c1869fe8983126cef17662cd
|
refs/heads/master
| 2021-01-21T10:52:28.448221
| 2017-03-07T23:04:41
| 2017-03-07T23:04:41
| 83,489,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68,814
|
py
|
ITEM: TIMESTEP
6000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
1.4321272860081535e-01 4.7056787271390377e+01
1.4321272860081535e-01 4.7056787271390377e+01
1.4321272860081535e-01 4.7056787271390377e+01
ITEM: ATOMS id type xs ys zs
8 1 0.121128 0.0652153 0.0529452
35 1 0.0586052 0.120368 0.0662597
130 1 0.0478415 0.0644832 0.129102
165 1 0.124033 0.12342 0.120496
161 1 0.98823 0.125918 0.128182
391 1 0.190985 0.0048602 0.439043
12 1 0.24819 0.0670862 0.0611532
39 1 0.187074 0.122063 0.0533896
43 1 0.320293 0.130682 0.0544349
134 1 0.181293 0.0560446 0.125742
138 1 0.30732 0.0530883 0.121272
169 1 0.240007 0.10949 0.126835
1165 1 0.371986 0.497731 0.127201
512 1 0.876989 0.44083 0.433068
1291 1 0.316116 0.496974 0.311832
16 1 0.371795 0.0612295 0.0620073
47 1 0.437826 0.114021 0.0581376
142 1 0.430052 0.062614 0.122352
173 1 0.379179 0.124915 0.138167
177 1 0.495709 0.118015 0.114163
511 1 0.941227 0.373121 0.434656
1051 1 0.813842 0.494019 0.0542743
20 1 0.503697 0.0626443 0.0466291
24 1 0.618396 0.0614018 0.0596878
51 1 0.563683 0.121955 0.0584759
146 1 0.560756 0.0636515 0.116751
181 1 0.621046 0.121178 0.119958
28 1 0.748168 0.0555409 0.063096
55 1 0.691085 0.129398 0.0688364
59 1 0.813312 0.120681 0.0650308
150 1 0.685922 0.0616512 0.124457
154 1 0.808926 0.0625564 0.132809
185 1 0.748522 0.118407 0.129589
1045 1 0.632401 0.496465 0.00307537
257 1 0.985749 0.0019644 0.252104
4 1 0.99584 0.0682345 0.0673613
32 1 0.876888 0.0629551 0.0639142
63 1 0.928141 0.122593 0.0568666
158 1 0.928595 0.0571168 0.124497
189 1 0.875148 0.112629 0.126521
1285 1 0.134889 0.494434 0.257063
113 1 0.499521 0.382165 0.00639871
40 1 0.121127 0.175363 0.063895
67 1 0.0682582 0.24445 0.0545415
72 1 0.119785 0.316021 0.0664481
162 1 0.0596738 0.182325 0.11949
194 1 0.0635595 0.30876 0.125828
197 1 0.119146 0.235746 0.126522
36 1 0.00873541 0.176155 0.0563818
1301 1 0.625385 0.498166 0.242573
27 1 0.816269 0.00160164 0.0667978
44 1 0.254646 0.186829 0.0541968
71 1 0.183945 0.242923 0.050513
75 1 0.320405 0.2448 0.0546557
76 1 0.255848 0.302572 0.063998
166 1 0.189254 0.190654 0.123266
170 1 0.321352 0.185042 0.115355
198 1 0.178342 0.310604 0.124489
201 1 0.250149 0.24938 0.117744
202 1 0.324996 0.307847 0.115514
1437 1 0.871202 0.493117 0.372296
1183 1 0.952448 0.50141 0.182558
1167 1 0.433169 0.496037 0.195564
174 1 0.438422 0.183023 0.124459
48 1 0.388053 0.173805 0.0595832
79 1 0.441226 0.248243 0.0616888
80 1 0.379786 0.300443 0.0584279
205 1 0.379228 0.242368 0.127794
206 1 0.429667 0.310109 0.120297
84 1 0.488685 0.316129 0.0569946
1309 1 0.884009 0.500873 0.235989
52 1 0.492207 0.182649 0.0507947
209 1 0.500796 0.234277 0.115626
56 1 0.624645 0.185926 0.0744435
83 1 0.56474 0.259103 0.063541
88 1 0.620886 0.312571 0.0573471
178 1 0.563889 0.183501 0.125628
210 1 0.557067 0.314044 0.125164
213 1 0.624564 0.258809 0.119893
60 1 0.74487 0.188774 0.0692651
87 1 0.684781 0.256381 0.0639363
91 1 0.809947 0.244391 0.0600154
92 1 0.753095 0.309474 0.0640282
182 1 0.684525 0.192522 0.126194
186 1 0.811141 0.18199 0.121576
214 1 0.685344 0.31175 0.125652
217 1 0.753052 0.25301 0.119086
218 1 0.807026 0.324013 0.124366
287 1 0.938339 0.0102788 0.315231
193 1 0.00648118 0.238458 0.132031
68 1 0.00756776 0.308302 0.0598156
64 1 0.868943 0.182474 0.0616799
95 1 0.945237 0.235997 0.0773487
96 1 0.866704 0.306592 0.074907
190 1 0.92692 0.17961 0.13134
221 1 0.875069 0.2353 0.120871
222 1 0.934342 0.31433 0.112717
99 1 0.0618397 0.373871 0.0677851
104 1 0.127296 0.433875 0.0660925
226 1 0.0634204 0.427211 0.126115
229 1 0.127749 0.375214 0.123966
1417 1 0.247161 0.488042 0.378628
510 1 0.946002 0.431991 0.368591
103 1 0.189705 0.366519 0.0628518
107 1 0.312201 0.368755 0.0717985
108 1 0.253562 0.428358 0.0606608
230 1 0.195211 0.432487 0.119114
233 1 0.248939 0.365739 0.121965
234 1 0.31071 0.438854 0.127608
155 1 0.807621 0.00434102 0.197672
269 1 0.375204 0.00295794 0.255217
141 1 0.377144 0.00650305 0.123111
111 1 0.431994 0.379364 0.0619769
112 1 0.377306 0.438418 0.0688109
237 1 0.380627 0.371326 0.115958
238 1 0.437714 0.44024 0.128722
116 1 0.491601 0.440328 0.064435
241 1 0.493594 0.381498 0.120745
534 1 0.684606 0.0677011 0.497799
1283 1 0.0711745 0.491009 0.304388
115 1 0.559624 0.369957 0.0625803
120 1 0.62473 0.439067 0.0674431
242 1 0.558531 0.434985 0.122181
245 1 0.623 0.372693 0.115498
2 1 0.0597747 0.0666446 -0.0018904
145 1 0.499772 0.00365486 0.116262
1433 1 0.751504 0.494536 0.368606
389 1 0.11701 0.00597755 0.359588
119 1 0.691012 0.37475 0.0589997
123 1 0.817019 0.372982 0.0550826
124 1 0.754011 0.43874 0.0616733
246 1 0.694249 0.435861 0.117448
249 1 0.751083 0.379071 0.115771
250 1 0.818031 0.438744 0.125211
538 1 0.807618 0.0519969 0.497404
617 1 0.255228 0.37739 0.495101
509 1 0.883631 0.385582 0.378427
225 1 1.00178 0.359418 0.117514
100 1 0.00298638 0.427398 0.06825
127 1 0.938144 0.370269 0.0579974
128 1 0.883877 0.433222 0.0587752
253 1 0.876317 0.37671 0.118641
254 1 0.937414 0.431481 0.127322
1169 1 0.497299 0.495151 0.12319
135 1 0.189169 -0.00090506 0.1859
136 1 0.120549 0.065911 0.185594
163 1 0.0562221 0.128368 0.182479
258 1 0.0596169 0.0667815 0.237037
264 1 0.108614 0.0711829 0.309163
291 1 0.053496 0.137016 0.30447
293 1 0.121067 0.12736 0.247731
132 1 0.981324 0.0641198 0.188972
289 1 0.992882 0.125668 0.2463
54 1 0.688222 0.190013 0.00134316
1031 1 0.193836 0.492392 0.0561465
140 1 0.249307 0.0534475 0.18891
167 1 0.18331 0.129856 0.181925
171 1 0.319054 0.124426 0.193673
262 1 0.183877 0.0718001 0.249468
266 1 0.305653 0.0642806 0.252152
268 1 0.246009 0.0679596 0.311467
295 1 0.180614 0.12368 0.322482
297 1 0.248296 0.128553 0.257276
299 1 0.311311 0.122931 0.314736
3 1 0.0444232 0.00474053 0.0720287
109 1 0.383211 0.366447 0.00378107
89 1 0.749516 0.248164 0.00572867
1039 1 0.430071 0.499077 0.0728967
144 1 0.380092 0.0648068 0.185115
175 1 0.443059 0.127202 0.195953
270 1 0.434443 0.0580826 0.25566
272 1 0.375384 0.0559552 0.314034
301 1 0.375799 0.12496 0.249942
303 1 0.436468 0.133556 0.311093
276 1 0.500308 0.0703962 0.309802
148 1 0.499375 0.0581931 0.182404
143 1 0.436467 0.00489952 0.179232
153 1 0.739173 0.00304366 0.128371
1293 1 0.370919 0.490759 0.250121
305 1 0.49286 0.129961 0.25537
152 1 0.612463 0.0592013 0.192506
179 1 0.552254 0.125039 0.181771
274 1 0.554553 0.0587076 0.252637
280 1 0.611748 0.0590055 0.311719
307 1 0.560414 0.135425 0.3073
309 1 0.611341 0.121031 0.24354
273 1 0.494943 0.00735931 0.247505
263 1 0.186061 0.00750506 0.309957
156 1 0.750532 0.0654744 0.202114
183 1 0.678135 0.118851 0.188105
187 1 0.808758 0.130888 0.185069
278 1 0.683576 0.0619224 0.249834
282 1 0.812881 0.0718269 0.258829
284 1 0.739986 0.0561687 0.302256
311 1 0.676152 0.122 0.302808
313 1 0.756053 0.119526 0.253378
315 1 0.814276 0.122225 0.320888
638 1 0.937118 0.437881 0.489462
609 1 0.997218 0.383571 0.497383
260 1 1.00077 0.0598909 0.300806
160 1 0.86137 0.0630755 0.192195
191 1 0.927397 0.128784 0.194914
286 1 0.931028 0.0659774 0.249095
288 1 0.881471 0.0704887 0.317239
317 1 0.869844 0.129723 0.253029
319 1 0.941499 0.130241 0.306729
502 1 0.684708 0.428444 0.375944
168 1 0.120738 0.184627 0.189778
195 1 0.0629465 0.253956 0.184077
200 1 0.12685 0.314629 0.1888
290 1 0.0560866 0.189238 0.246466
296 1 0.124888 0.185823 0.316385
323 1 0.0685434 0.251252 0.315974
325 1 0.123807 0.247371 0.260307
328 1 0.1278 0.32047 0.315551
292 1 0.990068 0.191519 0.29956
164 1 0.993496 0.189351 0.189716
196 1 0.998118 0.313547 0.192619
322 1 0.0749752 0.314431 0.252122
172 1 0.253633 0.180008 0.178928
199 1 0.179287 0.245765 0.18862
203 1 0.31666 0.245505 0.187571
294 1 0.180983 0.189005 0.252656
298 1 0.307984 0.187991 0.26037
300 1 0.241511 0.179661 0.321388
326 1 0.189631 0.301119 0.24891
327 1 0.187416 0.251404 0.313083
329 1 0.251477 0.245258 0.231845
330 1 0.306948 0.301314 0.245889
331 1 0.299118 0.249107 0.307883
332 1 0.245593 0.316458 0.322579
204 1 0.246783 0.304336 0.17844
176 1 0.3768 0.18862 0.191603
207 1 0.441886 0.258177 0.178856
208 1 0.375406 0.310951 0.190509
302 1 0.435887 0.206017 0.250153
304 1 0.374011 0.182432 0.311216
333 1 0.372571 0.248956 0.258777
334 1 0.435445 0.311531 0.24245
335 1 0.433158 0.260026 0.313605
336 1 0.36535 0.310332 0.309661
337 1 0.498704 0.260114 0.250191
180 1 0.493058 0.196904 0.184407
212 1 0.495117 0.320505 0.181935
340 1 0.4928 0.320328 0.312942
308 1 0.496909 0.197152 0.307168
184 1 0.620009 0.191185 0.183044
211 1 0.563756 0.252591 0.188711
216 1 0.623246 0.312842 0.193273
306 1 0.554787 0.1885 0.240768
312 1 0.624717 0.190809 0.306772
338 1 0.561157 0.317049 0.247719
339 1 0.555498 0.266503 0.313382
341 1 0.630568 0.25877 0.249182
344 1 0.625745 0.322038 0.304517
314 1 0.810226 0.190021 0.25251
188 1 0.744975 0.180104 0.18884
215 1 0.685088 0.257996 0.189504
219 1 0.81252 0.251556 0.179636
220 1 0.756545 0.321944 0.187859
310 1 0.687626 0.181407 0.25209
316 1 0.749004 0.184778 0.318247
342 1 0.694204 0.323102 0.248087
343 1 0.688369 0.256054 0.320795
345 1 0.760125 0.253363 0.247136
346 1 0.812104 0.316969 0.253226
347 1 0.811411 0.250973 0.313083
348 1 0.746109 0.314529 0.313484
324 1 0.992186 0.313155 0.311799
321 1 0.00807945 0.255823 0.245881
192 1 0.862071 0.187784 0.186276
223 1 0.935978 0.255975 0.181534
224 1 0.874632 0.30868 0.168438
318 1 0.932307 0.183715 0.24662
320 1 0.879842 0.182687 0.314147
349 1 0.87128 0.253988 0.248517
350 1 0.938413 0.310918 0.250528
351 1 0.934406 0.252762 0.314453
352 1 0.874857 0.310205 0.307123
277 1 0.616868 -0.00267472 0.253056
569 1 0.746805 0.119592 0.498992
385 1 0.999467 0.000229777 0.371311
227 1 0.0719792 0.368813 0.190951
232 1 0.130389 0.436107 0.191244
354 1 0.0740025 0.436355 0.247832
355 1 0.0616367 0.370269 0.309927
357 1 0.131959 0.372375 0.249437
360 1 0.128174 0.430318 0.313971
228 1 0.00799869 0.428548 0.183531
356 1 0.0052846 0.437063 0.301342
15 1 0.430579 0.00747275 0.0572639
231 1 0.197739 0.371336 0.187143
235 1 0.314983 0.367143 0.173173
236 1 0.252667 0.42798 0.186932
358 1 0.196409 0.429214 0.246545
359 1 0.184342 0.376592 0.324591
361 1 0.255661 0.371645 0.247239
362 1 0.307247 0.438688 0.25153
363 1 0.308764 0.374031 0.320278
364 1 0.2561 0.439748 0.318348
506 1 0.808548 0.439684 0.370675
505 1 0.754403 0.374737 0.368943
507 1 0.812861 0.360161 0.435154
625 1 0.494369 0.370427 0.497217
239 1 0.43563 0.379492 0.190583
240 1 0.368895 0.431072 0.185821
365 1 0.36374 0.370127 0.250241
366 1 0.436851 0.43946 0.255998
367 1 0.4291 0.385911 0.312882
368 1 0.360999 0.43222 0.3125
372 1 0.497518 0.440881 0.312139
106 1 0.323538 0.427381 0.00438207
369 1 0.489447 0.378472 0.251187
244 1 0.493323 0.438804 0.190328
243 1 0.556918 0.375787 0.190831
248 1 0.627653 0.441538 0.176577
370 1 0.557412 0.433714 0.243819
371 1 0.550651 0.373447 0.307205
373 1 0.617932 0.385291 0.24921
376 1 0.619139 0.444814 0.310797
137 1 0.245452 -0.000687656 0.122808
614 1 0.180397 0.43821 0.498999
247 1 0.683 0.38139 0.188766
251 1 0.818103 0.377411 0.180721
252 1 0.752535 0.445803 0.183858
374 1 0.683436 0.441921 0.256636
375 1 0.68652 0.371526 0.312362
377 1 0.749849 0.380634 0.245944
378 1 0.811659 0.450142 0.238764
379 1 0.81434 0.383388 0.301672
380 1 0.748821 0.43311 0.311193
495 1 0.431179 0.37015 0.437404
542 1 0.93188 0.0490275 0.496337
353 1 1.00114 0.373768 0.244386
255 1 0.944165 0.368505 0.184717
256 1 0.878409 0.43519 0.187829
381 1 0.878593 0.371312 0.240778
382 1 0.947383 0.439829 0.240379
383 1 0.943962 0.374637 0.308113
384 1 0.876645 0.438515 0.312045
114 1 0.552691 0.442736 0.0140531
413 1 0.883924 0.00982582 0.375202
503 1 0.687019 0.374773 0.43651
386 1 0.0537825 0.0681364 0.362356
392 1 0.116826 0.0628668 0.433433
419 1 0.0494314 0.131186 0.435066
421 1 0.110048 0.126035 0.37221
388 1 0.987355 0.0586155 0.435113
417 1 0.00123889 0.130761 0.369666
390 1 0.182057 0.0573305 0.376315
394 1 0.30643 0.0577327 0.377751
396 1 0.249731 0.0588082 0.435368
423 1 0.182291 0.119968 0.441863
425 1 0.247246 0.119125 0.377243
427 1 0.307347 0.129285 0.441677
545 1 0.996348 0.128058 0.494262
494 1 0.428387 0.436813 0.37727
526 1 0.442734 0.0660786 0.499029
626 1 0.559785 0.433314 0.495576
398 1 0.4307 0.0645666 0.382829
400 1 0.373774 0.0652292 0.440701
429 1 0.378573 0.129331 0.373288
431 1 0.441164 0.127871 0.442911
433 1 0.507252 0.128962 0.371812
26 1 0.807021 0.0586015 0.00269561
395 1 0.313517 -0.000957738 0.436622
508 1 0.75383 0.427335 0.428589
159 1 0.925029 0.00938259 0.188258
404 1 0.504537 0.0725638 0.440706
402 1 0.556546 0.0667685 0.378907
408 1 0.62587 0.0675687 0.43018
435 1 0.573564 0.132694 0.440911
437 1 0.618656 0.133571 0.376668
1425 1 0.50076 0.500056 0.3784
497 1 0.49223 0.382296 0.370653
553 1 0.252025 0.12881 0.4961
406 1 0.677708 0.0668947 0.365429
410 1 0.815704 0.0540681 0.373372
412 1 0.742694 0.0611798 0.438586
439 1 0.689886 0.128695 0.435334
441 1 0.756876 0.122177 0.380631
443 1 0.815578 0.129123 0.437048
594 1 0.553594 0.308052 0.496484
1311 1 0.936992 0.494754 0.319739
1287 1 0.189396 0.489062 0.313738
414 1 0.950559 0.0744779 0.366125
416 1 0.86926 0.0572342 0.439301
445 1 0.87777 0.114251 0.382571
447 1 0.93858 0.137559 0.437642
530 1 0.566645 0.0587052 0.498126
418 1 0.0659617 0.189206 0.376669
424 1 0.117717 0.1841 0.440256
450 1 0.0531448 0.30954 0.371959
451 1 0.0549244 0.249404 0.444509
453 1 0.117126 0.253963 0.383274
456 1 0.125371 0.319386 0.454215
449 1 0.00264226 0.239201 0.365422
452 1 0.997653 0.308206 0.431584
1431 1 0.68659 0.495015 0.440719
422 1 0.179979 0.190496 0.383419
426 1 0.307464 0.186983 0.377407
428 1 0.240088 0.190402 0.43697
454 1 0.181067 0.313639 0.377801
455 1 0.18452 0.253998 0.431553
457 1 0.249475 0.241847 0.37612
458 1 0.315338 0.294557 0.373428
459 1 0.313059 0.242878 0.439579
460 1 0.252717 0.31343 0.433354
1173 1 0.638616 0.499143 0.121875
430 1 0.448002 0.192673 0.379607
432 1 0.377153 0.183201 0.436683
461 1 0.380607 0.244487 0.380418
462 1 0.430941 0.318761 0.371433
463 1 0.433157 0.245247 0.445837
464 1 0.373193 0.308902 0.436731
436 1 0.504288 0.185764 0.455589
465 1 0.499425 0.262478 0.372246
468 1 0.496512 0.31859 0.432987
557 1 0.375883 0.122012 0.493476
500 1 0.495956 0.437053 0.43507
440 1 0.631405 0.189371 0.436318
466 1 0.558206 0.315572 0.376345
472 1 0.625791 0.321161 0.441019
469 1 0.620524 0.25425 0.372468
434 1 0.557807 0.189072 0.369724
467 1 0.564864 0.249469 0.436851
470 1 0.675451 0.31516 0.375975
442 1 0.813444 0.199138 0.382928
471 1 0.686071 0.251731 0.4438
444 1 0.746241 0.18851 0.44419
473 1 0.747145 0.261432 0.377315
476 1 0.743968 0.314887 0.442497
438 1 0.680696 0.197774 0.373411
474 1 0.818398 0.328331 0.367029
475 1 0.804203 0.262633 0.436794
22 1 0.683516 0.0701632 -0.000595372
420 1 0.00190908 0.194073 0.42975
479 1 0.944014 0.250534 0.424619
448 1 0.870052 0.199608 0.433559
446 1 0.939496 0.181517 0.369623
477 1 0.874169 0.25861 0.372188
480 1 0.877117 0.307083 0.441013
478 1 0.936767 0.3164 0.367778
267 1 0.307545 0.00168665 0.32149
481 1 0.00876619 0.375028 0.383104
484 1 0.00728894 0.436296 0.436439
485 1 0.119328 0.373231 0.380754
483 1 0.0622024 0.367245 0.443661
488 1 0.109383 0.431936 0.441308
482 1 0.0619555 0.444351 0.368627
1537 1 -0.00309395 0.499975 0.495049
498 1 0.564503 0.444728 0.369663
501 1 0.618187 0.374247 0.375203
492 1 0.24397 0.449296 0.45114
491 1 0.310236 0.374075 0.4266
486 1 0.179455 0.439234 0.387218
490 1 0.318449 0.44216 0.376773
487 1 0.184983 0.372144 0.442645
489 1 0.247473 0.394729 0.384257
58 1 0.802085 0.177155 0.00544861
496 1 0.384038 0.445178 0.443134
57 1 0.748161 0.12212 0.00866366
504 1 0.621196 0.435119 0.428186
499 1 0.558088 0.3785 0.440002
493 1 0.373807 0.372853 0.375848
1171 1 0.55377 0.496383 0.19
1163 1 0.311153 0.495238 0.188333
70 1 0.177878 0.316652 0.000495599
1153 1 0.0116227 0.489476 0.128232
1155 1 0.060959 0.498221 0.190081
634 1 0.812936 0.434853 0.491969
1161 1 0.243017 0.494859 0.122145
1281 1 0.998394 0.498183 0.252641
387 1 0.0517238 0.00653301 0.433633
1027 1 0.0739598 0.501202 0.0629198
1439 1 0.939695 0.494508 0.425331
1035 1 0.308177 0.490806 0.0610102
61 1 0.866456 0.118663 -0.000554736
93 1 0.880638 0.24985 0.00169254
149 1 0.620237 -0.00126225 0.130658
1159 1 0.191283 0.495113 0.188278
151 1 0.683904 0.00191618 0.187628
399 1 0.429822 0.00292457 0.437849
117 1 0.624343 0.373781 -0.00145932
285 1 0.871717 0.000172731 0.256449
1295 1 0.421051 0.498963 0.311546
131 1 0.0531605 0.0053344 0.185422
126 1 0.955795 0.4394 -5.17223e-05
1055 1 0.951098 0.490323 0.0767626
1415 1 0.17092 0.497121 0.443776
621 1 0.373982 0.37778 0.493722
261 1 0.119443 0.0126827 0.241378
11 1 0.319937 0.000832331 0.0534228
550 1 0.179007 0.184848 0.494279
401 1 0.488769 0.00194453 0.376001
9 1 0.246131 0.00544418 0.00587372
574 1 0.935456 0.195013 0.49442
637 1 0.886806 0.377513 0.492745
1157 1 0.137115 0.490309 0.121206
33 1 0.992392 0.12339 0.00182719
521 1 0.255566 0.0023313 0.496699
581 1 0.124519 0.245942 0.492752
570 1 0.813371 0.188503 0.495417
598 1 0.68666 0.312012 0.500355
81 1 0.493713 0.239485 0.00128045
573 1 0.870499 0.126781 0.495379
1033 1 0.253668 0.495279 0.000537149
586 1 0.306419 0.306281 0.496415
1029 1 0.124148 0.491168 0.00619924
65 1 0.998343 0.250753 0.00790032
610 1 0.0502485 0.441977 0.500613
86 1 0.68722 0.317975 0.00798476
522 1 0.315063 0.0674079 0.49551
41 1 0.252257 0.128845 0.00200848
520 1 0.119835 0.0637772 0.56574
547 1 0.0591587 0.121069 0.568235
642 1 0.0554993 0.0552552 0.62429
677 1 0.123094 0.124835 0.623211
1943 1 0.675019 0.497362 0.937691
585 1 0.251169 0.242296 0.506156
524 1 0.252753 0.0583012 0.562912
551 1 0.181296 0.121024 0.559598
555 1 0.318584 0.116161 0.560324
646 1 0.182016 0.0649942 0.624475
650 1 0.308449 0.0619911 0.621688
681 1 0.257227 0.126792 0.616806
1553 1 0.502145 0.494538 0.506979
1687 1 0.695296 0.498581 0.6987
528 1 0.387262 0.0591382 0.561706
559 1 0.437786 0.130648 0.560292
654 1 0.435578 0.0577267 0.629732
685 1 0.372009 0.124008 0.62748
689 1 0.496027 0.12536 0.619562
656 1 0.364451 0.0600731 0.684318
927 1 0.926861 0.00351331 0.937106
1673 1 0.257468 0.50226 0.61985
532 1 0.493015 0.0619454 0.563527
536 1 0.617966 0.0628884 0.562155
563 1 0.554837 0.128405 0.568813
658 1 0.554329 0.0602531 0.625487
693 1 0.626436 0.119805 0.628102
993 1 0.991459 0.36687 0.874766
1555 1 0.561645 0.498555 0.579625
540 1 0.747177 0.0591899 0.561169
567 1 0.680128 0.122321 0.562953
571 1 0.8135 0.122767 0.566831
662 1 0.681071 0.0626324 0.614664
666 1 0.811642 0.0613672 0.615466
697 1 0.755275 0.12569 0.621261
622 1 0.433423 0.438594 0.512506
561 1 0.499945 0.121437 0.506363
590 1 0.431405 0.308288 0.504784
516 1 0.995092 0.0604302 0.561585
673 1 0.995614 0.117362 0.623887
544 1 0.868427 0.0648344 0.562265
575 1 0.933935 0.125747 0.560074
670 1 0.933231 0.0643969 0.625429
701 1 0.878627 0.13157 0.619698
102 1 0.193575 0.432934 0.999707
552 1 0.117526 0.180751 0.565197
579 1 0.0594201 0.252219 0.564802
584 1 0.122946 0.308461 0.55941
674 1 0.0561646 0.191799 0.632381
706 1 0.0630737 0.3165 0.624192
709 1 0.118152 0.243577 0.625321
548 1 0.995401 0.189678 0.570533
1939 1 0.571934 0.48665 0.941628
558 1 0.41873 0.184782 0.501295
1551 1 0.434916 0.496069 0.566739
678 1 0.184937 0.178635 0.620043
556 1 0.244423 0.178301 0.556535
583 1 0.180627 0.244545 0.562667
587 1 0.309306 0.241575 0.569152
588 1 0.249067 0.314236 0.564872
682 1 0.310904 0.1797 0.635495
710 1 0.182336 0.315833 0.616618
713 1 0.24617 0.25033 0.623385
714 1 0.309488 0.314203 0.622789
597 1 0.620437 0.247334 0.50112
78 1 0.438844 0.307532 0.996077
523 1 0.317618 0.00261417 0.562412
560 1 0.368433 0.179915 0.56983
591 1 0.432574 0.24729 0.566486
592 1 0.385416 0.321327 0.576436
686 1 0.438242 0.185379 0.62331
717 1 0.372335 0.257232 0.63601
718 1 0.439826 0.312139 0.635549
596 1 0.485641 0.316356 0.564585
789 1 0.629985 0.00705505 0.740836
589 1 0.367408 0.2522 0.501974
564 1 0.49151 0.188616 0.548636
721 1 0.499051 0.248695 0.625324
568 1 0.620972 0.193809 0.566599
595 1 0.563127 0.246439 0.566868
600 1 0.620452 0.317905 0.560042
690 1 0.553849 0.189245 0.619316
722 1 0.543962 0.317274 0.631323
725 1 0.618184 0.261781 0.62329
549 1 0.115613 0.119159 0.499778
1691 1 0.828755 0.501707 0.683091
578 1 0.0541539 0.316497 0.505623
46 1 0.436898 0.172751 0.987513
572 1 0.747747 0.17792 0.548397
599 1 0.694357 0.256602 0.562521
603 1 0.809002 0.24409 0.56289
604 1 0.755142 0.318717 0.566431
694 1 0.687169 0.183875 0.622526
698 1 0.817132 0.190522 0.624604
726 1 0.689541 0.308267 0.624803
729 1 0.7487 0.245104 0.619449
730 1 0.816087 0.315685 0.631417
97 1 0.00188806 0.369834 0.995072
605 1 0.872275 0.252869 0.496779
602 1 0.809398 0.311953 0.504586
705 1 0.993059 0.25725 0.626483
580 1 0.998467 0.31308 0.564426
576 1 0.879581 0.189076 0.559245
607 1 0.943016 0.25026 0.560466
608 1 0.874254 0.310278 0.563093
702 1 0.935642 0.184559 0.6307
733 1 0.876963 0.244126 0.624763
734 1 0.926958 0.311509 0.623562
1563 1 0.818482 0.49739 0.549145
1801 1 0.24886 0.498415 0.756852
554 1 0.315123 0.185091 0.502692
611 1 0.0571109 0.385069 0.569049
616 1 0.124659 0.444053 0.560875
738 1 0.0712745 0.441387 0.63199
741 1 0.132667 0.381432 0.621999
122 1 0.82207 0.43867 0.99202
1665 1 0.987441 0.496093 0.618838
615 1 0.185776 0.377842 0.554423
619 1 0.318452 0.377879 0.563638
620 1 0.25144 0.446113 0.559029
742 1 0.184753 0.447184 0.615145
745 1 0.248397 0.375241 0.620284
746 1 0.307497 0.440569 0.622943
30 1 0.93003 0.0596986 1.00029
903 1 0.191036 0.00597635 0.942456
613 1 0.11076 0.375913 0.505086
42 1 0.31647 0.184369 0.993057
577 1 0.996276 0.245172 0.500465
623 1 0.43018 0.387916 0.572468
624 1 0.367145 0.449408 0.570104
749 1 0.369686 0.382169 0.631941
750 1 0.432221 0.441091 0.631895
669 1 0.872975 0.00144212 0.616933
94 1 0.933651 0.308971 0.994316
1815 1 0.687873 0.501298 0.815291
34 1 0.0667826 0.184811 0.995736
628 1 0.503254 0.441291 0.569354
753 1 0.492418 0.384416 0.627831
627 1 0.552884 0.372235 0.553377
632 1 0.62689 0.42666 0.576364
754 1 0.565748 0.426948 0.637854
757 1 0.626858 0.367649 0.628403
50 1 0.561015 0.179968 1.00101
518 1 0.184426 0.0644955 0.504002
533 1 0.622743 0.000560708 0.511438
631 1 0.693294 0.371702 0.565348
635 1 0.817984 0.374549 0.559543
636 1 0.750454 0.439924 0.556398
758 1 0.69315 0.443583 0.627097
761 1 0.761408 0.385047 0.620453
762 1 0.820608 0.445938 0.618602
1539 1 0.0655218 0.497715 0.56799
1693 1 0.884606 0.496526 0.621404
105 1 0.25124 0.37801 0.997035
737 1 -0.00057837 0.366293 0.625668
612 1 0.988928 0.439466 0.553093
639 1 0.929367 0.373893 0.565341
640 1 0.873453 0.435026 0.558282
765 1 0.872595 0.384766 0.623974
766 1 0.931711 0.430021 0.620508
1813 1 0.623914 0.492649 0.741322
1024 1 0.880258 0.433346 0.937343
10 1 0.312523 0.0624103 0.994291
629 1 0.62847 0.381267 0.505089
1023 1 0.936953 0.376731 0.94997
648 1 0.113825 0.0545436 0.690208
675 1 0.0603347 0.121957 0.691358
770 1 0.0569529 0.0570899 0.750625
776 1 0.118291 0.0608467 0.81617
803 1 0.0517804 0.121157 0.822514
805 1 0.113885 0.127705 0.758637
652 1 0.244835 0.0571359 0.692317
679 1 0.178342 0.114133 0.693801
683 1 0.306287 0.119561 0.685358
774 1 0.184934 0.0576834 0.752516
778 1 0.322929 0.0631418 0.75279
780 1 0.257579 0.0580108 0.819117
807 1 0.186627 0.122531 0.8248
809 1 0.248147 0.118886 0.753704
811 1 0.314834 0.115056 0.812325
546 1 0.0606903 0.188196 0.503472
1022 1 0.938925 0.433225 0.878135
1021 1 0.86756 0.371045 0.885668
593 1 0.499718 0.252626 0.51099
1803 1 0.303707 0.497562 0.823013
118 1 0.69332 0.436452 1.00033
687 1 0.443612 0.122656 0.689811
782 1 0.429262 0.0526603 0.735975
784 1 0.377658 0.0531615 0.812375
813 1 0.373913 0.129444 0.738401
815 1 0.429873 0.118575 0.808833
660 1 0.50287 0.0723015 0.679757
98 1 0.0678466 0.433639 1.00268
517 1 0.122397 0.00696856 0.501706
817 1 0.496928 0.122495 0.749476
788 1 0.504204 0.0687492 0.814091
664 1 0.618958 0.0647374 0.684558
691 1 0.562694 0.123759 0.694457
786 1 0.561015 0.0647371 0.747644
792 1 0.620867 0.0518753 0.802119
819 1 0.559631 0.134735 0.803813
821 1 0.624002 0.124843 0.749845
1020 1 0.752436 0.44067 0.938309
1018 1 0.817299 0.434969 0.872877
668 1 0.751486 0.0573695 0.677129
695 1 0.698872 0.119674 0.675827
699 1 0.816246 0.122531 0.676618
790 1 0.688947 0.0690316 0.742328
794 1 0.821138 0.0600399 0.742503
796 1 0.757068 0.0546041 0.802652
823 1 0.694468 0.12461 0.800627
825 1 0.76873 0.11914 0.738842
827 1 0.812906 0.123411 0.811755
643 1 0.0501908 -5.92363e-05 0.69151
1019 1 0.806723 0.37407 0.937156
1017 1 0.747042 0.370264 0.875502
772 1 0.997006 0.0535071 0.806574
644 1 0.99044 0.0594366 0.68687
801 1 0.993002 0.126721 0.756536
672 1 0.875619 0.055494 0.682362
703 1 0.939192 0.124165 0.693334
798 1 0.936071 0.0737716 0.757909
800 1 0.868291 0.0635776 0.807685
829 1 0.875926 0.128239 0.751585
831 1 0.935429 0.12974 0.812774
797 1 0.874154 -0.000404513 0.742287
707 1 0.0556263 0.251774 0.696711
680 1 0.118708 0.180913 0.688949
712 1 0.119378 0.304266 0.692997
802 1 0.0522728 0.183499 0.750654
808 1 0.116978 0.186122 0.813042
834 1 0.0650235 0.307965 0.757849
835 1 0.0548158 0.250911 0.815633
837 1 0.12425 0.246501 0.755488
840 1 0.125705 0.310745 0.819122
804 1 0.991222 0.182067 0.816051
836 1 1.00023 0.307786 0.812498
684 1 0.242969 0.179123 0.689339
711 1 0.180529 0.242474 0.678691
715 1 0.296806 0.243836 0.680483
716 1 0.239636 0.315628 0.686475
806 1 0.184215 0.190923 0.747853
810 1 0.303037 0.186442 0.74637
812 1 0.249264 0.181989 0.806442
838 1 0.186832 0.308927 0.739618
839 1 0.188015 0.244417 0.806587
841 1 0.241576 0.255045 0.738396
842 1 0.30402 0.307238 0.74574
843 1 0.304737 0.2462 0.802932
844 1 0.24659 0.30372 0.801595
688 1 0.377882 0.192435 0.679763
719 1 0.42971 0.252844 0.697357
720 1 0.378546 0.323547 0.697075
814 1 0.433406 0.1907 0.754445
816 1 0.373991 0.184227 0.814978
845 1 0.36159 0.241251 0.742076
846 1 0.439457 0.313678 0.759209
847 1 0.428156 0.245794 0.816796
848 1 0.371921 0.309123 0.796938
852 1 0.497733 0.317274 0.81326
849 1 0.501191 0.252682 0.74863
820 1 0.507865 0.199137 0.805287
724 1 0.496163 0.316897 0.695156
692 1 0.506025 0.18433 0.688146
728 1 0.617866 0.318996 0.688022
696 1 0.619979 0.186953 0.672651
723 1 0.553482 0.250761 0.687949
818 1 0.567617 0.191631 0.747144
824 1 0.630337 0.190765 0.807133
850 1 0.559062 0.320363 0.75347
851 1 0.568675 0.248824 0.806972
853 1 0.631708 0.25038 0.735735
856 1 0.620022 0.308044 0.802511
700 1 0.759986 0.189419 0.69596
727 1 0.692694 0.247834 0.675482
731 1 0.810419 0.25353 0.689822
732 1 0.75463 0.309717 0.68561
822 1 0.68979 0.179495 0.744208
826 1 0.815414 0.183742 0.756285
828 1 0.751554 0.185565 0.814467
854 1 0.694726 0.300839 0.74541
855 1 0.686532 0.250381 0.814821
857 1 0.753074 0.241206 0.760806
858 1 0.809271 0.302237 0.756024
859 1 0.809098 0.250797 0.819608
860 1 0.748941 0.304101 0.818034
833 1 0.988967 0.246046 0.759601
708 1 -0.000648709 0.31016 0.703955
676 1 0.989317 0.191394 0.687461
704 1 0.870712 0.190609 0.685498
735 1 0.932599 0.248919 0.692461
736 1 0.872093 0.313203 0.685977
830 1 0.931533 0.186894 0.753077
832 1 0.87135 0.187078 0.817477
861 1 0.866211 0.247445 0.75095
862 1 0.924115 0.311442 0.753703
863 1 0.935829 0.254208 0.826497
864 1 0.874317 0.310591 0.825187
1015 1 0.698176 0.375684 0.934376
739 1 0.0648202 0.365561 0.695658
744 1 0.126109 0.437821 0.692199
866 1 0.0631873 0.445961 0.754806
867 1 0.0576614 0.375231 0.821462
869 1 0.128385 0.37367 0.747725
872 1 0.127149 0.438641 0.808126
740 1 0.00762817 0.442297 0.685412
45 1 0.37536 0.121871 0.988677
743 1 0.188738 0.385028 0.680488
747 1 0.308033 0.38307 0.694514
748 1 0.249818 0.440298 0.676815
870 1 0.191717 0.441336 0.753417
871 1 0.186077 0.365591 0.806973
873 1 0.247503 0.375908 0.750256
874 1 0.306751 0.436128 0.757529
875 1 0.304613 0.373249 0.808096
876 1 0.243436 0.439564 0.814886
121 1 0.749315 0.37218 0.994082
917 1 0.637129 0.00186735 0.871762
783 1 0.446339 0.00754966 0.808645
751 1 0.443511 0.375644 0.693761
752 1 0.373353 0.442778 0.706978
877 1 0.374673 0.3796 0.750305
878 1 0.441376 0.436791 0.757235
879 1 0.426521 0.376895 0.814979
880 1 0.369537 0.450914 0.819809
884 1 0.502073 0.440177 0.821472
756 1 0.505577 0.442892 0.686381
881 1 0.502094 0.382964 0.760509
755 1 0.554593 0.375637 0.693089
760 1 0.63257 0.426779 0.681343
882 1 0.556195 0.436149 0.745466
883 1 0.557147 0.371748 0.820254
885 1 0.632913 0.37856 0.75214
888 1 0.619287 0.43493 0.811332
759 1 0.696879 0.376751 0.691475
763 1 0.814532 0.365617 0.697428
764 1 0.760953 0.441024 0.701171
886 1 0.69019 0.433457 0.756655
887 1 0.684685 0.361231 0.80705
889 1 0.756194 0.368779 0.754083
890 1 0.816155 0.445431 0.757736
891 1 0.809353 0.382274 0.814087
892 1 0.746689 0.440346 0.822191
1014 1 0.682119 0.431244 0.871374
865 1 -0.00151426 0.378904 0.757896
868 1 0.00159753 0.443874 0.81449
767 1 0.937642 0.370173 0.69594
768 1 0.868468 0.426908 0.693294
893 1 0.868265 0.376459 0.759626
894 1 0.931537 0.433944 0.745147
895 1 0.936999 0.37364 0.810156
896 1 0.877553 0.436407 0.822302
925 1 0.873147 0.00157866 0.873838
898 1 0.0441245 0.067285 0.877138
904 1 0.119615 0.0603659 0.929353
931 1 0.0558632 0.124028 0.936689
933 1 0.113612 0.114486 0.875652
543 1 0.92614 0.00243669 0.562732
562 1 0.565983 0.183092 0.50808
902 1 0.183436 0.0508095 0.867198
906 1 0.313537 0.061398 0.878783
908 1 0.250061 0.0728919 0.936797
935 1 0.184442 0.121592 0.929019
937 1 0.257689 0.120666 0.867335
939 1 0.314672 0.116284 0.932108
1679 1 0.440455 0.50034 0.688669
514 1 0.0603306 0.0604205 0.503237
1945 1 0.747331 0.497516 0.878108
910 1 0.434963 0.0596289 0.870788
912 1 0.374922 0.0574321 0.934782
941 1 0.372887 0.12224 0.869312
943 1 0.434435 0.114608 0.930962
14 1 0.435882 0.0618483 0.992538
779 1 0.31124 0.000349178 0.812338
1557 1 0.624325 0.498373 0.509046
38 1 0.194198 0.180954 0.994616
531 1 0.547816 -0.000153135 0.566435
945 1 0.5055 0.123571 0.876586
916 1 0.502718 0.0627393 0.930874
914 1 0.568543 0.0633929 0.863703
920 1 0.63258 0.0643072 0.930952
947 1 0.570213 0.122656 0.928598
949 1 0.63264 0.118429 0.853587
1016 1 0.633409 0.42634 0.938382
918 1 0.694462 0.065798 0.87168
922 1 0.807459 0.0657772 0.869592
924 1 0.755491 0.0624861 0.941103
951 1 0.689895 0.120165 0.937106
953 1 0.751307 0.124852 0.871552
955 1 0.809578 0.126412 0.936329
633 1 0.744771 0.378775 0.499537
1005 1 0.380786 0.375211 0.88466
1013 1 0.627975 0.371356 0.869614
900 1 0.991491 0.0593802 0.93756
929 1 0.994028 0.128707 0.885548
785 1 0.498197 0.00951886 0.738618
926 1 0.934134 0.0690403 0.876179
928 1 0.856908 0.066904 0.93986
957 1 0.874849 0.125452 0.874807
959 1 0.933141 0.120112 0.942502
930 1 0.0577081 0.197398 0.877452
936 1 0.116042 0.180194 0.928881
963 1 0.0540585 0.245158 0.940799
965 1 0.125272 0.246244 0.883441
968 1 0.134066 0.309705 0.930987
962 1 0.066298 0.309335 0.874474
932 1 0.995966 0.18644 0.944953
630 1 0.682558 0.442322 0.501293
934 1 0.190878 0.192453 0.869788
938 1 0.312118 0.189564 0.871147
940 1 0.253238 0.179844 0.925541
966 1 0.191256 0.306509 0.871184
967 1 0.18959 0.236171 0.931716
969 1 0.254958 0.250541 0.871906
970 1 0.303994 0.310541 0.867256
971 1 0.311016 0.242291 0.944254
972 1 0.244239 0.313281 0.934956
1012 1 0.496427 0.433214 0.927664
897 1 0.986831 0.00667214 0.871907
18 1 0.565764 0.0663264 0.993265
519 1 0.190338 0.00535165 0.564955
973 1 0.365133 0.252218 0.87657
944 1 0.373352 0.179551 0.928091
942 1 0.44442 0.180798 0.872679
976 1 0.374159 0.311074 0.940807
975 1 0.432 0.242576 0.929726
974 1 0.426388 0.307556 0.864764
977 1 0.500321 0.24881 0.875372
948 1 0.505926 0.183328 0.936436
911 1 0.444172 0.00517051 0.929622
1007 1 0.436166 0.383614 0.940868
37 1 0.130416 0.129221 0.991501
980 1 0.493353 0.304659 0.928653
946 1 0.574055 0.189073 0.871532
984 1 0.630269 0.307443 0.950235
978 1 0.564141 0.306639 0.878085
981 1 0.628796 0.254657 0.868795
979 1 0.564383 0.244506 0.942346
952 1 0.636187 0.192021 0.930282
1010 1 0.564945 0.439115 0.873727
1667 1 0.0701592 0.498124 0.693048
987 1 0.811943 0.235417 0.942296
985 1 0.742316 0.242781 0.878576
950 1 0.688699 0.174694 0.872383
956 1 0.743815 0.179828 0.9373
988 1 0.75894 0.304991 0.94273
986 1 0.811981 0.314427 0.878015
982 1 0.689257 0.314335 0.875063
954 1 0.816133 0.178457 0.87669
983 1 0.694903 0.253975 0.944776
1001 1 0.242001 0.371996 0.871351
90 1 0.814218 0.31113 0.995905
1002 1 0.308414 0.423399 0.87319
601 1 0.753993 0.246355 0.499248
649 1 0.247924 -0.000308399 0.628309
964 1 0.00334504 0.318099 0.937463
960 1 0.881677 0.186489 0.934358
989 1 0.866473 0.248938 0.87831
990 1 0.930736 0.314976 0.89057
961 1 0.996002 0.25354 0.880072
992 1 0.86842 0.295631 0.938749
991 1 0.939569 0.246822 0.937195
958 1 0.931517 0.186353 0.876114
999 1 0.187941 0.3757 0.938903
1951 1 0.950878 0.495948 0.936368
998 1 0.187819 0.442672 0.872944
996 1 0.0088624 0.433209 0.932497
995 1 0.068402 0.375947 0.922987
997 1 0.134206 0.375372 0.861603
994 1 0.0748577 0.447505 0.885015
1000 1 0.130247 0.435772 0.940486
1011 1 0.567563 0.387504 0.941433
1009 1 0.485771 0.373655 0.877974
1008 1 0.379782 0.446366 0.944728
1006 1 0.43592 0.441111 0.863288
1003 1 0.31835 0.376435 0.940627
1004 1 0.249489 0.43662 0.934448
799 1 0.939283 0.00335348 0.801347
82 1 0.556013 0.314907 0.994799
1677 1 0.380245 0.501755 0.631277
795 1 0.815334 -3.384e-05 0.808232
74 1 0.313897 0.309797 0.997158
49 1 0.504302 0.120848 0.988645
1927 1 0.196792 0.503359 0.943409
1543 1 0.182396 0.502311 0.555831
582 1 0.192101 0.303435 0.500877
69 1 0.125911 0.23678 0.987644
651 1 0.309815 -0.00040935 0.69662
659 1 0.563911 0.000248238 0.683493
110 1 0.44057 0.451827 0.999982
101 1 0.119169 0.366648 0.998808
66 1 0.0674756 0.300394 0.995654
73 1 0.247952 0.257665 0.987939
606 1 0.943785 0.307159 0.50419
62 1 0.932253 0.18454 0.996779
525 1 0.374322 0.0122233 0.501617
566 1 0.680236 0.183741 0.501649
85 1 0.625778 0.241897 0.998402
77 1 0.380147 0.241822 0.993669
6 1 0.187402 0.063301 0.996112
5 1 0.121391 0.00920025 0.996098
17 1 0.486697 0.00259467 0.994905
565 1 0.618243 0.123341 0.504487
618 1 0.309394 0.437563 0.50075
53 1 0.623455 0.128579 0.996528
125 1 0.870885 0.364358 0.993431
1 1 0.996788 0.00648991 0.997064
529 1 0.49258 -0.00020734 0.503419
1032 1 0.142052 0.564631 0.0710253
1059 1 0.0624782 0.627405 0.0678674
1154 1 0.0777703 0.551974 0.12792
1189 1 0.135617 0.625038 0.130384
1185 1 0.0054275 0.627255 0.12531
1179 1 0.807393 0.502461 0.173732
1036 1 0.256067 0.554927 0.0557294
1063 1 0.193962 0.628147 0.0662972
1067 1 0.310687 0.634589 0.0718746
1158 1 0.201925 0.563145 0.125068
1162 1 0.304698 0.554521 0.117088
1193 1 0.251886 0.627222 0.131689
283 1 0.799015 0.997035 0.313004
1305 1 0.746333 0.505462 0.247897
1409 1 0.00529815 0.505601 0.367745
1649 1 0.49927 0.875833 0.493895
1303 1 0.682353 0.508023 0.312462
1026 1 0.0767728 0.563682 0.00937252
1421 1 0.373408 0.501669 0.379021
1040 1 0.368035 0.566529 0.0596128
1071 1 0.431155 0.616491 0.0671872
1166 1 0.441237 0.561903 0.132009
1197 1 0.372124 0.62159 0.127936
405 1 0.623791 0.992615 0.379961
1201 1 0.493944 0.623476 0.124782
281 1 0.743113 0.991057 0.243513
1044 1 0.488219 0.557916 0.0645556
1048 1 0.633557 0.558837 0.0588135
1075 1 0.557163 0.617582 0.0712744
1170 1 0.551598 0.558428 0.128441
1205 1 0.621038 0.623778 0.126209
271 1 0.434008 0.996864 0.309997
1078 1 0.685256 0.689323 -0.00214951
1203 1 0.563937 0.627204 0.188165
1047 1 0.697326 0.5042 0.064268
1073 1 0.494004 0.615989 0.00308877
1079 1 0.680345 0.622559 0.0492892
1052 1 0.75254 0.562707 0.0552931
1083 1 0.81611 0.625586 0.0593421
1174 1 0.691619 0.568475 0.116428
1178 1 0.819413 0.56223 0.11354
1209 1 0.761208 0.624154 0.120736
1137 1 0.511901 0.869225 0.000476706
1042 1 0.561652 0.572516 -0.00166775
1505 1 0.00335659 0.876534 0.368009
1129 1 0.257194 0.875178 0.0087104
1028 1 0.0095533 0.560518 0.0627493
1056 1 0.880047 0.573159 0.0523696
1087 1 0.933243 0.627034 0.067256
1182 1 0.946428 0.558791 0.119963
1213 1 0.876272 0.624834 0.13062
31 1 0.941864 0.984783 0.0561114
1175 1 0.693115 0.500918 0.184491
1065 1 0.246794 0.62671 0.00222067
1125 1 0.122366 0.875894 0.00242943
403 1 0.558447 0.995504 0.433348
1634 1 0.061111 0.948087 0.4871
1064 1 0.130847 0.689474 0.0608156
1091 1 0.0706429 0.754337 0.0663665
1096 1 0.129392 0.822084 0.0625236
1186 1 0.0731019 0.691792 0.117359
1218 1 0.0671951 0.820265 0.122208
1221 1 0.125847 0.761415 0.129815
1093 1 0.129534 0.753337 0.00915254
1538 1 0.0528006 0.552601 0.496271
1068 1 0.251834 0.698212 0.0745807
1095 1 0.193508 0.756465 0.0652815
1099 1 0.321203 0.747138 0.0512332
1100 1 0.259783 0.817614 0.0565703
1190 1 0.190065 0.69652 0.127109
1194 1 0.315249 0.682416 0.139171
1222 1 0.192355 0.81692 0.131336
1225 1 0.249945 0.759406 0.133328
1226 1 0.31636 0.817407 0.121192
513 1 1.00098 0.997614 0.49038
1598 1 0.948021 0.690867 0.491193
1629 1 0.885849 0.75174 0.48621
1072 1 0.375382 0.684404 0.0675691
1103 1 0.441118 0.739755 0.0684107
1104 1 0.387609 0.815007 0.0543299
1198 1 0.430937 0.681345 0.134452
1229 1 0.375313 0.752489 0.126169
1230 1 0.436693 0.808598 0.122005
1076 1 0.5113 0.683277 0.0633884
19 1 0.556007 0.999111 0.0698367
1233 1 0.504898 0.752855 0.119898
29 1 0.872971 0.992877 0.00708364
1041 1 0.497756 0.501895 0.00382555
1061 1 0.126913 0.6276 0.00734407
1108 1 0.504849 0.810253 0.0645411
1080 1 0.616449 0.685045 0.0616365
1107 1 0.565851 0.749917 0.0631325
1112 1 0.629009 0.812201 0.0613224
1202 1 0.564897 0.688899 0.125624
1234 1 0.567419 0.811189 0.126106
1237 1 0.626752 0.744922 0.128333
139 1 0.314321 1.00045 0.203431
407 1 0.678999 0.996256 0.440462
1177 1 0.751543 0.503629 0.120918
1084 1 0.751027 0.687156 0.0558953
1111 1 0.694573 0.750278 0.0566177
1115 1 0.815523 0.748129 0.063263
1116 1 0.758961 0.803223 0.0624831
1206 1 0.688129 0.680067 0.117185
1210 1 0.810676 0.686215 0.118943
1238 1 0.690529 0.814579 0.117354
1241 1 0.750532 0.75053 0.12181
1242 1 0.814099 0.813665 0.135864
1413 1 0.128293 0.503285 0.363724
1542 1 0.182162 0.564988 0.49542
1299 1 0.559741 0.505478 0.313644
1217 1 0.00871501 0.761979 0.121565
1060 1 0.999115 0.688382 0.0589984
1092 1 0.994776 0.816535 0.0649805
1088 1 0.875059 0.689337 0.0651387
1119 1 0.936844 0.751341 0.0671741
1120 1 0.873528 0.807041 0.0617875
1214 1 0.941637 0.683114 0.127408
1245 1 0.881099 0.746666 0.131109
1246 1 0.935988 0.813825 0.132076
1516 1 0.250316 0.945239 0.442319
1511 1 0.188142 0.882594 0.429089
1533 1 0.874467 0.876364 0.368749
1123 1 0.0642653 0.87612 0.0641983
1128 1 0.119835 0.939733 0.060244
1250 1 0.0596979 0.940737 0.131547
1253 1 0.126738 0.882014 0.122669
1124 1 0.00660952 0.93493 0.0609865
1249 1 0.00370518 0.878955 0.129255
1097 1 0.255059 0.751384 0.00563175
1110 1 0.69181 0.814673 0.00433819
1601 1 0.00273592 0.75781 0.491261
415 1 0.934364 0.988942 0.435402
1127 1 0.19022 0.881017 0.0609272
1131 1 0.313485 0.873871 0.0686081
1132 1 0.24689 0.939991 0.0716187
1254 1 0.190994 0.937491 0.126982
1257 1 0.251088 0.877459 0.127381
1258 1 0.313659 0.943683 0.115813
1633 1 0.996734 0.873515 0.500154
1534 1 0.931322 0.93069 0.378319
1514 1 0.31336 0.943344 0.385538
1135 1 0.439419 0.877206 0.0599912
1136 1 0.386234 0.938252 0.0637174
1261 1 0.380448 0.872022 0.121242
1262 1 0.443599 0.939454 0.128667
1140 1 0.503382 0.935706 0.0647758
397 1 0.377325 0.99907 0.374339
1535 1 0.931261 0.866504 0.430621
1536 1 0.87155 0.929996 0.442615
1265 1 0.504053 0.875383 0.128382
1139 1 0.567438 0.8746 0.0666947
1144 1 0.616627 0.940353 0.0629979
1266 1 0.557986 0.929475 0.122297
1269 1 0.624729 0.87204 0.131651
1508 1 0.996044 0.930583 0.430732
147 1 0.557494 0.994135 0.184024
1126 1 0.191563 0.94576 0.00554318
1289 1 0.249124 0.503193 0.25613
1143 1 0.67461 0.879818 0.069325
1147 1 0.814298 0.872915 0.0732205
1148 1 0.742764 0.931278 0.0700462
1270 1 0.676691 0.938139 0.123242
1273 1 0.746375 0.879664 0.128677
1274 1 0.809452 0.945851 0.136806
393 1 0.248287 0.993971 0.380502
1141 1 0.619632 0.87997 -0.00106732
1094 1 0.193874 0.813582 0.00363694
1509 1 0.126863 0.883809 0.371504
1297 1 0.499966 0.505592 0.251287
25 1 0.755641 0.992464 0.015439
1043 1 0.56723 0.504027 0.06642
1151 1 0.933699 0.880291 0.0652444
1152 1 0.869926 0.936168 0.0711224
1277 1 0.868995 0.879609 0.13238
1278 1 0.935183 0.940439 0.124242
1419 1 0.313822 0.505308 0.444114
1507 1 0.0618779 0.872047 0.435864
1160 1 0.14074 0.556122 0.198038
1187 1 0.0709308 0.621441 0.191973
1282 1 0.0586099 0.56293 0.246111
1288 1 0.117693 0.564904 0.307671
1315 1 0.0672605 0.62489 0.306282
1317 1 0.130875 0.627987 0.248009
1284 1 0.995419 0.565586 0.313497
1164 1 0.251377 0.553766 0.190179
1191 1 0.191927 0.62191 0.193184
1195 1 0.311883 0.612654 0.190306
1286 1 0.188421 0.561492 0.26297
1290 1 0.313956 0.54936 0.252438
1292 1 0.251314 0.554125 0.31734
1319 1 0.205411 0.633285 0.316198
1321 1 0.256187 0.618661 0.250319
1323 1 0.306609 0.620005 0.321033
1181 1 0.883115 0.506274 0.110137
1168 1 0.374721 0.557743 0.192014
1199 1 0.435498 0.621007 0.190809
1294 1 0.430228 0.558747 0.262624
1296 1 0.365205 0.562403 0.32043
1325 1 0.365451 0.620548 0.258964
1327 1 0.436384 0.620838 0.311852
1657 1 0.750348 0.863941 0.492676
1435 1 0.803586 0.506377 0.428963
1532 1 0.746396 0.933005 0.432098
1329 1 0.496421 0.6285 0.248782
1300 1 0.500653 0.558792 0.303215
1172 1 0.5042 0.566235 0.189136
1176 1 0.615263 0.556312 0.181874
1298 1 0.574122 0.56436 0.247659
1304 1 0.626326 0.565325 0.319309
1331 1 0.567668 0.62928 0.308285
1333 1 0.624949 0.632208 0.251267
259 1 0.0551979 0.996698 0.309369
1526 1 0.67747 0.938923 0.381779
1531 1 0.811783 0.873719 0.423889
1207 1 0.684228 0.625847 0.179066
1180 1 0.745571 0.56878 0.180094
1211 1 0.812992 0.620517 0.182691
1302 1 0.680113 0.562911 0.25706
1306 1 0.81179 0.553503 0.243133
1308 1 0.743143 0.556911 0.308426
1335 1 0.679218 0.629499 0.308111
1337 1 0.743981 0.619346 0.238062
1339 1 0.806818 0.623065 0.305462
1529 1 0.743947 0.876109 0.380053
1512 1 0.126799 0.948555 0.435683
1156 1 0.00610069 0.558672 0.180341
1313 1 0.993246 0.632379 0.248936
1184 1 0.870902 0.555527 0.185651
1215 1 0.935394 0.619336 0.184995
1310 1 0.929015 0.566422 0.244951
1312 1 0.877792 0.56177 0.301241
1341 1 0.868805 0.62396 0.242957
1343 1 0.935064 0.631167 0.309814
1192 1 0.121698 0.693353 0.184826
1219 1 0.0697026 0.754006 0.191466
1224 1 0.133318 0.831071 0.187952
1314 1 0.0693154 0.688841 0.250767
1320 1 0.137623 0.684013 0.308682
1346 1 0.0641328 0.810526 0.24604
1347 1 0.075807 0.749979 0.308715
1349 1 0.130187 0.758451 0.252805
1352 1 0.137011 0.818915 0.316688
1220 1 0.00525486 0.812819 0.18586
1345 1 0.0159319 0.747227 0.250539
1316 1 0.0112946 0.688693 0.31584
1188 1 0.0104933 0.697606 0.182169
1196 1 0.260798 0.69277 0.194544
1223 1 0.18878 0.753659 0.193305
1227 1 0.316293 0.759495 0.184314
1228 1 0.26174 0.818067 0.197043
1318 1 0.195934 0.692141 0.254482
1322 1 0.314374 0.69469 0.258181
1324 1 0.260628 0.702975 0.31942
1350 1 0.19107 0.815114 0.251266
1351 1 0.192676 0.750433 0.3091
1353 1 0.258946 0.757671 0.252374
1355 1 0.326057 0.758063 0.31659
1356 1 0.254001 0.814304 0.313123
1354 1 0.317115 0.821224 0.255744
1200 1 0.376797 0.686787 0.193751
1231 1 0.43915 0.747512 0.194494
1232 1 0.377957 0.807808 0.190159
1326 1 0.445126 0.690704 0.249064
1328 1 0.377693 0.694367 0.314988
1357 1 0.37767 0.758642 0.254395
1358 1 0.439092 0.81208 0.250002
1359 1 0.447814 0.746579 0.318054
1360 1 0.377196 0.826818 0.324077
1236 1 0.499612 0.815571 0.186533
1361 1 0.497456 0.754841 0.249038
1332 1 0.502607 0.683882 0.311039
1204 1 0.505019 0.683013 0.18928
1364 1 0.501318 0.817918 0.315131
1208 1 0.620316 0.692899 0.190046
1235 1 0.559529 0.756205 0.190494
1330 1 0.560609 0.69327 0.251458
1336 1 0.619023 0.690993 0.31346
1363 1 0.557353 0.765867 0.315858
1365 1 0.615553 0.751237 0.253433
1368 1 0.622608 0.822211 0.311955
1240 1 0.629518 0.807231 0.196882
1362 1 0.560141 0.816887 0.245742
1212 1 0.753132 0.687412 0.181006
1239 1 0.684992 0.745594 0.190469
1243 1 0.797241 0.755741 0.190604
1244 1 0.737754 0.810178 0.187659
1334 1 0.679792 0.688769 0.252573
1338 1 0.810997 0.684894 0.236617
1340 1 0.750692 0.685862 0.313213
1366 1 0.684486 0.821155 0.253374
1367 1 0.678987 0.752294 0.314271
1369 1 0.741025 0.754922 0.251487
1370 1 0.812403 0.815758 0.250417
1371 1 0.807115 0.754594 0.314835
1372 1 0.739 0.812274 0.311788
1348 1 -0.0005533 0.818543 0.306265
1216 1 0.87481 0.685943 0.184313
1247 1 0.9495 0.751865 0.191711
1248 1 0.879061 0.808614 0.184625
1342 1 0.929811 0.685651 0.247201
1373 1 0.868079 0.748519 0.2438
1374 1 0.932873 0.814753 0.242408
1375 1 0.941298 0.750545 0.293867
1376 1 0.870339 0.813334 0.309089
1344 1 0.8688 0.692982 0.305619
1251 1 0.0628911 0.872919 0.18631
1256 1 0.123714 0.937386 0.183578
1378 1 0.0600563 0.940736 0.245672
1379 1 0.0571404 0.881553 0.314627
1381 1 0.11808 0.869434 0.25664
1384 1 0.121835 0.94973 0.29822
1380 1 0.989983 0.93707 0.318727
1037 1 0.364839 0.504956 0.00470677
1570 1 0.0743476 0.698754 0.49429
1506 1 0.0628418 0.947092 0.376122
1427 1 0.564503 0.503744 0.441214
1255 1 0.192844 0.891013 0.195612
1260 1 0.254888 0.936001 0.199254
1382 1 0.184395 0.953601 0.24998
1383 1 0.194272 0.874958 0.306196
1385 1 0.257542 0.87888 0.255086
1386 1 0.318986 0.943215 0.261701
1387 1 0.308653 0.881234 0.32289
1388 1 0.248354 0.942484 0.309775
1259 1 0.325846 0.874205 0.187215
1527 1 0.682513 0.878743 0.437323
1263 1 0.434472 0.869067 0.180838
1264 1 0.374447 0.942209 0.185109
1389 1 0.380428 0.872098 0.251491
1390 1 0.440524 0.936713 0.245812
1391 1 0.435611 0.878588 0.310415
1392 1 0.376339 0.939058 0.31617
1393 1 0.497884 0.878028 0.245839
1396 1 0.493847 0.938057 0.315608
1530 1 0.808757 0.931992 0.369763
1268 1 0.501194 0.942646 0.184819
1267 1 0.558124 0.8841 0.194088
1272 1 0.618603 0.945142 0.189206
1394 1 0.557001 0.940706 0.252914
1395 1 0.557668 0.875501 0.313434
1397 1 0.62745 0.88727 0.25147
1400 1 0.622352 0.941148 0.320376
1271 1 0.684504 0.890287 0.190279
1275 1 0.812883 0.880113 0.187699
1276 1 0.748102 0.943257 0.181162
1398 1 0.679037 0.94834 0.250757
1399 1 0.677343 0.882749 0.3145
1401 1 0.752051 0.872164 0.246975
1402 1 0.812277 0.936438 0.255249
1403 1 0.816222 0.870421 0.312372
1404 1 0.745571 0.935589 0.314214
537 1 0.738172 0.997999 0.493782
1521 1 0.497194 0.877062 0.379644
129 1 0.988496 0.999008 0.124611
1252 1 0.996712 0.937402 0.185635
1377 1 0.992743 0.872723 0.24699
1279 1 0.928372 0.885651 0.187599
1280 1 0.861954 0.944158 0.195566
1405 1 0.869781 0.874384 0.246736
1406 1 0.92757 0.937798 0.256133
1407 1 0.93106 0.866937 0.303607
1408 1 0.878086 0.940659 0.319282
1423 1 0.439471 0.510393 0.43971
1410 1 0.0593114 0.571991 0.371723
1416 1 0.118284 0.56237 0.421069
1443 1 0.068024 0.62687 0.440063
1445 1 0.128818 0.629948 0.375934
1441 1 1.00004 0.632385 0.380039
1412 1 0.994161 0.565529 0.42305
1641 1 0.239064 0.885664 0.498748
1562 1 0.806014 0.564465 0.492997
1449 1 0.254187 0.622693 0.381187
1451 1 0.31666 0.637023 0.438726
1420 1 0.242775 0.555979 0.434123
1447 1 0.189321 0.621182 0.438081
1414 1 0.18702 0.566561 0.36365
1418 1 0.305456 0.558846 0.375816
1429 1 0.626771 0.502735 0.384475
1025 1 0.0116481 0.501746 0.0025856
1549 1 0.381136 0.504682 0.498944
1424 1 0.380127 0.566174 0.440325
1455 1 0.440051 0.628761 0.436842
1422 1 0.442782 0.555479 0.3735
1453 1 0.380697 0.631497 0.368801
1513 1 0.249 0.886853 0.377636
1428 1 0.499684 0.562704 0.439295
1457 1 0.497714 0.617553 0.365428
1461 1 0.614893 0.631821 0.381707
1426 1 0.563828 0.564632 0.371351
1432 1 0.627591 0.558517 0.45139
1459 1 0.560439 0.624704 0.44147
1606 1 0.18898 0.824763 0.495067
1510 1 0.186102 0.942335 0.374893
1430 1 0.682827 0.566985 0.376889
1463 1 0.68495 0.626314 0.43638
1436 1 0.750723 0.571205 0.430767
1434 1 0.812551 0.560272 0.369794
1465 1 0.741005 0.624165 0.374791
1467 1 0.812981 0.621896 0.424295
1525 1 0.619793 0.885257 0.37655
1519 1 0.4348 0.876507 0.437631
1528 1 0.620592 0.941948 0.453595
409 1 0.743151 1.00149 0.370157
1618 1 0.562948 0.805654 0.496955
1522 1 0.560084 0.933056 0.376313
1523 1 0.567135 0.878698 0.444339
1307 1 0.811691 0.499987 0.309272
1440 1 0.872636 0.560725 0.427356
1469 1 0.875205 0.62256 0.374743
1471 1 0.933586 0.629289 0.435385
1438 1 0.933898 0.559478 0.368707
1517 1 0.379843 0.883868 0.380607
1520 1 0.376357 0.939073 0.441256
275 1 0.558743 0.993523 0.321572
1448 1 0.133901 0.692978 0.428993
1480 1 0.133245 0.821985 0.438589
1475 1 0.0612961 0.767712 0.433284
1477 1 0.132258 0.747525 0.374197
1474 1 0.0750615 0.815547 0.373241
1442 1 0.0715881 0.696896 0.368128
1473 1 0.991506 0.754386 0.363594
1444 1 0.00467242 0.696828 0.434828
1476 1 0.00170583 0.819331 0.426739
1605 1 0.138957 0.754198 0.501922
23 1 0.684883 0.991911 0.064074
1581 1 0.377854 0.634776 0.496925
1446 1 0.194914 0.69013 0.36991
1482 1 0.310289 0.824739 0.381623
1479 1 0.193751 0.767323 0.439378
1481 1 0.254608 0.764286 0.375706
1483 1 0.316656 0.764359 0.442911
1450 1 0.321046 0.702708 0.386548
1484 1 0.247241 0.822785 0.436643
1478 1 0.189305 0.817295 0.371691
1452 1 0.249308 0.692026 0.429844
1488 1 0.377653 0.819791 0.446423
1487 1 0.448241 0.750344 0.434679
1486 1 0.443642 0.812811 0.381393
1454 1 0.443051 0.685575 0.368993
1485 1 0.378348 0.760019 0.377843
1456 1 0.380934 0.694083 0.441925
1492 1 0.502743 0.817485 0.434907
1524 1 0.497238 0.940085 0.438853
1518 1 0.440551 0.938182 0.376079
1515 1 0.31129 0.880995 0.446396
1460 1 0.503018 0.690783 0.434673
1489 1 0.504323 0.753478 0.377778
1493 1 0.619303 0.742957 0.383997
1496 1 0.627051 0.810891 0.435574
1458 1 0.55353 0.681835 0.376568
1464 1 0.626313 0.678907 0.445789
1491 1 0.565907 0.740074 0.435334
1490 1 0.566483 0.816036 0.375889
7 1 0.18233 0.99759 0.0676705
1661 1 0.871754 0.862182 0.498321
1468 1 0.757689 0.684557 0.433471
1494 1 0.679239 0.820086 0.374616
1462 1 0.678631 0.674814 0.374935
1500 1 0.74369 0.809473 0.434904
1497 1 0.735416 0.740749 0.377039
1495 1 0.684982 0.745403 0.438868
1466 1 0.812583 0.690302 0.362455
1499 1 0.80883 0.752418 0.434614
1498 1 0.800264 0.80984 0.375012
411 1 0.808924 0.991424 0.423366
541 1 0.867032 0.991984 0.499818
1470 1 0.938731 0.692411 0.379239
1472 1 0.87579 0.683671 0.441061
1502 1 0.934357 0.809788 0.368779
1501 1 0.880973 0.752683 0.374781
1504 1 0.866044 0.81445 0.427121
1503 1 0.941631 0.763038 0.432196
1565 1 0.870931 0.504616 0.485239
133 1 0.123651 0.995241 0.118709
265 1 0.25275 1.00158 0.257103
1566 1 0.941298 0.563869 0.487827
157 1 0.878357 0.996631 0.126229
1569 1 0.00851625 0.628704 0.497286
1613 1 0.386201 0.759037 0.500035
1411 1 0.0623519 0.50033 0.438144
1105 1 0.495534 0.7595 0.00153148
1069 1 0.366311 0.633344 0.0040036
1597 1 0.868569 0.620788 0.487702
1637 1 0.119868 0.87481 0.495638
1062 1 0.195628 0.700126 0.00328282
1574 1 0.197986 0.688434 0.49538
1662 1 0.933578 0.926204 0.49944
279 1 0.682251 0.998497 0.316257
1038 1 0.431032 0.565226 0.00351547
1609 1 0.260612 0.75008 0.49775
1573 1 0.121455 0.619351 0.496377
1090 1 0.0632996 0.811195 0.00562457
1030 1 0.188826 0.560964 0.00450386
1582 1 0.443137 0.691296 0.492981
1578 1 0.318504 0.700449 0.494459
1145 1 0.753989 0.871347 0.0115636
1122 1 0.0658472 0.946656 0.00477397
1577 1 0.250759 0.61543 0.496173
1149 1 0.87181 0.870867 0.00216349
1585 1 0.494378 0.620864 0.495195
1054 1 0.942117 0.569167 0.00108259
1642 1 0.321499 0.950352 0.500069
1561 1 0.755172 0.50551 0.496768
1077 1 0.612816 0.636218 0.0039648
1121 1 0.00901296 0.878398 0.0045925
1638 1 0.186984 0.946891 0.492806
1593 1 0.742015 0.62838 0.496865
1053 1 0.878629 0.504322 0.0106853
1070 1 0.434454 0.68477 0.00729365
1544 1 0.117365 0.564763 0.565718
1571 1 0.0607538 0.633836 0.554028
1666 1 0.0597668 0.558357 0.629197
1701 1 0.120495 0.625673 0.622309
1130 1 0.32138 0.943226 0.999682
1617 1 0.496114 0.753633 0.502425
539 1 0.799901 0.996553 0.555692
1805 1 0.376227 0.502108 0.759666
913 1 0.506586 0.997614 0.869351
1548 1 0.250445 0.564979 0.565709
1575 1 0.186502 0.62328 0.559828
1579 1 0.311595 0.636842 0.55328
1670 1 0.192498 0.569467 0.621088
1674 1 0.313364 0.577424 0.62313
1705 1 0.247178 0.627223 0.62322
535 1 0.689155 0.996455 0.566826
907 1 0.311634 1.00226 0.944072
1923 1 0.0690236 0.499849 0.945074
1552 1 0.378428 0.564364 0.553131
1583 1 0.438182 0.621202 0.559072
1678 1 0.433646 0.570047 0.625142
1709 1 0.37888 0.632759 0.628254
1713 1 0.508183 0.624206 0.62318
1933 1 0.375368 0.501188 0.880235
1556 1 0.499519 0.554072 0.561835
1560 1 0.630459 0.558607 0.57259
1587 1 0.566965 0.624874 0.562875
1682 1 0.563537 0.569537 0.628347
1717 1 0.624199 0.625627 0.629433
1689 1 0.754114 0.501779 0.631883
21 1 0.625541 0.991343 0.991901
1564 1 0.750574 0.5668 0.558165
1591 1 0.680833 0.621079 0.566592
1595 1 0.814289 0.628445 0.566971
1686 1 0.690163 0.558333 0.633624
1690 1 0.823725 0.569432 0.620796
1721 1 0.754888 0.612871 0.623594
1101 1 0.387087 0.755862 0.995543
1697 1 0.0118198 0.636868 0.624015
1540 1 0.00627776 0.570445 0.565105
1568 1 0.873636 0.564848 0.549628
1599 1 0.939378 0.623467 0.549946
1694 1 0.937267 0.571465 0.616451
1725 1 0.877353 0.630826 0.625026
781 1 0.373856 0.991 0.744044
775 1 0.188862 0.989035 0.810575
1576 1 0.132023 0.685893 0.550073
1603 1 0.0687134 0.757258 0.565444
1608 1 0.130649 0.814114 0.557001
1698 1 0.0711522 0.69268 0.623886
1730 1 0.0519441 0.819188 0.627644
1733 1 0.121088 0.757523 0.627438
1572 1 0.00168042 0.692501 0.558912
1729 1 0.996307 0.750608 0.612647
1604 1 0.00599888 0.821816 0.558075
1081 1 0.749456 0.625565 0.997817
1580 1 0.251296 0.691574 0.567165
1607 1 0.190521 0.760783 0.566189
1611 1 0.323002 0.748214 0.568272
1612 1 0.262175 0.806938 0.567916
1702 1 0.172964 0.688057 0.621801
1706 1 0.316668 0.683378 0.623007
1734 1 0.182446 0.810353 0.629224
1737 1 0.257619 0.755103 0.623948
1738 1 0.322303 0.806984 0.631251
1797 1 0.134043 0.505357 0.750265
655 1 0.441471 0.991448 0.679381
1683 1 0.569179 0.50262 0.678015
1584 1 0.387667 0.691597 0.562635
1615 1 0.443266 0.751899 0.558462
1616 1 0.374025 0.80687 0.561613
1710 1 0.438219 0.6827 0.632708
1741 1 0.394257 0.750998 0.623695
1742 1 0.441575 0.814699 0.61768
1588 1 0.499889 0.677936 0.555572
1745 1 0.511524 0.750939 0.635332
1620 1 0.503538 0.81649 0.554532
1714 1 0.558908 0.686681 0.63286
1592 1 0.619138 0.684372 0.576777
1619 1 0.553711 0.74765 0.568193
1624 1 0.629339 0.794853 0.564635
1746 1 0.571071 0.810769 0.625586
1749 1 0.619921 0.742508 0.635034
1795 1 0.0683146 0.505729 0.817566
1807 1 0.439782 0.508319 0.814185
1596 1 0.757036 0.678029 0.568162
1623 1 0.693311 0.741693 0.557393
1627 1 0.809707 0.7428 0.569344
1628 1 0.755124 0.801685 0.565053
1718 1 0.696087 0.67602 0.631694
1722 1 0.816068 0.687809 0.640747
1750 1 0.682338 0.809284 0.631691
1753 1 0.749353 0.742377 0.625977
1754 1 0.816617 0.8121 0.62337
663 1 0.684404 0.999124 0.678895
1545 1 0.249551 0.511721 0.503363
527 1 0.434748 0.996979 0.564599
1600 1 0.881331 0.684107 0.552783
1631 1 0.950726 0.747995 0.555816
1632 1 0.880404 0.792364 0.559268
1726 1 0.939089 0.687074 0.623821
1757 1 0.872433 0.747601 0.622803
1758 1 0.938671 0.810645 0.619243
909 1 0.378415 0.997517 0.879066
1118 1 0.947893 0.817147 0.99525
1635 1 0.0653635 0.88909 0.556566
1640 1 0.136223 0.947179 0.562546
1762 1 0.0608262 0.938907 0.624605
1765 1 0.126421 0.875152 0.622644
1761 1 0.997989 0.888772 0.624834
769 1 0.995274 0.994003 0.748859
1636 1 -0.000267055 0.936609 0.549225
647 1 0.183241 0.997666 0.687567
1695 1 0.944397 0.507494 0.692277
1639 1 0.19452 0.864337 0.569298
1643 1 0.311422 0.880653 0.55632
1644 1 0.25445 0.940466 0.563955
1766 1 0.191783 0.937122 0.619647
1769 1 0.255188 0.873726 0.628571
1770 1 0.313112 0.941958 0.636446
1929 1 0.263082 0.509152 0.888192
1089 1 0.00369592 0.748924 0.998552
1809 1 0.50115 0.501441 0.753304
1681 1 0.493828 0.508929 0.629726
1645 1 0.376117 0.885118 0.500398
1610 1 0.313287 0.819879 0.4973
1647 1 0.436982 0.878162 0.56583
1648 1 0.369089 0.941795 0.565211
1773 1 0.381016 0.869285 0.624958
1774 1 0.434961 0.933218 0.618828
1777 1 0.514558 0.871149 0.62473
1559 1 0.691308 0.502684 0.571832
1558 1 0.68689 0.558203 0.508424
1652 1 0.500552 0.927047 0.558365
1651 1 0.566963 0.870494 0.55678
1656 1 0.622287 0.9381 0.562955
1778 1 0.563261 0.932702 0.632551
1781 1 0.623715 0.874761 0.615306
2020 1 0.998136 0.93249 0.946167
2045 1 0.882115 0.879523 0.872993
1811 1 0.559864 0.504546 0.814102
1935 1 0.439356 0.5019 0.943121
777 1 0.251226 0.998578 0.756157
1655 1 0.690603 0.867184 0.562427
1659 1 0.804479 0.877097 0.562847
1660 1 0.742301 0.937599 0.565498
1782 1 0.68449 0.933446 0.630558
1785 1 0.745661 0.880867 0.622871
1786 1 0.81261 0.935945 0.622634
641 1 0.996263 0.988629 0.623421
1675 1 0.305825 0.500687 0.687884
1622 1 0.687315 0.808796 0.504991
1547 1 0.314804 0.511978 0.561866
773 1 0.123552 0.99774 0.75741
1663 1 0.925659 0.865324 0.560355
1664 1 0.866063 0.932211 0.557691
1789 1 0.874891 0.871986 0.625055
1790 1 0.934217 0.942473 0.623927
515 1 0.064742 0.990809 0.559189
899 1 0.0617481 0.992478 0.934584
1672 1 0.121874 0.559817 0.687698
1699 1 0.0667808 0.622855 0.686911
1794 1 0.0690116 0.561527 0.752866
1800 1 0.140158 0.562941 0.809025
1827 1 0.061283 0.627682 0.815833
1829 1 0.134213 0.617116 0.751023
1796 1 0.00625448 0.562965 0.807712
1825 1 0.0101491 0.624636 0.746183
1685 1 0.631375 0.50361 0.628973
1085 1 0.881438 0.630104 0.99375
1626 1 0.815982 0.811408 0.50177
1676 1 0.249957 0.572252 0.685231
1703 1 0.179877 0.630854 0.680127
1707 1 0.317997 0.631179 0.68848
1798 1 0.19698 0.56269 0.751547
1802 1 0.311625 0.573671 0.749752
1804 1 0.252964 0.564644 0.811784
1831 1 0.19469 0.631645 0.815855
1833 1 0.250069 0.632251 0.746887
1835 1 0.305571 0.629602 0.813815
2046 1 0.948228 0.937833 0.883591
905 1 0.250821 0.998733 0.87559
1680 1 0.372993 0.570283 0.688082
1711 1 0.444679 0.632788 0.697591
1806 1 0.440907 0.562008 0.750496
1808 1 0.373348 0.558692 0.818765
1837 1 0.381054 0.619783 0.750858
1839 1 0.435591 0.610416 0.81796
1812 1 0.497434 0.562089 0.825834
1106 1 0.567864 0.80969 0.995995
1841 1 0.504225 0.626003 0.762797
1684 1 0.502298 0.564194 0.69498
1653 1 0.626426 0.865011 0.502816
2047 1 0.933907 0.867862 0.930106
1688 1 0.628062 0.562432 0.69307
1715 1 0.558754 0.621653 0.70007
1810 1 0.562849 0.55668 0.756428
1816 1 0.635113 0.559295 0.803676
1843 1 0.564342 0.628641 0.824801
1845 1 0.613806 0.620028 0.759426
1049 1 0.751646 0.505848 0.995653
919 1 0.695794 0.997038 0.928604
793 1 0.751731 0.996782 0.743968
2048 1 0.87495 0.931921 0.936205
671 1 0.931629 0.996281 0.678543
1567 1 0.94008 0.516096 0.555311
1692 1 0.751975 0.563153 0.692285
1719 1 0.691287 0.619604 0.693965
1723 1 0.816542 0.619171 0.686149
1814 1 0.690153 0.554288 0.749897
1818 1 0.825053 0.564276 0.753932
1820 1 0.750337 0.557809 0.812269
1847 1 0.687534 0.622537 0.814955
1849 1 0.749028 0.615753 0.760289
1851 1 0.8146 0.620403 0.814987
1821 1 0.887921 0.509149 0.750126
1819 1 0.815611 0.502261 0.816156
1589 1 0.62783 0.629119 0.510549
1668 1 0.0107017 0.555185 0.684445
1696 1 0.887746 0.563863 0.684545
1727 1 0.94879 0.61122 0.68669
1822 1 0.940444 0.568559 0.758427
1824 1 0.879448 0.555082 0.817437
1853 1 0.884133 0.629858 0.755323
1855 1 0.942432 0.619894 0.820906
915 1 0.566458 0.999358 0.927098
1704 1 0.122797 0.6879 0.691249
1731 1 0.074062 0.749478 0.689337
1736 1 0.121369 0.819747 0.688963
1826 1 0.0635954 0.689118 0.747693
1832 1 0.120184 0.685088 0.810485
1858 1 0.062885 0.823623 0.745657
1859 1 0.061771 0.745433 0.816458
1861 1 0.124778 0.750743 0.750243
1864 1 0.126735 0.80437 0.823049
1857 1 0.00405109 0.756865 0.750765
1860 1 0.00148547 0.814645 0.810947
1700 1 0.00816868 0.689248 0.681307
1828 1 0.00208553 0.680257 0.809997
1862 1 0.188862 0.811479 0.760874
1740 1 0.254494 0.810147 0.68371
1708 1 0.257914 0.686626 0.68798
1735 1 0.186898 0.744438 0.687639
1739 1 0.313969 0.745826 0.692223
1830 1 0.195133 0.687243 0.744946
1834 1 0.306065 0.690951 0.758188
1836 1 0.250797 0.6954 0.818834
1863 1 0.186381 0.749762 0.813929
1865 1 0.25064 0.749033 0.75359
1866 1 0.311147 0.812042 0.739948
1867 1 0.31032 0.759955 0.813228
1868 1 0.245629 0.810629 0.811604
1744 1 0.386314 0.807617 0.685071
1712 1 0.380762 0.683505 0.699427
1743 1 0.441744 0.744572 0.702562
1838 1 0.441684 0.683537 0.76241
1840 1 0.370409 0.677397 0.811893
1869 1 0.373771 0.748301 0.749014
1870 1 0.436096 0.809647 0.75214
1871 1 0.447137 0.748919 0.817038
1872 1 0.374142 0.805161 0.813725
1716 1 0.50669 0.692026 0.695153
1876 1 0.511725 0.819109 0.823581
1748 1 0.513759 0.805389 0.69302
1844 1 0.498893 0.681377 0.820774
1873 1 0.504174 0.741272 0.764734
1720 1 0.619622 0.673743 0.691711
1747 1 0.569593 0.747808 0.705463
1752 1 0.618882 0.810699 0.698061
1842 1 0.563426 0.678592 0.758533
1848 1 0.62656 0.684678 0.813093
1874 1 0.564434 0.80994 0.75956
1875 1 0.569997 0.749692 0.818437
1877 1 0.636649 0.763562 0.755152
1880 1 0.628005 0.811714 0.817097
1724 1 0.752396 0.687659 0.692061
1751 1 0.684217 0.744751 0.690068
1755 1 0.813776 0.751287 0.683874
1756 1 0.745296 0.807072 0.677898
1846 1 0.67832 0.677916 0.7462
1850 1 0.81032 0.687261 0.753571
1852 1 0.748547 0.687826 0.807388
1878 1 0.694836 0.814721 0.746095
1879 1 0.687761 0.747451 0.815634
1881 1 0.749807 0.756097 0.740857
1882 1 0.816491 0.807044 0.757252
1883 1 0.81688 0.749219 0.811863
1884 1 0.752044 0.802506 0.805571
1732 1 0.00010023 0.806894 0.684898
1728 1 0.882227 0.685092 0.689245
1759 1 0.943321 0.750846 0.686643
1760 1 0.872113 0.804165 0.683458
1854 1 0.944831 0.687658 0.749951
1856 1 0.870787 0.678165 0.818789
1885 1 0.876372 0.744395 0.74326
1886 1 0.935673 0.813766 0.738774
1887 1 0.925094 0.751116 0.813933
1888 1 0.88972 0.824667 0.808924
1931 1 0.319767 0.503496 0.942309
1763 1 0.062022 0.87746 0.687691
1768 1 0.123538 0.932397 0.682748
1890 1 0.0570582 0.942902 0.747027
1891 1 0.0688268 0.871466 0.814956
1893 1 0.119682 0.877443 0.751067
1896 1 0.124039 0.939106 0.818814
1764 1 0.00560643 0.934365 0.689511
1889 1 0.997118 0.874484 0.755217
1892 1 0.00111534 0.934206 0.825172
1621 1 0.626607 0.735 0.499691
1947 1 0.817801 0.501628 0.939735
1767 1 0.194151 0.8673 0.696387
1771 1 0.309462 0.869246 0.688332
1772 1 0.246194 0.941583 0.693754
1894 1 0.179487 0.933368 0.748346
1895 1 0.180728 0.874747 0.811992
1897 1 0.247512 0.877582 0.750926
1898 1 0.312819 0.93475 0.748895
1899 1 0.310558 0.859064 0.812765
1900 1 0.263049 0.935468 0.819357
1775 1 0.442387 0.864554 0.681366
1776 1 0.371717 0.927681 0.688317
1901 1 0.375258 0.86948 0.748245
1902 1 0.453841 0.939449 0.747077
1903 1 0.44286 0.872322 0.814869
1904 1 0.396004 0.938088 0.801017
1905 1 0.495902 0.870015 0.75055
1908 1 0.503009 0.930584 0.812212
1780 1 0.49893 0.931095 0.679878
1541 1 0.114759 0.505733 0.504011
667 1 0.812751 0.997456 0.679115
1779 1 0.563722 0.881262 0.701796
1784 1 0.630153 0.93456 0.697679
1906 1 0.562976 0.944517 0.74669
1907 1 0.572023 0.882226 0.807575
1909 1 0.634571 0.865293 0.759034
1912 1 0.630294 0.938946 0.801772
787 1 0.555774 0.998952 0.799764
1911 1 0.696211 0.876378 0.814524
1783 1 0.68581 0.871497 0.688227
1787 1 0.808486 0.871672 0.679823
1788 1 0.749974 0.934036 0.687528
1910 1 0.692822 0.933372 0.747077
1913 1 0.755255 0.869399 0.750461
1914 1 0.813756 0.931836 0.746483
1915 1 0.821191 0.878079 0.807068
1916 1 0.750668 0.939549 0.808996
901 1 0.130932 0.989872 0.877467
665 1 0.751058 0.999222 0.61864
1925 1 0.129993 0.504924 0.887049
1791 1 0.934801 0.875427 0.687479
1792 1 0.868136 0.938344 0.681629
1917 1 0.875584 0.872701 0.740542
1918 1 0.932977 0.939959 0.74246
1919 1 0.943527 0.881191 0.816534
1920 1 0.871046 0.943096 0.814192
1590 1 0.690605 0.680965 0.507121
1922 1 0.0723241 0.561783 0.880817
1928 1 0.129359 0.568738 0.947931
1955 1 0.0750683 0.630869 0.936864
1957 1 0.136676 0.627999 0.875619
1150 1 0.930482 0.922361 0.995052
771 1 0.0585822 0.996878 0.828185
1114 1 0.819189 0.811662 1.00025
645 1 0.12744 0.999782 0.620905
1930 1 0.311405 0.571676 0.872204
1926 1 0.192516 0.561388 0.885214
1963 1 0.318251 0.62244 0.937971
1961 1 0.24674 0.623457 0.878098
1932 1 0.256205 0.570637 0.941882
1959 1 0.185862 0.623127 0.946872
2034 1 0.563049 0.932594 0.870842
1066 1 0.303285 0.679528 1.001
1965 1 0.374702 0.625733 0.873942
1934 1 0.437674 0.564139 0.883237
1936 1 0.375494 0.565033 0.941494
1967 1 0.4415 0.622969 0.947298
1969 1 0.500462 0.62869 0.880905
1082 1 0.813069 0.684871 0.995164
13 1 0.380195 0.99222 0.991026
2035 1 0.571382 0.876822 0.938606
1102 1 0.439052 0.817036 0.990257
2039 1 0.676799 0.876878 0.945154
1940 1 0.505192 0.567618 0.935929
1938 1 0.562719 0.558001 0.882562
1944 1 0.623026 0.559916 0.940914
1971 1 0.566483 0.626019 0.936229
1973 1 0.623499 0.609693 0.870559
1646 1 0.437173 0.939254 0.500203
657 1 0.49544 0.997812 0.619007
791 1 0.689555 0.998722 0.803598
2042 1 0.815815 0.932565 0.870591
1942 1 0.686436 0.559792 0.876538
1946 1 0.822213 0.561197 0.871365
1977 1 0.753564 0.622699 0.870634
1979 1 0.810395 0.617131 0.92901
1948 1 0.746853 0.566083 0.931023
1975 1 0.691023 0.62692 0.931906
1949 1 0.879105 0.50011 0.873942
1654 1 0.682899 0.93963 0.504364
1650 1 0.557599 0.946045 0.505276
1602 1 0.0647622 0.813027 0.506327
1924 1 0.013649 0.559237 0.935838
1953 1 0.00172223 0.614633 0.879386
1950 1 0.9387 0.560356 0.886513
1981 1 0.881592 0.623036 0.88251
1983 1 0.947504 0.626062 0.937341
1952 1 0.873729 0.564508 0.94248
1554 1 0.569628 0.559775 0.51168
923 1 0.808389 0.995236 0.940876
1960 1 0.131131 0.692544 0.946432
1989 1 0.132578 0.746047 0.878663
1954 1 0.0649695 0.686575 0.881688
1987 1 0.0707553 0.752804 0.934198
1992 1 0.125049 0.812251 0.947387
1986 1 0.0616538 0.81468 0.876346
1793 1 0.00219982 0.506279 0.751255
2040 1 0.625047 0.94004 0.929726
1995 1 0.320599 0.745274 0.94704
1958 1 0.19198 0.686922 0.876126
1994 1 0.314868 0.813245 0.881401
1991 1 0.193126 0.745229 0.942419
1990 1 0.190613 0.822843 0.879456
1993 1 0.258105 0.75253 0.88051
1964 1 0.255661 0.69013 0.935411
1962 1 0.311469 0.686226 0.874982
1996 1 0.248487 0.809279 0.940541
1998 1 0.431709 0.82221 0.876852
1966 1 0.435307 0.681628 0.860965
1968 1 0.37917 0.682212 0.939992
1999 1 0.43292 0.753513 0.917017
2000 1 0.379943 0.818181 0.936075
1997 1 0.377573 0.739194 0.86489
2004 1 0.504009 0.814045 0.929932
1972 1 0.496554 0.696921 0.943134
2001 1 0.505298 0.746784 0.876117
1976 1 0.635433 0.683804 0.936888
2005 1 0.631328 0.755073 0.879038
2003 1 0.560293 0.750657 0.935921
2008 1 0.631827 0.809978 0.937044
2002 1 0.568501 0.823113 0.880059
1970 1 0.570699 0.687531 0.883037
1625 1 0.751407 0.745274 0.499958
2007 1 0.687706 0.748828 0.934752
1974 1 0.693388 0.686529 0.875273
2009 1 0.753608 0.747305 0.878777
1978 1 0.813551 0.685856 0.870691
2006 1 0.690871 0.812692 0.871241
2011 1 0.80726 0.747819 0.932827
1980 1 0.747107 0.680304 0.934923
2010 1 0.808082 0.807058 0.869307
2012 1 0.749731 0.812109 0.936475
2038 1 0.693132 0.932595 0.87831
2016 1 0.873844 0.809443 0.937322
1982 1 0.942294 0.687019 0.872982
1984 1 0.870032 0.687954 0.932589
2015 1 0.938703 0.757411 0.939319
1985 1 0.00814689 0.74154 0.866171
2014 1 0.944204 0.804233 0.875985
2013 1 0.872679 0.749058 0.876629
1988 1 0.00830551 0.797113 0.934812
1956 1 0.00408883 0.68784 0.93343
653 1 0.375156 0.998548 0.626808
1941 1 0.628262 0.504356 0.869765
2030 1 0.432706 0.939562 0.872485
2036 1 0.495994 0.945278 0.93662
2032 1 0.372965 0.936987 0.930137
2021 1 0.136323 0.882252 0.886013
2018 1 0.0672623 0.930517 0.881535
2019 1 0.0714161 0.86351 0.939304
2024 1 0.127771 0.936813 0.94665
2017 1 0.994539 0.869848 0.879574
2044 1 0.750796 0.936004 0.937592
2029 1 0.374874 0.882301 0.865492
2043 1 0.807953 0.865233 0.92826
2037 1 0.629204 0.875219 0.877811
2026 1 0.310767 0.937104 0.880998
2023 1 0.184541 0.875215 0.943298
2025 1 0.254034 0.873924 0.879325
2022 1 0.19933 0.938089 0.882525
2028 1 0.252443 0.938424 0.94403
2027 1 0.311894 0.873393 0.939076
2033 1 0.502912 0.877257 0.882766
2041 1 0.754998 0.871828 0.869649
2031 1 0.433706 0.88008 0.933393
1133 1 0.376666 0.880639 0.994421
1586 1 0.558015 0.690761 0.500631
1823 1 0.942698 0.50141 0.821287
1937 1 0.504469 0.500033 0.882354
661 1 0.620089 0.991382 0.625478
1921 1 0.0145332 0.503372 0.877567
1799 1 0.198215 0.500757 0.819554
921 1 0.74905 0.98946 0.870167
1086 1 0.943692 0.683703 0.992984
1614 1 0.440617 0.81903 0.501631
1109 1 0.623178 0.746867 0.995033
1113 1 0.751595 0.756465 0.99521
1046 1 0.690069 0.564183 0.990447
1669 1 0.13407 0.507688 0.626742
1671 1 0.193311 0.506956 0.688238
1817 1 0.763635 0.506688 0.752941
1058 1 0.0679213 0.693432 0.993918
1098 1 0.320164 0.813797 0.995936
1034 1 0.310286 0.567838 0.997497
1074 1 0.555948 0.694886 0.997242
1138 1 0.553895 0.94636 0.998468
1630 1 0.945674 0.811588 0.506818
1050 1 0.814907 0.569474 0.996052
1142 1 0.697717 0.941403 0.993081
1057 1 0.0110402 0.61793 0.995355
1146 1 0.814856 0.926025 0.99801
1550 1 0.445715 0.557917 0.506874
1117 1 0.8825 0.747862 0.998005
1546 1 0.317883 0.56956 0.500444
1594 1 0.8116 0.689156 0.504245
1134 1 0.447604 0.931865 0.999193
1658 1 0.797676 0.930274 0.505095
|
[
"scheuclu@gmail.com"
] |
scheuclu@gmail.com
|
295338183b59fe88a08317b8e639fd6a5734f638
|
1ee4c8d3208d1b51a72d30e4732a9b2082da605c
|
/sao_portal/asgi.py
|
42ad8861fc2ad5d0afd93f540fdc60c77c34b824
|
[] |
no_license
|
abhiram-g/SAO_service_dashboard
|
8336f52a9968019102884e24edc735e8e4f38bc6
|
4d2cde4cefe6c10bc644223981b67755cf6c1145
|
refs/heads/master
| 2022-10-15T10:23:30.537956
| 2020-06-08T12:43:51
| 2020-06-08T12:43:51
| 270,624,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for sao_portal project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sao_portal.settings')
application = get_asgi_application()
|
[
"abc@gmail.com"
] |
abc@gmail.com
|
b71f4f4c0c82b54bf051e4b6b83878612d3b30c1
|
dc9f2638209a9be235a1c4acc44fe2a26256c4b4
|
/venv/projects/lib/python3.8/site-packages/pip/_vendor/msgpack/_version.py
|
7f0f77b35e6f3f520b75e0ff6182498615a30fa0
|
[] |
no_license
|
alwinruby/RealWorld
|
4f5fcaed68fdd2d9fc37f5973fec365195cb3e9e
|
ec446f96f3545cb847429b5e33cefdc4f00ce432
|
refs/heads/main
| 2023-08-13T10:28:40.528047
| 2021-10-10T14:58:23
| 2021-10-10T14:58:23
| 408,079,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
version = (1, 0, 2)
|
[
"alwinsolanky@gmail.com"
] |
alwinsolanky@gmail.com
|
c89fddca810ff0a6a6816e86a3546737492b1e5e
|
56b3f373de5189f128d84ea191cc5dfc6f88636a
|
/TrackProblems/trackproblems/controllers/__init__.py
|
32c45f64e71509d5257896d8ff5c8bf2795962c0
|
[
"Apache-2.0"
] |
permissive
|
tongpa/TrackProblems
|
0e301d01735ebc523de18553d670207b9a75427e
|
41e827a21907f0e9e4cc036ec0f96ab379b94544
|
refs/heads/master
| 2021-01-02T08:53:47.648786
| 2017-09-11T09:56:39
| 2017-09-11T09:56:39
| 99,089,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
# -*- coding: utf-8 -*-
"""Controllers for the TrackProblems application."""
|
[
"tong_pa@hotmail.com"
] |
tong_pa@hotmail.com
|
2adf1b16dc0fe58417825d349d3e29ccf10e3135
|
a247e3a40bca426f604ee057319ae3f7fce5c22f
|
/django1/venv/bin/django-admin
|
8279c65ae89eb5715eb0a7f394b21f42bacec363
|
[
"MIT"
] |
permissive
|
stephenndele/django-1
|
fcb5cd2a8598b5d68855814fb588a231e06efc09
|
11be0289bc3b6b3234b1e34979f282bd06cbce2e
|
refs/heads/main
| 2023-03-26T14:55:11.769279
| 2021-03-25T12:12:55
| 2021-03-25T12:12:55
| 349,335,362
| 0
| 2
| null | 2021-03-25T12:12:56
| 2021-03-19T07:22:05
|
Python
|
UTF-8
|
Python
| false
| false
| 345
|
#!/home/moringa/Desktop/moringa-school-projects/core/Django/django-playlist/django1/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
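    # Strip a trailing "-script.pyw" or ".exe" (Windows entry-point wrappers)
    # from argv[0] before dispatching to Django's command-line handler.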
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"stephenndele09346@gmail.com"
] |
stephenndele09346@gmail.com
|
|
d7833d20c9de724dea0ff27dce90bb80523ae797
|
a22cc323b29f50da397d8363ac2521e3542a0fd7
|
/tests/dpaycli/test_witness.py
|
a4b82ed9882df3b715a284b0fdf967a5516a4db1
|
[
"MIT"
] |
permissive
|
dpays/dpay-cli
|
1a58c7dae45218e3b05b7e17ff5ce03e918d27b9
|
dfa80898e1faea2cee92ebec6fe04873381bd40f
|
refs/heads/master
| 2020-04-01T09:26:43.200933
| 2018-10-15T08:03:06
| 2018-10-15T08:03:06
| 153,075,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,801
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from parameterized import parameterized
from pprint import pprint
from dpaycli import DPay
from dpaycli.witness import Witness, Witnesses, WitnessesVotedByAccount, WitnessesRankedByVote
from dpaycli.instance import set_shared_dpay_instance
from dpaycli.nodelist import NodeList
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
nodelist = NodeList()
nodelist.update_nodes(dpay_instance=DPay(node=nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
cls.bts = DPay(
node=nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
keys={"active": wif},
num_retries=10
)
cls.testnet = DPay(
# node="https://testnet.timcliff.com",
node=nodelist.get_nodes(),
nobroadcast=True,
unsigned=True,
keys={"active": wif},
num_retries=10
)
# from getpass import getpass
# self.bts.wallet.unlock(getpass())
set_shared_dpay_instance(cls.bts)
cls.bts.set_default_account("test")
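    # Each test below runs twice via @parameterized.expand: once against the
    # normal DPay instance and once against the testnet instance from setUpClass.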
@parameterized.expand([
("normal"),
("testnet"),
])
def test_feed_publish(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
bts.txbuffer.clear()
w = Witness("gtg", dpay_instance=bts)
tx = w.feed_publish("4 BBD", "1 BEX")
self.assertEqual(
(tx["operations"][0][0]),
"feed_publish"
)
op = tx["operations"][0][1]
self.assertIn(
"gtg",
op["publisher"])
@parameterized.expand([
("normal"),
("testnet"),
])
def test_update(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
bts.txbuffer.clear()
w = Witness("gtg", dpay_instance=bts)
props = {"account_creation_fee": "0.1 BEX",
"maximum_block_size": 32000,
"bbd_interest_rate": 0}
tx = w.update(wif, "", props)
self.assertEqual((tx["operations"][0][0]), "witness_update")
op = tx["operations"][0][1]
self.assertIn(
"gtg",
op["owner"])
@parameterized.expand([
("normal"),
("testnet"),
])
def test_witnesses(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
w = Witnesses(dpay_instance=bts)
w.printAsTable()
self.assertTrue(len(w) > 0)
self.assertTrue(isinstance(w[0], Witness))
@parameterized.expand([
("normal"),
("testnet"),
])
def test_WitnessesVotedByAccount(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
w = WitnessesVotedByAccount("gtg", dpay_instance=bts)
w.printAsTable()
self.assertTrue(len(w) > 0)
self.assertTrue(isinstance(w[0], Witness))
@parameterized.expand([
("normal"),
("testnet"),
])
def test_WitnessesRankedByVote(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
w = WitnessesRankedByVote(dpay_instance=bts)
w.printAsTable()
self.assertTrue(len(w) > 0)
self.assertTrue(isinstance(w[0], Witness))
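    # test_export compares the raw witness record returned by the node RPC with
    # the dict produced by Witness.json(), skipping fields that change constantly.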
@parameterized.expand([
("normal"),
("testnet"),
])
def test_export(self, node_param):
if node_param == "normal":
bts = self.bts
else:
bts = self.testnet
owner = "gtg"
if bts.rpc.get_use_appbase():
witness = bts.rpc.find_witnesses({'owners': [owner]}, api="database")['witnesses']
if len(witness) > 0:
witness = witness[0]
else:
witness = bts.rpc.get_witness_by_account(owner)
w = Witness(owner, dpay_instance=bts)
keys = list(witness.keys())
json_witness = w.json()
exclude_list = ['votes', 'virtual_last_update', 'virtual_scheduled_time']
for k in keys:
if k not in exclude_list:
if isinstance(witness[k], dict) and isinstance(json_witness[k], list):
self.assertEqual(list(witness[k].values()), json_witness[k])
else:
self.assertEqual(witness[k], json_witness[k])
|
[
"jaredricelegal@gmail.com"
] |
jaredricelegal@gmail.com
|
4b3bcb583dfe4073fbaf60da96a44bc417c9ff61
|
1ddbe64e41ba648bb60a8758be6764e19b1c418a
|
/trunk/tygame-hall5-py/src/hall5/plugins/hallitem/_private/_actions/open.py
|
722642fc9426119f820446f54c160267601f47ce
|
[] |
no_license
|
zhaozw/freetime5
|
9bc3d0671a594822cc82e04b69c8016b7afd0554
|
99c47ad235583e765c35627ba34d4f496ccccbe4
|
refs/heads/master
| 2020-03-08T04:09:15.293616
| 2020-02-11T06:06:29
| 2020-02-11T06:06:29
| 127,913,013
| 0
| 0
| null | 2018-04-03T13:31:36
| 2018-04-03T13:31:35
| null |
UTF-8
|
Python
| false
| false
| 8,612
|
py
|
# -*- coding=utf-8 -*-
"""
@file : itemaction
@date : 2016-09-22
@author: GongXiaobo
"""
from hall5.plugins.hallitem._private._actions import _action
from hall5.plugins.hallitem._private._items.box import TYBoxItem
from tuyoo5.core.typlugin import pluginCross
from tuyoo5.game import tycontent
from tuyoo5.game._private._tycontent import TYContentItem, TYEmptyContent
from tuyoo5.plugins.item import assetutils, items
from tuyoo5.plugins.item.itemexceptions import TYItemConfException
class TYItemActionBoxOpenResult(items.TYItemActionResult):
def __init__(self, action, item, message, gotAssetList, todotask):
super(TYItemActionBoxOpenResult, self).__init__(action, item, 0, message, todotask)
self.gotAssetList = gotAssetList
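# Optional cost binding for opening a box: consume() charges the first content
# item that can be fully deducted from the user's assets; doAction falls back
# to a pay-order todotask when none of them can be paid.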
class _TYItemBindings(object):
def __init__(self, items, params):
self.items = items
self.params = params
def getParam(self, paramName, defVal=None):
return self.params.get(paramName, defVal)
@property
def failure(self):
return self.getParam('failure', '')
@classmethod
def decodeFromDict(cls, d):
params = d.get('params', {})
if not isinstance(params, dict):
raise TYItemConfException(d, 'TYItemBindings.params must be dict')
items = TYContentItem.decodeList(d.get('items', []))
return cls(items, params)
    # Consume the configured item cost from the user's assets
def consume(self, gameId, item, userAssets, timestamp, eventId, intEventParam):
for contentItem in self.items:
assetKind, consumeCount, final = userAssets.consumeAsset(gameId,
contentItem.assetKindId,
contentItem.count,
timestamp,
eventId,
intEventParam)
if consumeCount == contentItem.count:
return True, (assetKind, consumeCount, final)
return False, None
class TYItemActionBoxOpen(_action.HallItemAction):
TYPE_ID = 'common.box.open'
def __init__(self):
super(TYItemActionBoxOpen, self).__init__()
self.itemBindings = None
self.contentList = None
self.nextItemKindId = None
self.nextItemKind = None
def _decodeFromDictImpl(self, d):
bindings = d.get('bindings')
if bindings:
self.itemBindings = _TYItemBindings.decodeFromDict(bindings)
self.contentList = self._decodeContents(d)
self.nextItemKindId = d.get('nextItemKindId')
if self.nextItemKindId is not None and not isinstance(self.nextItemKindId, int):
raise TYItemConfException(d, 'TYItemActionBoxOpen.nextItemKindId must be int')
def _decodeContents(self, d):
        '''
        Parse the content list from the config dict d.
        '''
contentList = []
contents = d.get('contents')
if not isinstance(contents, list) or not contents:
raise TYItemConfException(d, 'TYItemActionBoxOpen.contents must be not empty list')
for contentConf in contents:
openTimes = contentConf.get('openTimes', {'start': 0, 'stop': -1})
if not isinstance(openTimes, dict):
raise TYItemConfException(contentConf, 'TYItemActionBoxOpen.openTimes must be dict')
startTimes = openTimes.get('start')
stopTimes = openTimes.get('stop')
if (not isinstance(startTimes, int)
or not isinstance(stopTimes, int)):
raise TYItemConfException(openTimes, 'TYItemActionBoxOpen.openTimes.start end must be int')
if 0 <= stopTimes < startTimes:
raise TYItemConfException(openTimes, 'TYItemActionBoxOpen.openTimes.stop must ge start')
content = tycontent.decodeFromDict(contentConf)
contentList.append((startTimes, stopTimes, content))
return contentList
def _initWhenLoaded(self, itemKind, itemKindMap, assetKindMap):
if self.nextItemKindId:
nextItemKind = itemKindMap.get(self.nextItemKindId)
if not nextItemKind:
raise TYItemConfException(self.conf, 'TYItemActionBoxOpen._initWhenLoad unknown nextItemKind %s' % (
self.nextItemKindId))
self.nextItemKind = nextItemKind
def canDo(self, gameId, clientId, userBag, item, timestamp):
return not item.isDied(timestamp)
def doAction(self, gameId, clientId, userAssets, item, timestamp, params):
assert (isinstance(item, TYBoxItem))
userBag = userAssets.getUserBag()
if item.isDied(timestamp):
return items.TYItemActionResult(None, None, -30, '道具已经过期', None)
if self.itemBindings:
ok, _assetTuple = self.itemBindings.consume(gameId,
item,
userAssets,
timestamp,
'ITEM_USE',
item.kindId)
if not ok:
return _action._makeTodoWithPayOrder(self.itemBindings,
gameId,
userAssets.userId,
clientId)
if not item.itemKind.singleMode:
            # Mutually exclusive (non-stacking) items are removed as soon as they are opened
userBag.removeItem(gameId, item, timestamp, 'ITEM_USE', item.kindId)
else:
            # Persist the item's updated state
item.openTimes += 1
item.original = 0
userBag.consumeItemUnits(gameId, item, 1, timestamp, 'ITEM_USE', item.kindId)
sendItems = self._getContent(item).getItems()
assetItemList = userAssets.sendContentItemList(gameId,
sendItems,
1,
True,
timestamp,
'ITEM_USE',
item.kindId)
        # Create the follow-up item if one is configured
if self.nextItemKind:
userBag.addItemUnitsByKind(gameId,
self.nextItemKind,
1,
timestamp,
0,
'ITEM_USE',
item.kindId)
        # Build the list of rewards produced by opening the box
rewardsList = []
for assetItemTuple in assetItemList:
'''
0 - assetItem
1 - count
2 - final
'''
assetItem = assetItemTuple[0]
reward = {'name': assetItem.displayName, 'pic': assetItem.pic, 'count': assetItemTuple[1],
'kindId': assetItem.kindId}
rewardsList.append(reward)
rewardTodotask = pluginCross.halltodotask.makeTodoTaskShowRewards(rewardsList)
        # Build the notification text
gotContent = assetutils.buildContentsString(assetItemList)
        # Replacement parameters for the notification message
replaceParams = {'item': item.itemKind.displayName, 'gotContent': gotContent}
_mail, message, _changed = _action._handleMailAndMessageAndChanged(gameId,
userAssets,
self,
assetItemList,
replaceParams)
# TGHall.getEventBus().publishEvent(TYOpenItemEvent(gameId, userBag.userId, item, assetItemList))
return TYItemActionBoxOpenResult(self, item, message, assetItemList, rewardTodotask)
def _getContent(self, item):
if self.contentList:
openTimes = max(item.openTimes - 1, 0)
for startTimes, stopTimes, content in self.contentList:
if (startTimes < 0 or openTimes >= startTimes) and (stopTimes < 0 or openTimes <= stopTimes):
return content
return TYEmptyContent()
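# Illustrative sketch (an assumption, not taken from the original source): the rough
# shape of an action config that _decodeFromDictImpl() above would accept. The keys
# 'bindings', 'contents', 'openTimes' and 'nextItemKindId' come from the parsing code;
# the concrete values below are made up purely for illustration.
#
# {
#     "bindings": {"params": {"failure": "..."}, "items": [ ...TYContentItem dicts... ]},
#     "contents": [
#         {"openTimes": {"start": 0, "stop": 0},  ...content fields for the first open... },
#         {"openTimes": {"start": 1, "stop": -1}, ...content fields for later opens... }
#     ],
#     "nextItemKindId": 1001
# }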
|
[
"tuyoo@tuyoodeMac-mini-8.local"
] |
tuyoo@tuyoodeMac-mini-8.local
|
baaf7396d7d64ca02b696064862bf5652b225a14
|
568ed7fdc9ccbd7967dd2950669c68002b454869
|
/yotta/test/cli/test.py
|
ccec43116468a2790ebad484c3f8dcd52ce643de
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
headlessme/yotta
|
ade06c41108dca045e295bd2e0fdb2b7baef8c89
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
refs/heads/master
| 2021-01-17T11:10:07.569198
| 2015-12-08T11:45:12
| 2015-12-08T11:45:12
| 27,595,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,592
|
py
|
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import copy
# internal modules:
from yotta.lib.detect import systemDefaultTarget
from . import cli
from . import util
Test_Tests = {
'module.json':'''{
"name": "test-tests",
"version": "0.0.0",
"description": "Test yotta's compilation of tests.",
"author": "James Crosby <james.crosby@arm.com>",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
]
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
'test-tests/foo.h':'int foo();',
'test/a/bar.c':'#include "test-tests/foo.h"\nint main(){ foo(); return 0; }',
'test/b/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/b/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/c/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/c/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/d/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/d/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/e/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/e/b/a/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/f/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/f/a/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/g/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/g/a/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }'
}
Test_Fitler_Pass = copy.copy(Test_Tests)
Test_Fitler_Pass['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"grep",
"!"
]
}
}'''
Test_Fitler_Fail = copy.copy(Test_Tests)
Test_Fitler_Fail['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"grep",
"string that isnt in the output"
]
}
}'''
Test_Fitler_NotFound = copy.copy(Test_Tests)
Test_Fitler_NotFound['module.json'] = '''{
"name": "test-tests",
"version": "0.0.0",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"scripts": {
"testReporter": [
"commandthatshouldntexist"
]
}
}'''
class TestCLITest(unittest.TestCase):
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_tests(self):
test_dir = util.writeTestFiles(Test_Tests, True)
output = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
output = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
self.assertIn('test-a passed', output)
self.assertIn('test-c passed', output)
self.assertIn('test-d passed', output)
self.assertIn('test-e passed', output)
self.assertIn('test-f passed', output)
self.assertIn('test-g passed', output)
util.rmRf(test_dir)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterPassing(self):
test_dir = util.writeTestFiles(Test_Fitler_Pass, True)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
util.rmRf(test_dir)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterFailing(self):
test_dir = util.writeTestFiles(Test_Fitler_Fail, True)
stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
if statuscode == 0:
print(stdout)
print(stderr)
self.assertIn('test-a failed', '%s %s' % (stdout, stderr))
self.assertIn('test-c failed', '%s %s' % (stdout, stderr))
self.assertIn('test-d failed', '%s %s' % (stdout, stderr))
self.assertIn('test-e failed', '%s %s' % (stdout, stderr))
self.assertIn('test-f failed', '%s %s' % (stdout, stderr))
self.assertIn('test-g failed', '%s %s' % (stdout, stderr))
self.assertNotEqual(statuscode, 0)
util.rmRf(test_dir)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet")
def test_testOutputFilterNotFound(self):
test_dir = util.writeTestFiles(Test_Fitler_NotFound, True)
stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir)
if statuscode == 0:
print(stdout)
print(stderr)
self.assertNotEqual(statuscode, 0)
util.rmRf(test_dir)
def runCheckCommand(self, args, test_dir):
stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
if statuscode != 0:
print('command failed with status %s' % statuscode)
print(stdout)
print(stderr)
self.assertEqual(statuscode, 0)
return '%s %s' % (stdout, stderr)
|
[
"James.Crosby@arm.com"
] |
James.Crosby@arm.com
|
f1516933ea445803defec8a1fa0c6335c45eb5e6
|
491d3ad04c852d2efe3e49842ccfcd20e40eab96
|
/mysite/blog/admin.py
|
6f0dd47e26f5ddf14bfd772d3edc6b2cfbd7becd
|
[] |
no_license
|
marianwitkowski/python-24082019
|
746c9824c15c2072caceeac8a9b610d79c63f0f6
|
df849d09aa7d9b7a08d8276a9c2b557d9f9d7ce7
|
refs/heads/master
| 2020-07-07T03:55:55.398961
| 2019-09-29T16:12:40
| 2019-09-29T16:12:40
| 203,239,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
from django.contrib import admin
from .models import Post
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'status','created_on')
list_filter = ("status",)
search_fields = ['title', 'content']
prepopulated_fields = {'slug': ('title',)}
admin.site.register(Post, PostAdmin)
|
[
"marian.witkowski@gmail.com"
] |
marian.witkowski@gmail.com
|
9f70e24acb6247d89104f02908ac2638143ee173
|
ca4910e944cca453050299cb6c8e856c06a76fb0
|
/blog/settings.py
|
1175ab8bc4cd442f1245d312eacf024ca32835cc
|
[] |
no_license
|
SonerArslan2019/djangoRESTAPI
|
f8e33cd8570f86f14810ef8fabea918503e0fc90
|
5398b578524fc5c6eb3b7ed51db68bc4f3687221
|
refs/heads/master
| 2023-04-18T01:45:45.093105
| 2021-04-24T09:50:22
| 2021-04-24T09:50:22
| 360,634,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,603
|
py
|
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-$eiq=w_$+n^n#iy6c45zc0hsni!wjycxipc!4yrx+zq+!(js43'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework_simplejwt.authentication.JWTAuthentication',
# 'rest_framework.authentication.SessionAuthentication'
# ],
# 'DEFAULT_THROTTLE_CLASSES': (
# 'rest_framework.throttling.ScopedRateThrottle',
# ),
# 'DEFAULT_THROTTLE_RATES': {
# 'registerthrottle': '15/hour',
# # 'hasan' : '5/hour'
# }
# }
# SIMPLE_JWT = {
# 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=15)
# }
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'post',
'comment',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"soner@arslanyapi.com.tr"
] |
soner@arslanyapi.com.tr
|
5bcdd778d1e6f731488beb83daa9a83c0f4996a2
|
b8e29b6e957b0a55571f7cffc4357666a43fb56e
|
/mxnet/insightface/insightface/src/symbols/fdpn.py
|
0544f8347e0d24662ee3f007fe9468e1d3a22ddd
|
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
aliyun/alibabacloud-aiacc-demo
|
b9bbe565021757ecaea0e7d7209632cbdb5cc8ab
|
2e49deeb38d12d4af4c5e50bb15d731c4bbf4cf1
|
refs/heads/master
| 2023-05-14T08:09:33.067050
| 2023-05-04T08:19:51
| 2023-05-04T08:19:51
| 228,604,743
| 38
| 14
|
Apache-2.0
| 2022-06-22T02:41:01
| 2019-12-17T11:46:44
|
Python
|
UTF-8
|
Python
| false
| false
| 9,635
|
py
|
import mxnet as mx
import symbol_utils
bn_momentum = 0.9
def BK(data):
return mx.symbol.BlockGrad(data=data)
# - - - - - - - - - - - - - - - - - - - - - - -
# Fundamental Elements
def BN(data, fix_gamma=False, momentum=bn_momentum, name=None):
bn = mx.symbol.BatchNorm( data=data, fix_gamma=fix_gamma, momentum=bn_momentum, name=('%s__bn'%name))
return bn
def AC(data, act_type='relu', name=None):
act = mx.symbol.Activation(data=data, act_type=act_type, name=('%s__%s' % (name, act_type)))
return act
def BN_AC(data, momentum=bn_momentum, name=None):
bn = BN(data=data, name=name, fix_gamma=False, momentum=momentum)
bn_ac = AC(data=bn, name=name)
return bn_ac
def Conv(data, num_filter, kernel, stride=(1,1), pad=(0, 0), name=None, no_bias=True, w=None, b=None, attr=None, num_group=1):
Convolution = mx.symbol.Convolution
if w is None:
conv = Convolution(data=data, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=('%s__conv' %name), no_bias=no_bias, attr=attr)
else:
if b is None:
conv = Convolution(data=data, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=('%s__conv' %name), no_bias=no_bias, weight=w, attr=attr)
else:
conv = Convolution(data=data, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=('%s__conv' %name), no_bias=False, bias=b, weight=w, attr=attr)
return conv
# - - - - - - - - - - - - - - - - - - - - - - -
# Standard Common functions < CVPR >
def Conv_BN( data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
cov = Conv( data=data, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=name, w=w, b=b, no_bias=no_bias, attr=attr)
cov_bn = BN( data=cov, name=('%s__bn' % name))
return cov_bn
def Conv_BN_AC(data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
cov_bn = Conv_BN(data=data, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=name, w=w, b=b, no_bias=no_bias, attr=attr)
cov_ba = AC( data=cov_bn, name=('%s__ac' % name))
return cov_ba
# - - - - - - - - - - - - - - - - - - - - - - -
# Standard Common functions < ECCV >
def BN_Conv( data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
bn = BN( data=data, name=('%s__bn' % name))
bn_cov = Conv( data=bn, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=name, w=w, b=b, no_bias=no_bias, attr=attr)
return bn_cov
def AC_Conv( data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
ac = AC( data=data, name=('%s__ac' % name))
ac_cov = Conv( data=ac, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=name, w=w, b=b, no_bias=no_bias, attr=attr)
return ac_cov
def BN_AC_Conv(data, num_filter, kernel, pad, stride=(1,1), name=None, w=None, b=None, no_bias=True, attr=None, num_group=1):
bn = BN( data=data, name=('%s__bn' % name))
ba_cov = AC_Conv(data=bn, num_filter=num_filter, num_group=num_group, kernel=kernel, pad=pad, stride=stride, name=name, w=w, b=b, no_bias=no_bias, attr=attr)
return ba_cov
def DualPathFactory(data, num_1x1_a, num_3x3_b, num_1x1_c, name, inc, G, _type='normal'):
kw = 3
kh = 3
    pw = (kw - 1) // 2  # integer division keeps the pad values as ints under Python 3
    ph = (kh - 1) // 2
    # type
    if _type == 'proj':
        key_stride = 1
        has_proj = True
    if _type == 'down':
        key_stride = 2
        has_proj = True
    if _type == 'normal':
        key_stride = 1
        has_proj = False
# PROJ
if type(data) is list:
data_in = mx.symbol.Concat(*[data[0], data[1]], name=('%s_cat-input' % name))
else:
data_in = data
if has_proj:
c1x1_w = BN_AC_Conv( data=data_in, num_filter=(num_1x1_c+2*inc), kernel=( 1, 1), stride=(key_stride, key_stride), name=('%s_c1x1-w(s/%d)' %(name, key_stride)), pad=(0, 0))
data_o1 = mx.symbol.slice_axis(data=c1x1_w, axis=1, begin=0, end=num_1x1_c, name=('%s_c1x1-w(s/%d)-split1' %(name, key_stride)))
data_o2 = mx.symbol.slice_axis(data=c1x1_w, axis=1, begin=num_1x1_c, end=(num_1x1_c+2*inc), name=('%s_c1x1-w(s/%d)-split2' %(name, key_stride)))
else:
data_o1 = data[0]
data_o2 = data[1]
# MAIN
c1x1_a = BN_AC_Conv( data=data_in, num_filter=num_1x1_a, kernel=( 1, 1), pad=( 0, 0), name=('%s_c1x1-a' % name))
c3x3_b = BN_AC_Conv( data=c1x1_a, num_filter=num_3x3_b, kernel=(kw, kh), pad=(pw, ph), name=('%s_c%dx%d-b' % (name,kw,kh)), stride=(key_stride,key_stride), num_group=G)
c1x1_c = BN_AC_Conv( data=c3x3_b, num_filter=(num_1x1_c+inc), kernel=( 1, 1), pad=( 0, 0), name=('%s_c1x1-c' % name))
c1x1_c1= mx.symbol.slice_axis(data=c1x1_c, axis=1, begin=0, end=num_1x1_c, name=('%s_c1x1-c-split1' % name))
c1x1_c2= mx.symbol.slice_axis(data=c1x1_c, axis=1, begin=num_1x1_c, end=(num_1x1_c+inc), name=('%s_c1x1-c-split2' % name))
# OUTPUTS
summ = mx.symbol.ElementWiseSum(*[data_o1, c1x1_c1], name=('%s_sum' % name))
dense = mx.symbol.Concat( *[data_o2, c1x1_c2], name=('%s_cat' % name))
return [summ, dense]
k_R = 160
G = 40
k_sec = { 2: 4, \
3: 8, \
4: 28, \
5: 3 }
inc_sec= { 2: 16, \
3: 32, \
4: 32, \
5: 128 }
def get_symbol(num_classes = 1000, num_layers=92, **kwargs):
if num_layers==68:
k_R = 128
G = 32
k_sec = { 2: 3, \
3: 4, \
4: 12, \
5: 3 }
inc_sec= { 2: 16, \
3: 32, \
4: 32, \
5: 64 }
elif num_layers==92:
k_R = 96
G = 32
k_sec = { 2: 3, \
3: 4, \
4: 20, \
5: 3 }
inc_sec= { 2: 16, \
3: 32, \
4: 24, \
5: 128 }
elif num_layers==107:
k_R = 200
G = 50
k_sec = { 2: 4, \
3: 8, \
4: 20, \
5: 3 }
inc_sec= { 2: 20, \
3: 64, \
4: 64, \
5: 128 }
elif num_layers==131:
k_R = 160
G = 40
k_sec = { 2: 4, \
3: 8, \
4: 28, \
5: 3 }
inc_sec= { 2: 16, \
3: 32, \
4: 32, \
5: 128 }
else:
raise ValueError("no experiments done on dpn num_layers {}, you can do it yourself".format(num_layers))
version_se = kwargs.get('version_se', 1)
version_input = kwargs.get('version_input', 1)
assert version_input>=0
version_output = kwargs.get('version_output', 'E')
fc_type = version_output
version_unit = kwargs.get('version_unit', 3)
print(version_se, version_input, version_output, version_unit)
## define Dual Path Network
data = mx.symbol.Variable(name="data")
#data = data-127.5
#data = data*0.0078125
#if version_input==0:
# conv1_x_1 = Conv(data=data, num_filter=128, kernel=(7, 7), name='conv1_x_1', pad=(3,3), stride=(2,2))
#else:
# conv1_x_1 = Conv(data=data, num_filter=128, kernel=(3, 3), name='conv1_x_1', pad=(3,3), stride=(1,1))
#conv1_x_1 = BN_AC(conv1_x_1, name='conv1_x_1__relu-sp')
#conv1_x_x = mx.symbol.Pooling(data=conv1_x_1, pool_type="max", kernel=(3, 3), pad=(1,1), stride=(2,2), name="pool1")
conv1_x_x = symbol_utils.get_head(data, version_input, 128)
# conv2
bw = 256
inc= inc_sec[2]
    R = (k_R * bw) // 256
conv2_x_x = DualPathFactory( conv1_x_x, R, R, bw, 'conv2_x__1', inc, G, 'proj' )
for i_ly in range(2, k_sec[2]+1):
conv2_x_x = DualPathFactory( conv2_x_x, R, R, bw, ('conv2_x__%d'% i_ly), inc, G, 'normal')
# conv3
bw = 512
inc= inc_sec[3]
    R = (k_R * bw) // 256
conv3_x_x = DualPathFactory( conv2_x_x, R, R, bw, 'conv3_x__1', inc, G, 'down' )
for i_ly in range(2, k_sec[3]+1):
conv3_x_x = DualPathFactory( conv3_x_x, R, R, bw, ('conv3_x__%d'% i_ly), inc, G, 'normal')
# conv4
bw = 1024
inc= inc_sec[4]
    R = (k_R * bw) // 256
conv4_x_x = DualPathFactory( conv3_x_x, R, R, bw, 'conv4_x__1', inc, G, 'down' )
for i_ly in range(2, k_sec[4]+1):
conv4_x_x = DualPathFactory( conv4_x_x, R, R, bw, ('conv4_x__%d'% i_ly), inc, G, 'normal')
# conv5
bw = 2048
inc= inc_sec[5]
    R = (k_R * bw) // 256
conv5_x_x = DualPathFactory( conv4_x_x, R, R, bw, 'conv5_x__1', inc, G, 'down' )
for i_ly in range(2, k_sec[5]+1):
conv5_x_x = DualPathFactory( conv5_x_x, R, R, bw, ('conv5_x__%d'% i_ly), inc, G, 'normal')
# output: concat
conv5_x_x = mx.symbol.Concat(*[conv5_x_x[0], conv5_x_x[1]], name='conv5_x_x_cat-final')
#conv5_x_x = BN_AC(conv5_x_x, name='conv5_x_x__relu-sp')
before_pool = conv5_x_x
fc1 = symbol_utils.get_fc1(before_pool, num_classes, fc_type)
return fc1
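# Minimal usage sketch (an assumption added for illustration; it relies on MXNet and
# the same symbol_utils module the rest of this file already imports):
#
# if __name__ == '__main__':
#     sym = get_symbol(num_classes=512, num_layers=92)
#     mx.viz.print_summary(sym, shape={'data': (1, 3, 112, 112)})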
|
[
"ziqi.yzq@alibaba-inc.com"
] |
ziqi.yzq@alibaba-inc.com
|
f6a760119a4c4b2c583957abb4a7066cbb64a2eb
|
dc67e70a303f265ee6cb4c1a2d61fe811053fb3d
|
/beginner/066/A.py
|
cabb38041ad8e0ea035492830c9cef953fb894b2
|
[] |
no_license
|
cry999/AtCoder
|
d39ce22d49dfce805cb7bab9d1ff0dd21825823a
|
879d0e43e3fac0aadc4d772dc57374ae72571fe6
|
refs/heads/master
| 2020-04-23T13:55:00.018156
| 2019-12-11T05:23:03
| 2019-12-11T05:23:03
| 171,214,066
| 0
| 0
| null | 2019-05-13T15:17:02
| 2019-02-18T04:24:01
|
Python
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
def ringring(a: int, b: int, c: int)->int:
return sum(sorted([a, b, c])[:2])
if __name__ == "__main__":
a, b, c = map(int, input().split())
ans = ringring(a, b, c)
print(ans)
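# Worked example (not part of the original submission): for a = 5, b = 3, c = 7 the two
# cheapest values are 3 and 5, so ringring(5, 3, 7) returns 8.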
|
[
"when.the.cry999@gmail.com"
] |
when.the.cry999@gmail.com
|
72d7de871b2fb085d76442aa9a24ad3405cd961b
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/centerface/preprocess.py
|
a985c6b64428994c27265c5fcd6ff413bee6b92a
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pre process for 310 inference"""
import os
import shutil
import cv2
import numpy as np
from src.model_utils.config import config
from dependency.centernet.src.lib.detectors.base_detector import CenterFaceDetector
def preprocess(dataset_path, preprocess_path):
event_list = os.listdir(dataset_path)
input_path = os.path.join(preprocess_path, "input")
meta_path = os.path.join(preprocess_path, "meta/meta")
if not os.path.exists(input_path):
os.makedirs(os.path.join(preprocess_path, "input"))
if not os.path.exists(meta_path):
os.makedirs(os.path.join(preprocess_path, "meta/meta"))
detector = CenterFaceDetector(config, None)
name_list = []
meta_list = []
i = 0
for _, event in enumerate(event_list):
file_list_item = os.listdir(os.path.join(dataset_path, event))
im_dir = event
for _, file in enumerate(file_list_item):
im_name = file.split('.')[0]
zip_name = '%s/%s' % (im_dir, file)
img_path = os.path.join(dataset_path, zip_name)
image = cv2.imread(img_path)
for scale in config.test_scales:
_, meta = detector.pre_process(image, scale)
img_file_path = os.path.join(input_path, file)
shutil.copyfile(img_path, img_file_path)
meta_file_path = os.path.join(preprocess_path + "/meta/meta", im_name + ".txt")
with open(meta_file_path, 'w+') as f:
f.write(str(meta))
name_list.append(im_name)
meta_list.append(meta)
i += 1
print(f"preprocess: no.[{i}], img_name:{im_name}")
np.save(os.path.join(preprocess_path + "/meta", "name_list.npy"), np.array(name_list))
np.save(os.path.join(preprocess_path + "/meta", "meta_list.npy"), np.array(meta_list))
if __name__ == '__main__':
preprocess(config.dataset_path, config.preprocess_path)
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
95869793a95931568444941801533d4d5e6cb5eb
|
d6be2453d1c4428a4b9d9f78ea80e7e1a39f0f5b
|
/src/utils.py
|
20225ec0e46d35e08388cbfdfc634ce8c9a2e343
|
[] |
no_license
|
bcrestel/sls
|
8f6a6356264747285fb193b2ebfa1c2914aa0fe3
|
f0392135e5c4072e3341998651091c8455a882fb
|
refs/heads/master
| 2020-12-15T16:51:03.663284
| 2020-10-06T14:22:58
| 2020-10-06T14:22:58
| 235,185,248
| 0
| 0
| null | 2020-01-20T19:47:07
| 2020-01-20T19:47:06
| null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
import hashlib
import pickle
import json
import os
import itertools
import torch
import numpy as np
def hash_dict(dictionary):
"""Create a hash for a dictionary."""
dict2hash = ""
for k in sorted(dictionary.keys()):
if isinstance(dictionary[k], dict):
v = hash_dict(dictionary[k])
else:
v = dictionary[k]
dict2hash += "%s_%s_" % (str(k), str(v))
return hashlib.md5(dict2hash.encode()).hexdigest()
def save_pkl(fname, data):
"""Save data in pkl format."""
# Save file
fname_tmp = fname + "_tmp.pkl"
with open(fname_tmp, "wb") as f:
pickle.dump(data, f)
os.rename(fname_tmp, fname)
def load_pkl(fname):
"""Load the content of a pkl file."""
with open(fname, "rb") as f:
return pickle.load(f)
def load_json(fname, decode=None):
with open(fname, "r") as json_file:
d = json.load(json_file)
return d
def save_json(fname, data):
with open(fname, "w") as json_file:
json.dump(data, json_file, indent=4, sort_keys=True)
def torch_save(fname, obj):
""""Save data in torch format."""
# Define names of temporal files
fname_tmp = fname + ".tmp"
torch.save(obj, fname_tmp)
os.rename(fname_tmp, fname)
def read_text(fname):
# READS LINES
with open(fname, "r", encoding="utf-8") as f:
lines = f.readlines()
# lines = [line.decode('utf-8').strip() for line in f.readlines()]
return lines
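# Usage sketch (illustrative, not part of the original module): hash_dict sorts the keys,
# so logically equal dicts hash identically regardless of insertion order.
#
# assert hash_dict({'a': 1, 'b': 2}) == hash_dict({'b': 2, 'a': 1})
# save_pkl('/tmp/example.pkl', {'a': 1}); assert load_pkl('/tmp/example.pkl') == {'a': 1}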
|
[
"issam.laradji@gmail.com"
] |
issam.laradji@gmail.com
|
91489aef1cfcb6675882a5ed78249f485727af5a
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/access_package_request_state.py
|
8d2207eae29267f561c8c719c8dc968d0f903cb0
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
from enum import Enum
class AccessPackageRequestState(str, Enum):
Submitted = "submitted",
PendingApproval = "pendingApproval",
Delivering = "delivering",
Delivered = "delivered",
DeliveryFailed = "deliveryFailed",
Denied = "denied",
Scheduled = "scheduled",
Canceled = "canceled",
PartiallyDelivered = "partiallyDelivered",
UnknownFutureValue = "unknownFutureValue",
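# Note / usage sketch (added for illustration): because the enum mixes in str, the trailing
# commas above still yield plain string values, e.g.
#   AccessPackageRequestState.Submitted.value == "submitted"
#   AccessPackageRequestState("delivered") is AccessPackageRequestState.Delivered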
|
[
"GraphTooling@service.microsoft.com"
] |
GraphTooling@service.microsoft.com
|
a31faa28ea7fa887dcbc8ad53795258aa189f931
|
498e792e16ab1a74ac034c53177c4cccbeef2749
|
/classification/resnet/train.py
|
662ceca52750777835c1b05e25f7eaacf8d247aa
|
[] |
no_license
|
ydwisroad/imageprocessingpytorch
|
f97bec4469c087f6bbbca5d42da180c95be8b13f
|
bd8d1af228619c9c6c9c1a2b880422f7d5048dd5
|
refs/heads/master
| 2023-07-29T05:05:11.145832
| 2022-02-21T23:32:03
| 2022-02-21T23:32:03
| 284,976,501
| 7
| 3
| null | 2023-07-24T01:08:22
| 2020-08-04T12:43:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
import torch
import torch.nn as nn
from torchvision import transforms, datasets
import json
import matplotlib.pyplot as plt
import os
import torch.optim as optim
from model import resnet34, resnet101
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
data_transform = {
"train": transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
"val": transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}
data_root = os.path.abspath(os.path.join(os.getcwd(), "../../data")) # get data root path
image_path = data_root + "/flower_photos_simple/" # flower data set path
train_dataset = datasets.ImageFolder(root=image_path+"train",
transform=data_transform["train"])
train_num = len(train_dataset)
# {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
flower_list = train_dataset.class_to_idx
cla_dict = dict((val, key) for key, val in flower_list.items())
# write dict into json file
json_str = json.dumps(cla_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
json_file.write(json_str)
batch_size = 4
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size, shuffle=True,
num_workers=0)
validate_dataset = datasets.ImageFolder(root=image_path + "val",
transform=data_transform["val"])
val_num = len(validate_dataset)
validate_loader = torch.utils.data.DataLoader(validate_dataset,
batch_size=batch_size, shuffle=False,
num_workers=0)
net = resnet34()
# load pretrain weights
#model_weight_path = "./resnet34-pre.pth"
#missing_keys, unexpected_keys = net.load_state_dict(torch.load(model_weight_path), strict=False)
# for param in net.parameters():
# param.requires_grad = False
# change fc layer structure
inchannel = net.fc.in_features
net.fc = nn.Linear(inchannel, 5)
net.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)
best_acc = 0.0
save_path = './resNet34.pth'
for epoch in range(10):
# train
net.train()
running_loss = 0.0
for step, data in enumerate(train_loader, start=0):
images, labels = data
optimizer.zero_grad()
logits = net(images.to(device))
loss = loss_function(logits, labels.to(device))
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
# print train process
rate = (step+1)/len(train_loader)
a = "*" * int(rate * 50)
b = "." * int((1 - rate) * 50)
print("\rtrain loss: {:^3.0f}%[{}->{}]{:.4f}".format(int(rate*100), a, b, loss), end="")
print()
# validate
net.eval()
acc = 0.0 # accumulate accurate number / epoch
with torch.no_grad():
for val_data in validate_loader:
val_images, val_labels = val_data
outputs = net(val_images.to(device)) # eval model only have last output layer
# loss = loss_function(outputs, test_labels)
predict_y = torch.max(outputs, dim=1)[1]
acc += (predict_y == val_labels.to(device)).sum().item()
val_accurate = acc / val_num
if val_accurate > best_acc:
best_acc = val_accurate
torch.save(net.state_dict(), save_path)
print('[epoch %d] train_loss: %.3f test_accuracy: %.3f' %
          (epoch + 1, running_loss / (step + 1), val_accurate))  # step is 0-based, so there are step + 1 batches
print('Finished Training')
|
[
"wandf12345@163.com"
] |
wandf12345@163.com
|
5bf7470e827eea42e7c8955e6c2fb564dbc45de9
|
f453f183834e3bf587a120023615ed2ddd38c157
|
/tsa/lib/encoders.py
|
969cdf1f6c1712d900097659bf0862df709f2d35
|
[
"MIT"
] |
permissive
|
chbrown/topic-sentiment-authorship
|
72c21638eb72888c370cd3b1b5f06504df09ce2e
|
e8cacf11b06583d9ed85ff790e1d5322e59f2fd6
|
refs/heads/master
| 2022-07-05T22:58:24.456139
| 2020-03-29T16:12:21
| 2020-03-29T16:12:21
| 13,025,589
| 0
| 0
|
MIT
| 2020-03-29T16:13:35
| 2013-09-23T02:53:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
import json
from datetime import datetime
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, '__json__'):
return obj.__json__()
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S')
# return super(JSONEncoder, self).default(obj)
return obj
# encoder = JSONEncoder()
# def json(obj):
# return encoder.encode(obj)
# c'mon, just DIY
def csv(obj):
return ','.join(map(str, obj))
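# Usage sketch (illustrative): the encoder falls back to __json__() or an ISO timestamp,
# and csv() simply joins stringified values.
#
# json.dumps({'t': datetime(2020, 1, 1)}, cls=JSONEncoder)  # -> '{"t": "2020-01-01T00:00:00"}'
# csv([1, 2, 3])  # -> '1,2,3'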
|
[
"io@henrian.com"
] |
io@henrian.com
|
977cd1f34ed3ff2b174cb7a5bb2ad1829606c277
|
fbff973537eae45b724b23e9b6fc8692da959b21
|
/app/core/config.py
|
979658548ef83b1914a5730ab318dedd6ab5b824
|
[
"MIT"
] |
permissive
|
lsetiawan/cava-metadata
|
d4a8878480cd9da4bfa163b9d9c42d705a0fb263
|
e45c469a4b5cbdebfba74ab0031fb94eb59fd724
|
refs/heads/main
| 2023-04-08T02:28:24.402853
| 2021-01-27T20:02:23
| 2021-01-27T20:02:23
| 358,033,596
| 0
| 0
|
MIT
| 2021-04-14T20:26:35
| 2021-04-14T20:26:35
| null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
import os
import fsspec
# API SETTINGS
SERVICE_NAME = "Metadata Service"
SERVICE_ID = "metadata"
OPENAPI_URL = f"/{SERVICE_ID}/openapi.json"
DOCS_URL = f"/{SERVICE_ID}/"
SERVICE_DESCRIPTION = """Metadata service for Interactive Oceans."""
CORS_ORIGINS = [
"http://localhost",
"http://localhost:8000",
"http://localhost:5000",
"http://localhost:4000",
"https://appdev.ooica.net",
"https://app-dev.ooica.net",
"https://app.interactiveoceans.washington.edu",
"https://api-dev.ooica.net",
"https://api.interactiveoceans.washington.edu",
]
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# API VERSION
CURRENT_API_VERSION = 2.0
# Redis configurations
REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
REDIS_PORT = os.environ.get("REDIS_PORT", 6379)
# OOI Configurations
BASE_URL = "https://ooinet.oceanobservatories.org"
M2M_URL = "api/m2m"
USERNAME = os.environ.get("OOI_USERNAME", "")
TOKEN = os.environ.get("OOI_TOKEN", "")
# File Systems Configurations
FILE_SYSTEMS = {
"minio_s3": fsspec.filesystem(
"s3", client_kwargs={"endpoint_url": "http://minio:9000"}
),
"aws_s3": fsspec.filesystem(
"s3",
skip_instance_cache=True,
use_listings_cache=False,
config_kwargs={"max_pool_connections": 1000},
),
}
GOOGLE_SERVICE_JSON = os.environ.get("GOOGLE_SERVICE_JSON", "",)
DATA_BUCKET = 'ooi-data'
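# Usage sketch (illustrative only; bucket contents and credentials are assumed): pick one of
# the configured filesystems and list the data bucket through fsspec's standard API.
#
# fs = FILE_SYSTEMS["aws_s3"]
# fs.ls(DATA_BUCKET)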
|
[
"landungs@uw.edu"
] |
landungs@uw.edu
|
f9b589aa7e5cb26eda1a3b56bc67249768ee6093
|
4b819b9c7aee9d60689f487557e437445101188d
|
/lanuch/accounts/views.py
|
e04d7ebbd2e15bedabf699d153c0170baa54e03b
|
[] |
no_license
|
Damidara16/dev
|
c2fe90fb70d4644bdee964ce9b7b85bf9f71c99a
|
f3c8666bc32b19ffb623b83019fdbf404433ece8
|
refs/heads/master
| 2020-03-10T20:14:11.173397
| 2018-04-15T00:56:56
| 2018-04-15T00:56:56
| 129,565,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,485
|
py
|
from django.shortcuts import render, redirect
from .forms import RegistrationForm, EditProfileForm, AddInfo as AddInfoForm  # aliased: the AddInfo view below would otherwise shadow the form class
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from .models import Author
from blog.models import waste
from django.contrib.auth.models import User
def ViewProfile(request, author_pk):
    if author_pk == request.user.id:
        if request.user.is_authenticated:
            # The logged-in user is viewing their own profile.
            user = request.user
            return render(request, 'accounts/profile.html', {'user': user})
    else:
        user = User.objects.get(pk=author_pk)
        #user.author_set.views += 1
        #user.views += 1
        #user.save()
        return render(request, 'accounts/profile.html', {'user': user})
def register(request):
if request.method == 'POST':
form = RegistrationForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('/home')
#return render(request, 'blog/home.html', context)
else: return redirect('/accounts/register')
else:
form = RegistrationForm()
        title = 'Register'
btnName = 'Register'
context = {'form':form, 'title':title, 'btnName':btnName}
return render(request, 'accounts/edit.html', context)
'''
def jregister(request):
if request.method =='POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
return redirect(reverse('accounts:home'))
else:
form = RegistrationForm()
args = {'form': form}
return render(request, 'accounts/reg_form.html', args)
'''
def EditProfile(request):
    if request.method == 'POST':
        form = EditProfileForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('/accounts/profile')  # mirrors Changepassword's post-save redirect
    else:
        form = EditProfileForm(instance=request.user)
    title = 'Edit Your Profile'
    btnName = 'Done editing'
    context = {'form': form, 'title': title, 'btnName': btnName}
    # An invalid POST falls through and re-renders the bound form with its errors.
    return render(request, 'accounts/edit.html', context)
def AddInfo(request):
    if request.method == 'POST' and request.user.is_authenticated:
        form = AddInfoForm(request.POST)
        if form.is_valid():
            # form.save() persists the validated description and link; the old
            # commit=False instance was being overwritten by raw cleaned_data values.
            form.save()
            return redirect('/home/')
        else:
            return redirect('/accounts/add')
    else:
        form = AddInfoForm()
        title = 'Tell Us More'
        btnName = 'Finish'
        context = {'form': form, 'title': title, 'btnName': btnName}
        return render(request, 'accounts/edit.html', context)
def Changepassword(request):
    if request.method == 'POST':
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            update_session_auth_hash(request, form.user)
            return redirect('/accounts/profile')
        else:
            return redirect('/accounts/Changepassword')
    else:
        # PasswordChangeForm takes the user, not a model instance.
        form = PasswordChangeForm(user=request.user)
        title = 'Change Your Password'
        btnName = 'Change Password'
        context = {'form': form, 'title': title, 'btnName': btnName}
        return render(request, 'accounts/edit.html', context)
|
[
"sajala8624@gmail.com"
] |
sajala8624@gmail.com
|
08ab74257fcfe8e582694e17d8f70578c069d383
|
f15449e438b0b799a3866ba21243924ce0e4fa2d
|
/survey/models.py
|
e6565f3535ec711e92f3831b062f00dd86ac58f5
|
[] |
no_license
|
xmduhan/qisite
|
46af79d0e4d1af814298862cfaa18c6f7ddf3a74
|
2c9d7513c3e0cd483341dc457a8d289e5e174f20
|
refs/heads/master
| 2021-01-17T08:44:29.826082
| 2020-02-07T11:22:29
| 2020-02-07T11:22:29
| 14,419,020
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,025
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
from django.db import models
from django.db.models import F
import account.models
from datetime import datetime
from numstyle import NumStyle, defaultQuestionNumStyle, defaultBranchNumStyle
from django.core.exceptions import ValidationError
from django.core.signing import Signer
import copy
from dateutil.relativedelta import relativedelta
import operator
import re
from jieba.analyse import extract_tags
from qisite.definitions import MAX_TEXT_CONTENT_LENGTH
phonePattern = re.compile(r'^((13[0-9])|(15[^4,\D])|(14[57])|(17[0])|(18[0,0-9]))\d{8}$')
def validate_phone(phone):
if not phonePattern.match(phone):
raise ValidationError(u'phone:手机号码的格式不正确')
class TimeModel(models.Model):
createTime = models.DateTimeField("创建时间", default=datetime.now)
modifyTime = models.DateTimeField("修改时间", default=datetime.now)
class Meta:
abstract = True
class Paper(TimeModel):
def __unicode__(self):
return self.title
# PAPER_STYLE = ( ('F', '平展'), ('P', '分页'))
QUESTION_NUM_STYLE = (('123', '1.2.3.……'), ('(1)(2)(3)', '(1).(2).(3).……'), ('Q1Q2Q3', 'Q1.Q2.Q3.……'))
PAPER_TYPE = (('T', '模板'), ('I', '实例'))
    code = models.CharField('编码', max_length=100, blank=True, null=True, default=None)  # used to locate the object in tests
title = models.CharField('问卷标题', max_length=500)
description = models.CharField('问卷说明', max_length=500, blank=True)
    # question_set: the paper's questions (reverse FK defined on Question)
inOrder = models.BooleanField('顺序答题', default=False)
questionNumStyle = models.CharField(
'问题标号样式', max_length=50, choices=QUESTION_NUM_STYLE, default=defaultQuestionNumStyle)
lookBack = models.BooleanField('返回修改', default=False)
    # style = models.CharField('展现方式', max_length=5, choices=PAPER_STYLE)  # superseded by the paging field
    # paging = models.BooleanField('分页答题', default=True)  # likely to be superseded by the step field
step = models.BooleanField('分步答题', default=False)
type = models.CharField('问题类型', choices=PAPER_TYPE, max_length=10, default='T')
survey = models.ForeignKey('Survey', related_name='paperReversed_set', verbose_name="调查", null=True,
                               blank=True)  # reverse link to the running survey, used for automatic deletion
createBy = models.ForeignKey(
account.models.User, verbose_name="创建者", related_name='paperCreated_set', blank=True, null=True)
modifyBy = models.ForeignKey(
account.models.User, verbose_name="修改者", related_name='paperModified_set', blank=True, null=True)
    # sample_set: collected samples (reverse FK defined on Sample)
previewSurvey = models.ForeignKey(
'Survey', related_name='paperPreview_set', verbose_name="预览对象", null=True, blank=True, on_delete=models.SET_NULL)
def clean(self):
        '''
        Note:
        The check that createBy and modifyBy are not empty lives here because we often
        create Paper objects for tests; declaring the fields as non-nullable would force
        us to create an extra User every time, which is inconvenient.
        '''
        if self.createBy is None:
            raise ValidationError(u'创建者信息不能为空')
        if self.modifyBy is None:
            raise ValidationError(u'修改者信息不能为空')
        # Clear branches whose jump target is not a later question
invalidBranchSet = Branch.objects.filter(
question__paper=self, question__ord__gte=F('nextQuestion__ord'))
invalidBranchSet.update(nextQuestion=None)
class Meta:
verbose_name = "问卷"
verbose_name_plural = "[01].问卷"
ordering = ["title"]
def getQuestionSetInOrder(self):
return self.question_set.order_by('ord')
def getNumStyleAvailable(self):
return Paper.QUESTION_NUM_STYLE
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def copy(self, user=None):
        '''
        Copy this paper.
        '''
        # Copy the paper object's own fields
newPaper = copy.copy(self)
newPaper.createTime = datetime.now()
newPaper.modifyTime = datetime.now()
if user:
newPaper.createBy = user
newPaper.modifyBy = user
newPaper.id = None
newPaper.save()
        # Copy all of the paper's questions
questionContrast = {}
for question in self.question_set.all():
newQuestion = question.copy(user)
newQuestion.paper = newPaper
newQuestion.save()
questionContrast[question] = newQuestion
        # Point branches at the newly copied questions
for question in newPaper.question_set.all():
for branch in question.branch_set.all():
if branch.nextQuestion in questionContrast:
branch.nextQuestion = questionContrast[branch.nextQuestion]
branch.save()
return newPaper
def getSampleCount(self):
"""
获取文件采集到的样本数量
"""
return self.sample_set.count()
def createPaperInstance(self, user):
        '''
        Create a survey paper instance from this template paper.
        '''
if self.type != 'T':
raise Exception('非模板Paper对象不能创建Instance')
newPaper = self.copy(user)
newPaper.type = 'I'
newPaper.save()
return newPaper
def isStepNeed(self):
"""
检查文件是否需要分步
"""
count = Branch.objects.filter(question__paper=self, nextQuestion__isnull=False).count()
return count != 0
class PaperCatalog(TimeModel):
name = models.CharField("目录名称", max_length=100)
code = models.CharField("目录编码", max_length=50, unique=True)
parent = models.ForeignKey('self', verbose_name="上级目录", blank=True, null=True)
ord = models.IntegerField("排序号")
paper_set = models.ManyToManyField(Paper, verbose_name='包含问卷', through='PaperCatalogPaper')
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='paperCatalogCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='paperCatalogModified_set')
class Meta:
verbose_name = "问卷目录"
verbose_name_plural = "[02].问卷目录"
class PaperCatalogPaper(TimeModel):
paperCatalog = models.ForeignKey(PaperCatalog, verbose_name='对应的目录')
paper = models.ForeignKey(Paper, verbose_name='对应的问卷')
ord = models.IntegerField("排序号")
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='paperCatalogPaperCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='paperCatalogPaperModified_set')
class Meta:
verbose_name = "问卷目录-问卷"
verbose_name_plural = "[03].问卷目录-问卷"
class Question(TimeModel):
QUESTION_TYPE = (
('Single', '单选题'), ('Multiple', '多选题'), ('Text', '问答题'), ('Score', '评分题'),
('EndValid', '有效结束'), ('EndInvalid', '无效结束')
)
QUESTION_TYPE_AVAILABLE = ('Single', 'Multiple', 'Text', 'Score')
BRANCH_NUM_STYLE = (('ABC', 'A.B.C.……'), ('abc.', 'a.b.c.……'), ('123.', '1.2.3……'))
text = models.CharField('文字', max_length=300)
type = models.CharField('题型', max_length=100, choices=QUESTION_TYPE)
ord = models.IntegerField("排序号")
    # contentLength = models.IntegerField('内容长度', default=MAX_TEXT_CONTENT_LENGTH)  # text questions only; might also cap the number of choices for multiple-choice
    contentLength = models.IntegerField('内容长度', default=0)  # text questions only; might also cap the number of choices for multiple-choice
    valueMin = models.FloatField('最小值', null=True, blank=True, default=0)  # score questions only
    valueMax = models.FloatField('最大值', null=True, blank=True, default=10)  # score questions only
    # branch_set: the question's branches (reverse FK defined on Branch)
confused = models.BooleanField('乱序', default=False)
branchNumStyle = models.CharField('标号样式', max_length=50, choices=BRANCH_NUM_STYLE, default=defaultBranchNumStyle)
    # nextQuestion: is this field really needed? It looks redundant.
nextQuestion = models.ForeignKey('self', verbose_name='下一题', blank=True, null=True, on_delete=models.SET_NULL)
paper = models.ForeignKey(Paper, verbose_name='所属问卷', null=True, blank=True)
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='questionCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='questionModified_set')
def clean(self):
        '''
        Validate the question model.
        '''
if self.type not in Question.QUESTION_TYPE_AVAILABLE:
raise ValidationError(u'无效的问题类型')
if self.type in ('Single', 'Multiple') and self.contentLength != 0:
raise ValidationError(u'选择题不能有填写值长度')
if self.type not in ('Single', 'Multiple') and self.confused:
raise ValidationError(u'非选择题不能指定乱序选项')
def setOrd(self, newOrd):
"""
修改当前问题的顺序,其他问题将自动响应调整顺序,并且讲删除无效的选项跳转引用
参数:
newOrd 问题的新排序号
"""
paper = Paper.objects.select_for_update().get(id=self.paper.id)
ord = self.ord
        # Lock all of the paper's questions
questionList = list(paper.question_set.select_for_update().order_by('ord'))
questionCount = len(questionList)
if newOrd == ord:
return
if (newOrd > questionCount - 1) or (newOrd < 0):
            # TODO: raise a more specific exception type here
raise Exception()
questionList.insert(newOrd, questionList.pop(ord))
for i, q in enumerate(questionList):
if q.ord != i:
q.ord = i
q.save()
paper.clean()
def getStemText(self):
        '''
        Read the stem text directly from the question.
        '''
return self.text
getStemText.short_description = '题干信息'
def getBranchSetInOrder(self):
return self.branch_set.order_by('ord')
def getNum(self):
        # Special handling for the terminal question types
if self.type in ('EndValid', 'EndInvalid'):
return self.get_type_display()
else:
numStyle = NumStyle(self.paper.questionNumStyle)
return numStyle.getNum(self.ord)
def __unicode__(self):
return u"(%d)(%s)%s" % (self.ord, self.type, unicode(self.text))
class Meta:
verbose_name = "问题"
verbose_name_plural = "[04].问题"
ordering = ["ord"]
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def getScoreStat(self, max=10):
"""
获取评分分布统计信息
"""
querySet = SampleItem.objects.filter(question=self)
r1 = querySet.values('score').annotate(count=models.Count('score'))
r2 = {i['score']: i['count']for i in r1}
r3 = sorted(r2.items(), key=operator.itemgetter(1), reverse=True)[:10]
r4 = zip(*r3)
return r4
def getTextKeywords(self, n=10):
"""
从文字题中提取关键字
"""
querySet = SampleItem.objects.filter(question=self)
text = ' '.join([rec['content'] for rec in querySet.values('content')])
tags = extract_tags(text, topK=n)
return tags
def copy(self, user=None):
        '''
        Copy this question.
        '''
        # Copy the question object's own fields
newQuestion = copy.copy(self)
newQuestion.createTime = datetime.now()
newQuestion.modifyTime = datetime.now()
if user:
newQuestion.createBy = user
newQuestion.modifyBy = user
newQuestion.id = None
newQuestion.save()
        # Copy the question's branches
for branch in self.branch_set.all():
newBranch = branch.copy(user)
newBranch.question = newQuestion
newBranch.save()
return newQuestion
class QuestionCatalog(TimeModel):
name = models.CharField("目录名称", max_length=100)
code = models.CharField("目录编码", max_length=50, unique=True)
parent = models.ForeignKey('self', blank=True, null=True, verbose_name="上级目录")
ord = models.IntegerField("排序号")
question_set = models.ManyToManyField(Question, verbose_name='包含问题', through='QuestionCatalogQuestion')
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='questionCatalogCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者",
related_name='questionCatalogModified_set')
class Meta:
verbose_name = "问题目录"
verbose_name_plural = "[05].问题目录"
def __unicode__(self):
return '%s(%s)' % (self.name, self.code)
class QuestionCatalogQuestion(TimeModel):
questionCatalog = models.ForeignKey(QuestionCatalog, verbose_name='对应的目录')
question = models.ForeignKey(Question, verbose_name='对应的问题')
ord = models.IntegerField("排序号")
createBy = models.ForeignKey(
account.models.User, verbose_name="创建者", related_name='questionCatalogQuestionCreated_set')
modifyBy = models.ForeignKey(
account.models.User, verbose_name="修改者", related_name='questionCatalogQuestionModified_set')
class Meta:
verbose_name = "问题目录-问题"
verbose_name_plural = "[06].问题目录-问题"
class Resource(TimeModel):
RESOURCE_TYPE = (('Picture', '图片'), ('Audio', '音频'), ('Video', '视频'))
resourceType = models.CharField('文字', max_length=50, choices=RESOURCE_TYPE)
resourceUrl = models.CharField('文字', max_length=1000)
width = models.FloatField("资源宽度")
height = models.FloatField("资源高度")
question = models.ForeignKey(Question, verbose_name="对应问题")
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='resourceCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='resourceModified_set')
class Meta:
verbose_name = "资源"
verbose_name_plural = "[08].资源"
class Branch(TimeModel):
text = models.CharField('文字', max_length=200)
ord = models.IntegerField('排序号')
nextQuestion = models.ForeignKey(
        # How should the outcome be encoded here (end as invalid / end as valid)?
'Question', verbose_name='下个问题', related_name='fromBranch', null=True, blank=True, on_delete=models.SET_NULL)
question = models.ForeignKey(Question, verbose_name="问题")
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='branchCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='branchModified_set')
class Meta:
verbose_name = "题支"
verbose_name_plural = "[09].题支"
def getNum(self):
numStyle = NumStyle(self.question.branchNumStyle)
return numStyle.getNum(self.ord)
def getReachableQuestionList(self):
        # All questions that come after the question this branch belongs to
question = self.question
paper = question.paper
reachableQuestion = list(paper.question_set.filter(ord__gt=question.ord).order_by('ord'))
return reachableQuestion
def getSystemPredefined(self):
        # Get the system-predefined questions
systemPredefinedCatalog = QuestionCatalog.objects.filter(code='SystemPredefined')[0]
systemPredefined = list(systemPredefinedCatalog.question_set.order_by('ord'))
return systemPredefined
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def copy(self, user=None):
newBranch = copy.copy(self)
newBranch.createTime = datetime.now()
newBranch.modifyTime = datetime.now()
if user:
newBranch.createBy = user
newBranch.modifyBy = user
newBranch.id = None
newBranch.save()
return newBranch
def getSelectedCount(self):
"""
获取选择该选项的样本项的数量,实际就是统计该选项被用户选了几次
"""
return self.sampleitem_set.count()
def getSelectedPct(self):
"""
获得当前选项的选择比例
其值为0-100之间
"""
sampleCount = self.question.paper.sample_set.count()
if sampleCount == 0:
return None
else:
return self.getSelectedCount() / sampleCount * 100
def oneYearLater():
return datetime.now() + relativedelta(years=1)
class Survey(TimeModel):
    code = models.CharField('编码', max_length=100, blank=True, null=True, default=None)  # used to locate the object in tests
paper = models.ForeignKey('Paper', related_name='survey_set', verbose_name="问卷", null=True, blank=True)
    # targetCust_set: target customer list (reverse FK defined on TargetCust)
targetOnly = models.BooleanField('定向调查', default=False)
custList = models.ForeignKey('CustList', verbose_name='客户清单', blank=True, null=True, default=None)
state = models.CharField("状态", max_length=5, default='A')
paused = models.BooleanField('暂停', default=False)
shared = models.BooleanField('是否分享', default=False)
viewResult = models.BooleanField('查看结果', default=True)
anonymous = models.BooleanField('查看结果', default=False)
resubmit = models.BooleanField('是否允许重填', default=True)
password = models.CharField("参与密码", max_length=10, blank=True)
ipLimit = models.IntegerField("IP限制", default=5)
macLimit = models.IntegerField("MAC限制", default=5)
publishTime = models.DateTimeField("发布时间", default=datetime.now)
endTime = models.DateTimeField("结束时间", default=oneYearLater)
    # constraints: participant constraints (on hold)
pay = models.BooleanField('查看结果', default=True)
hardCost = models.FloatField('调查费', default=0)
bonus = models.FloatField('奖金', default=0)
fee = models.FloatField('手续费', default=0)
validSampleLimit = models.IntegerField("有效样本上限", default=0) # 0 表示无限制
lastSmsSendTime = models.DateTimeField("最后一次推送短信时间", blank=True, null=True, default=None)
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='surveyCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='surveyModified_set')
def getResubmitText(self):
return u'是' if self.resubmit else u'否'
def getVeiwResultText(self):
return u'是' if self.viewResult else u'否'
def getAnonymousText(self):
return u'是' if self.anonymous else u'否'
def getSharedText(self):
return u'是' if self.shared else u'否'
class Meta:
verbose_name = "调查"
verbose_name_plural = "[10].调查"
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def __unicode__(self):
if self.custList:
name = self.custList.name
else:
name = 'None'
return '<%s,%s>' % (self.paper.title, name)
class TargetCust(TimeModel):
name = models.CharField('姓名', max_length=50)
phone = models.CharField('手机号码', max_length=50)
email = models.CharField('电子邮件', max_length=100)
defineInfo_set = models.ManyToManyField('DefineInfo', verbose_name='附件信息', blank=True, null=True)
    # sample = models.ForeignKey('Sample', verbose_name='样本')  # a one-to-one relation is already defined on Sample (ok)
token = models.CharField('访问令牌', max_length=50)
survey = models.ForeignKey(Survey, verbose_name="所属调查", related_name='targetCust_set')
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='targetCustCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='targetCustModified_set')
class Meta:
verbose_name = "目标客户"
verbose_name_plural = "[11].目标客户"
def __unicode__(self):
return u'<%s,%s>' % (self.name, self.phone)
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
class Sample(TimeModel):
    # sampleItems: sample items (reverse FK defined on SampleItem)
targetCust = models.ForeignKey('TargetCust', verbose_name='清单项', null=True, blank=True)
# session字段用户保存无定向调查客户端标识信息
session = models.CharField('客户端会话标识', max_length=40, null=True, blank=True)
user = models.ForeignKey(account.models.User, verbose_name="参与用户", null=True,
blank=True) # 这里是否设置一个related_name
ipAddress = models.CharField('受访IP', max_length=50)
# macAddress = models.CharField('受访MAC', max_length=50) web端实际无法获得该字段
finished = models.BooleanField('是否完成', default=True)
# lastQuestion用于单步答题,保存最后一次回答的题目,以便之后继续回答
# lastQuestion = models.ForeignKey('Question', verbose_name='下一题', null=True, blank=True, on_delete=models.SET_NULL)
# nextQuestion用于单步答题,保存最后一次回答的题目,以便之后继续回答
# 之前考虑使用的是lastQuestion但是每次进入答题页面时,还要显示判断上次答题结果才能知道要从哪题开始,不直观。
nextQuestion = models.ForeignKey('Question', verbose_name='下一题', null=True, blank=True, on_delete=models.SET_NULL)
isValid = models.BooleanField('是否有效', default=True)
paper = models.ForeignKey(Paper, verbose_name='所属问卷')
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='sampleCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='sampleModified_set')
class Meta:
verbose_name = "样本"
verbose_name_plural = "[12].样本"
class SampleItem(TimeModel):
question = models.ForeignKey('Question', verbose_name='问题')
branch_set = models.ManyToManyField(Branch, verbose_name='已选')
content = models.CharField('内容', max_length=MAX_TEXT_CONTENT_LENGTH, blank=True, null=True)
score = models.FloatField('得分', default=0)
sample = models.ForeignKey(Sample, verbose_name='所属样本')
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='sampleItemCreated_set',
null=True, blank=True)
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='sampleItemModified_set',
null=True, blank=True)
class Meta:
verbose_name = "样本项"
verbose_name_plural = "[13].样本项"
class CustList(TimeModel):
name = models.CharField('清单名称', max_length=50)
descrition = models.CharField('清单说明', max_length=200, blank=True, null=True, default='')
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='custListCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='custListModified_set')
class Meta:
verbose_name = "客户清单"
verbose_name_plural = "[14].客户清单"
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def __unicode__(self):
return self.name
class CustListItem(TimeModel):
name = models.CharField('客户名称', max_length=50)
phone = models.CharField('手机号', max_length=50, validators=[validate_phone])
email = models.CharField('电子邮件', max_length=100, blank=True, null=True, default='')
custList = models.ForeignKey(CustList, verbose_name='所属清单', related_name="custListItem_set")
defineInfo_set = models.ManyToManyField('DefineInfo', verbose_name='附件信息', blank=True, null=True)
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='custListItemCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='custListItemModified_set')
class Meta:
verbose_name = "客户清单项"
verbose_name_plural = "[15].客户清单项"
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def __unicode__(self):
return self.name
class DefineInfo(TimeModel):
name = models.CharField('信息名称', max_length=100)
value = models.CharField('信息值', max_length=200)
ord = models.IntegerField('排序号')
createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='defineInfoCreated_set')
modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='defineInfoModified_set')
class Meta:
verbose_name = "自定义信息"
verbose_name_plural = "[16].自定义信息"
|
[
"xmduhan@gmail.com"
] |
xmduhan@gmail.com
|
3dbdb608cd2de3f1278d8f0339287fd5ce40c676
|
fcc88521f63a3c22c81a9242ae3b203f2ea888fd
|
/Python3/0844-Backspace-String-Compare/soln-1.py
|
cf9b6afca02d5b7deeaed1a8aa8d927a70cbd4e0
|
[
"MIT"
] |
permissive
|
wyaadarsh/LeetCode-Solutions
|
b5963e3427aa547d485d3a2cb24e6cedc72804fd
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
refs/heads/master
| 2022-12-06T15:50:37.930987
| 2020-08-30T15:49:27
| 2020-08-30T15:49:27
| 291,811,790
| 0
| 1
|
MIT
| 2020-08-31T19:57:35
| 2020-08-31T19:57:34
| null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
class Solution:
def backspaceCompare(self, S, T):
"""
:type S: str
:type T: str
:rtype: bool
"""
        # Scan both strings from the end, skipping characters erased by '#'
        # backspaces, and compare only the characters that survive.
        i, j = len(S) - 1, len(T) - 1
        bs, bt = 0, 0  # pending backspace counts for S and T
        while True:
            while i >= 0 and (bs or S[i] == '#'):
                bs = bs + 1 if S[i] == '#' else bs - 1
                i -= 1
            while j >= 0 and (bt or T[j] == '#'):
                bt = bt + 1 if T[j] == '#' else bt - 1
                j -= 1
            if not(i >= 0 and j >= 0 and S[i] == T[j]):
                # Mismatch, or at least one string is exhausted; the strings
                # match only if both pointers ran off the front together.
                return i == j == -1
            i, j = i - 1, j - 1
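# A minimal usage sketch (not part of the original solution): "ab#c" and
# "ad#c" both reduce to "ac" once the '#' backspaces are applied, while
# "a#c" reduces to "c" and does not match "b".
if __name__ == "__main__":
    sol = Solution()
    print(sol.backspaceCompare("ab#c", "ad#c"))  # expected: True
    print(sol.backspaceCompare("a#c", "b"))      # expected: False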
|
[
"zhang623@wisc.edu"
] |
zhang623@wisc.edu
|
41ef232e7c76144c75891a57dcc4d00868f32726
|
673f9b85708affe260b892a4eb3b1f6a0bd39d44
|
/Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py
|
555b47c8dc0fc9a2a6f68e9ffd73c8a00e312d4d
|
[
"MIT"
] |
permissive
|
i2tResearch/Ciberseguridad_web
|
feee3fe299029bef96b158d173ce2d28ef1418e4
|
e6cccba69335816442c515d65d9aedea9e7dc58b
|
refs/heads/master
| 2023-07-06T00:43:51.126684
| 2023-06-26T00:53:53
| 2023-06-26T00:53:53
| 94,152,032
| 14
| 0
|
MIT
| 2023-09-04T02:53:29
| 2017-06-13T00:21:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 24,619
|
py
|
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, _is_numpy_dev, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize(
"nan",
[
np.nan,
pytest.param(
np.float64("NaN"),
marks=pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
),
),
float("nan"),
],
)
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
with pytest.raises(TypeError):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
with pytest.raises(TypeError):
td.__rfloordiv__(dt64)
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
with pytest.raises(TypeError):
td.__rfloordiv__(np.float64(2.0))
with pytest.raises(TypeError):
td.__rfloordiv__(np.uint8(9))
with pytest.raises(TypeError, match="Invalid dtype"):
# deprecated GH#19761, enforced GH#29797
td.__rfloordiv__(np.int32(2.0))
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
with pytest.raises(TypeError, match="Invalid dtype"):
# Deprecated GH#19761, enforced GH#29797
# TODO: GH-19761. Change to TypeError.
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
with pytest.raises(TypeError):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
with pytest.raises(TypeError):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError):
15 % td
with pytest.raises(TypeError):
16.0 % td
with pytest.raises(TypeError):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
with pytest.raises(TypeError):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
def test_rdivmod_offset(self):
result = divmod(offsets.Hour(54), Timedelta(hours=-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_rdivmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
with pytest.raises(TypeError):
divmod(Timestamp("2018-01-22"), td)
with pytest.raises(TypeError):
divmod(15, td)
with pytest.raises(TypeError):
divmod(16.0, td)
with pytest.raises(TypeError):
divmod(np.array([22, 24]), td)
# ----------------------------------------------------------------
@pytest.mark.parametrize(
"op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub]
)
@pytest.mark.parametrize(
"arr",
[
np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]),
np.array([Timestamp.now(), Timedelta("1D")]),
],
)
def test_td_op_timedelta_timedeltalike_array(self, op, arr):
with pytest.raises(TypeError):
op(arr, Timedelta("1D"))
|
[
"ulcamilo@gmail.com"
] |
ulcamilo@gmail.com
|
1dd9830ae74fa5d06a572e1f0b7f6445fd3ae66c
|
bb27630e7af7f1bccbb5cfc892d0b0e6445fe874
|
/05_django/01_djangoIntro/django_formTest/formTestApp/apps.py
|
f318035ab1e56dfebb9f0f7f047dd9a3a89e5c53
|
[] |
no_license
|
MrBreakIT/pythonStack
|
fe8cd9418ee1060ada8cd1c446332d81facecf4e
|
f615436dbff581c50ded70dec6532f6339977c1d
|
refs/heads/main
| 2023-02-06T14:37:05.688885
| 2020-12-27T22:37:09
| 2020-12-27T22:37:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
from django.apps import AppConfig
class FormtestAppConfig(AppConfig):
name = 'formTestApp'
|
[
"johnpike1022@gmail.com"
] |
johnpike1022@gmail.com
|
9dea79ebe2acef41d229a77657e6b1cf232caf43
|
5215715a4cbcf9ce065b1542db224a2b1997c760
|
/T3/t3.py
|
5523731288ed9eb2d6e7a4f542b5a35d71a18b89
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
Cgipson06/reddit
|
02deac29ead779890e42d48400d2233ce888e5a0
|
deb1da398840bbd311a79eec25ef2a8b5a8ed5b1
|
refs/heads/master
| 2021-01-08T23:19:55.245559
| 2014-12-28T20:52:26
| 2014-12-28T20:52:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,034
|
py
|
#/u/GoldenSights
import praw
import time
import sqlite3
import datetime
import random
USERAGENT = """
/u/GoldenSights T3 data collection: Gathering Submission data for
statistical analysis.
More info at https://github.com/voussoir/reddit/tree/master/T3
"""
r = praw.Reddit(USERAGENT)
print('Connected to reddit.')
sql = sqlite3.connect('D:/T3/t3.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS meta(label TEXT, data TEXT)')
cur.execute(('CREATE TABLE IF NOT EXISTS posts(idint INT, idstr TEXT, '
'created INT, self INT, nsfw INT, author TEXT, title TEXT, '
'url TEXT, selftext TEXT, score INT, subreddit TEXT, distinguish INT, '
'textlen INT)'))
DISTINGUISHMAP = {0:"user", 1:"moderator", 2:"admin"}
DISTINGUISHMAP_R = {"user":0, "moderator":1, "admin":2}
LOWERBOUND = 9999000
# 5yba0
UPPERBOUND = 164790958
# 2q41im
# 1,679,616 = 10000
# 9,999,000 = 5yba0
# 60,466,176 = 100000
# 120,932,352 = 200000
# 164,790,958 = 2q41im
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
class Post:
''' Used to map the indices of DB entries to names '''
def __init__(self, data):
self.idint = data[0]
self.idstr = data[1]
self.created_utc = data[2]
self.is_self = True if data[3] == 1 else False
self.over_18 = True if data[4] == 1 else False
self.author = data[5]
self.title = data[6]
self.url = data[7]
self.selftext = data[8]
self.score = data[9]
self.subreddit = data[10]
self.distinguished = DISTINGUISHMAP[data[11]]
self.textlen = data[12]
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
"""Converts an integer to a base36 string."""
    if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
if type(i) == int:
return base36encode(i)
if type(i) == str:
return base36decode(i)
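# Illustrative sanity check (added, not in the original script): the integer
# bounds above and the base36 forms noted in their comments round-trip
# through b36().
assert b36(9999000) == '5yba0' and b36('5yba0') == 9999000
assert b36(164790958) == '2q41im' and b36('2q41im') == 164790958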
def human(timestamp):
day = datetime.datetime.utcfromtimestamp(timestamp)
human = datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
return human
def process(itemid, log=True, kill=True):
if isinstance(itemid, str):
itemid = [itemid]
if isinstance(itemid, list):
if isinstance(itemid[0], str):
itemid = verify_t3(itemid)
try:
itemid = remove_existing(itemid)
temp = itemid[:]
except Exception:
return
itemid = r.get_info(thing_id=itemid)
try:
len(itemid)
except:
print(temp, "DEAD")
if kill:
logdead(temp[0])
process(temp, kill=kill)
return
for index in range(len(itemid)):
item = itemid[index]
item.idint = b36(item.id)
item.idstr = item.id
if item.distinguished is None:
item.distinguished = 0
else:
item.distinguished = DISTINGUISHMAP_R[item.distinguished]
item.url = "self" if item.is_self else item.url
item.created_utc = int(item.created_utc)
item.is_self = 1 if item.is_self else 0
item.over_18 = 1 if item.over_18 else 0
item.sub = item.subreddit.display_name
item.textlen = len(item.selftext)
try:
item.auth = item.author.name
except AttributeError:
item.auth = "[deleted]"
item = [item.idint, item.idstr, item.created_utc,
item.is_self, item.over_18, item.auth, item.title,
item.url, item.selftext, item.score, item.sub,
item.distinguished, item.textlen]
itemid[index] = item
if log:
logdb(itemid)
else:
return itemid
if len(itemid) < len(temp):
process(temp)
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
def logdb(items):
for item in items:
cur.execute('INSERT INTO posts VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', item)
sql.commit()
def logdead(i):
#If an ID is dead, let's at least add it to the db.
i = i.replace('t3_', '')
data = [b36(i), i, 0, 0, 0, '?', '?', '?', '?', 0, '?', 0, 0]
logdb([data])
def verify_t3(items):
for index in range(len(items)):
i = items[index]
if 't3_' not in i:
items[index] = 't3_' + i
return items
def remove_existing(items):
done = False
items = verify_t3(items)
while not done:
done = True
for item in items:
cur.execute('SELECT * FROM posts WHERE idint=?', [b36(item[3:])])
f = cur.fetchone()
if f:
items.remove(item)
done = False
break
if len(items) == 0:
raise Exception("Nothing new")
return items
def processrange(lower, upper, kill=True):
if isinstance(lower, str):
lower = b36(lower)
if isinstance(upper, int):
upper = lower + upper
if isinstance(upper, str):
upper = b36(upper)
if upper <= lower:
print("Upper must be higher than lower")
return
ids = [b36(x) for x in range(lower, upper)]
while len(ids) > 0:
p = ids[:100]
print("%s >>> %s (%d)" % (p[0], p[-1], len(ids)))
ids = ids[100:]
process(p, kill=kill)
def lastitem():
cur.execute('SELECT * FROM posts ORDER BY idint DESC LIMIT 1')
return cur.fetchone()[1]
def show():
filea = open('show/missing.txt', 'w')
fileb = open('show/stats.txt', 'w')
cur.execute('SELECT Count(*) FROM posts')
count = cur.fetchone()
count = count[0]
counts = '{0:,}'.format(count)
mainstats = '%s posts collected; ' % counts
print('Current total:', counts)
print('Counting dead posts')
cur.execute('SELECT * FROM posts WHERE created=0')
dead = cur.fetchall()
dead = [x[1] for x in dead]
deadcount = len(dead)
deadcount = '{0:,}'.format(deadcount)
mainstats += '%s dead.\n' % deadcount
for deaditem in dead:
print(deaditem, file=filea)
filea.close()
print('Counting selfposts')
cur.execute('SELECT * FROM posts WHERE self=1')
self = cur.fetchall()
self = len(self)
link = count-self
selfs = '{0:,}'.format(self)
links = '{0:,}'.format(link)
selfstats = '%s linkposts; %s selfposts\n' % (links, selfs)
readmefile = open('README.md', 'r')
readmelines = readmefile.readlines()
readmefile.close()
readmelines[3] = mainstats
readmelines[4] = selfstats
readmefile = open('README.md', 'w')
readmefile.write(''.join(readmelines))
readmefile.close()
#STATS TIME
print('Writing subreddit stats')
cur.execute('SELECT * FROM posts')
subredditcounts = {}
while True:
fetch = cur.fetchone()
if fetch:
fetch = Post(fetch)
try:
subredditcounts[fetch.subreddit] += 1
except KeyError:
subredditcounts[fetch.subreddit] = 1
else:
break
subkeys = list(subredditcounts.keys())
subkeys.sort(key=subredditcounts.get, reverse=True)
for key in subkeys:
out = key
out += '.'*(25-len(key))
num = '{0:,}'.format(subredditcounts[key])
out += '.'*(14-len(num))
out += num
print(out, file=fileb)
fileb.close()
|
[
"edalool@yahoo.com"
] |
edalool@yahoo.com
|