blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1535301b3f78a727e980c0c4e00988e46ff1b61a | 268c588de53d48f2e48c694535e27c1be104229d | /Decorator_Pattern.py | dbbacb277861f380541934a98021297ec79ff1d3 | [] | no_license | wax8280/Python_Design_Patterns | def64b1662924807946a9847ac1bf0437382a716 | 88fb08ad3605fb06166bf45d814f5b85a37364b5 | refs/heads/master | 2021-01-11T01:21:14.964828 | 2016-10-14T15:40:42 | 2016-10-14T15:40:42 | 70,715,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | # coding:utf-8
import functools
def memoize(fn):
known = dict()
@functools.wraps(fn)
def memoizer(*args):
if args not in known:
known[args] = fn(*args)
return known[args]
return memoizer
@memoize
def nsum(n):
'''Returns the sum of the first n numbers'''
assert (n >= 0), 'n must be >= 0'
return 0 if n == 0 else n + nsum(n - 1)
@memoize
def fibonacci(n):
'''Returns the nth number of the Fibonacci sequence'''
assert (n >= 0), 'n must be >= 0'
return n if n in (0, 1) else fibonacci(n - 1) + fibonacci(n - 2)
if __name__ == '__main__':
from timeit import Timer
measure = [{'exec': 'fibonacci(100)', 'import': 'fibonacci', 'func': fibonacci},
{'exec': 'nsum(200)', 'import': 'nsum', 'func': nsum}]
for m in measure:
t = Timer('{}'.format(m['exec']), 'from __main__ import {}'.format(m['import']))
print('name: {}, doc: {}, executing: {}, time: {}'.format(m['func'].__name__, m['func'].__doc__, m['exec'],
t.timeit()))
| [
"wax8280@163.com"
] | wax8280@163.com |
bea389a719f322ce117b6f02b2fbd03de15d243f | 5dac0010edb884cd6d412954c79b75fa946e252d | /101-AWS-S3-Hacks/createobject.py | b2b8a10c68fbf4ae56917a6656cd95a009038b7b | [] | no_license | ralic/aws_hack_collection | c1e1a107aa100e73b6e5334ed9345576057bdc9d | 7b22018169e01d79df7416dd149c015605dea890 | refs/heads/master | 2023-01-09T04:31:57.125028 | 2020-02-06T11:21:39 | 2020-02-06T11:21:39 | 90,350,262 | 3 | 1 | null | 2022-12-26T20:03:05 | 2017-05-05T07:39:34 | Python | UTF-8 | Python | false | false | 396 | py | #!/usr/bin/python
"""
- Author : Nag m
- Hack : Create a new object in S3
- Info : Create a object named
- myfile.txt
"""
import boto
def createaobject():
bucket = conn.get_bucket("101-s3-aws")
obj = bucket.new_key("myfile.txt")
obj.set_contents_from_string("This is my first object created in S3")
if __name__ == "__main__":
conn = boto.connect_s3()
createaobject()
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
e5202820096b9831f3b03b5f13d7933f9f22108b | ac611b732321d2496862b0c4d7368d37eb1ead5c | /Blog/blogDevMedia/migrations/0001_initial.py | 5fefcd9aed347aa33b619b4e5c85f6a7e6ab79a2 | [] | no_license | WellingtonIdeao/django-admin-miniblog | 85e0c61f186b75a4b11675a6d973dde73d0c1720 | a47ee8d6bbefac4280d6d0abbee1fb43ab030087 | refs/heads/master | 2020-09-15T16:18:30.319187 | 2019-11-22T23:25:20 | 2019-11-22T23:25:20 | 223,501,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # Generated by Django 2.2.7 on 2019-11-11 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Postagem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=100)),
('descricao', models.CharField(max_length=200)),
('conteudo', models.TextField()),
],
),
]
| [
"wellington.ideao@gmail.com"
] | wellington.ideao@gmail.com |
4d369e4a896df1bf1b0412392fc0f664e190cedd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_fore.py | 4ee9f0775ec26099d96243967f7acf17c2d722c6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py |
#calss header
class _FORE():
def __init__(self,):
self.name = "FORE"
self.definitions = [u'(especially on ships) towards or in the front']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c0bc4d868d66841eeac3f734176734a63f8f00b7 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/themes/_dark_minimal.py | 65ceb00baefed17600816474b78dad06a1826a14 | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 2,451 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
json = {
"attrs": {
"Figure" : {
"background_fill_color": "#20262B",
"border_fill_color": "#15191C",
"outline_line_color": "#E0E0E0",
"outline_line_alpha": 0.25
},
"Grid": {
"grid_line_color": "#E0E0E0",
"grid_line_alpha": 0.25
},
"Axis": {
"major_tick_line_alpha": 0,
"major_tick_line_color": "#E0E0E0",
"minor_tick_line_alpha": 0,
"minor_tick_line_color": "#E0E0E0",
"axis_line_alpha": 0,
"axis_line_color": "#E0E0E0",
"major_label_text_color": "#E0E0E0",
"major_label_text_font": "Helvetica",
"major_label_text_font_size": "1.025em",
"axis_label_standoff": 10,
"axis_label_text_color": "#E0E0E0",
"axis_label_text_font": "Helvetica",
"axis_label_text_font_size": "1.25em",
"axis_label_text_font_style": "normal"
},
"Legend": {
"spacing": 8,
"glyph_width": 15,
"label_standoff": 8,
"label_text_color": "#E0E0E0",
"label_text_font": "Helvetica",
"label_text_font_size": "1.025em",
"border_line_alpha": 0,
"background_fill_alpha": 0.25,
"background_fill_color": "#20262B"
},
"ColorBar": {
"title_text_color": "#E0E0E0",
"title_text_font": "Helvetica",
"title_text_font_size": "1.025em",
"title_text_font_style": "normal",
"major_label_text_color": "#E0E0E0",
"major_label_text_font": "Helvetica",
"major_label_text_font_size": "1.025em",
"background_fill_color": "#15191C",
"major_tick_line_alpha": 0,
"bar_line_alpha": 0
},
"Title": {
"text_color": "#E0E0E0",
"text_font": "Helvetica",
"text_font_size": "1.15em"
}
}
}
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
efeebd47dc1709a52f66c0796785c9d4a18d99ff | 062fa6891dfe2278bcfa36a00cc8bed4356e9f5b | /examples/remote_store/remote_server.py | 242a24cdda02f56be9be607eb765873d9c12fabd | [
"Apache-2.0"
] | permissive | sepidehhosseinzadeh/mlflow-cpp | f43ffb1dba0e57b9b67fad696966bae683328527 | 724eeaeafbee829201859033315a9d2ebf314844 | refs/heads/master | 2022-12-12T13:41:28.825923 | 2020-06-10T20:42:55 | 2020-06-10T20:42:55 | 158,026,349 | 2 | 0 | Apache-2.0 | 2022-12-08T05:37:42 | 2018-11-17T21:27:07 | Makefile | UTF-8 | Python | false | false | 1,236 | py | from __future__ import print_function
import os
import shutil
import sys
import random
import tempfile
import mlflow
from mlflow import log_metric, log_param, log_artifacts, get_artifact_uri, active_run,\
get_tracking_uri, log_artifact
if __name__ == "__main__":
print("Running {} with tracking URI {}".format(sys.argv[0], get_tracking_uri()))
log_param("param1", 5)
log_metric("foo", 5)
log_metric("foo", 6)
log_metric("foo", 7)
log_metric("random_int", random.randint(0, 100))
run_uuid = active_run().info.run_uuid
# Get run metadata & data from the tracking server
service = mlflow.tracking.MlflowClient()
run = service.get_run(run_uuid)
print("Metadata & data for run with UUID %s: %s" % (run_uuid, run))
local_dir = tempfile.mkdtemp()
message = "test artifact written during run %s within artifact URI %s\n" \
% (active_run().info.run_uuid, get_artifact_uri())
try:
file_path = os.path.join(local_dir, "some_output_file.txt")
with open(file_path, "w") as handle:
handle.write(message)
log_artifacts(local_dir, "some_subdir")
log_artifact(file_path, "another_dir")
finally:
shutil.rmtree(local_dir)
| [
"sepideh.hosseinzadeh.h@gmail.com"
] | sepideh.hosseinzadeh.h@gmail.com |
32c59c5f3c4f7a20d5ebdf417eb2394978f5db44 | 573932f2fc40e94001a3659043b902db4d70a912 | /4-Data-Visualization/dash/components/dash-core-components/tests/integration/dropdown/test_clearable_false.py | f72043bfd1dabb347829ce6ff1b5db07011999a1 | [
"MIT"
] | permissive | BioInformatica-Labs/My-Data-Science-Journey | 69f441f88e9e106975ec27de873d82af2dd351c7 | 92e04aa2e3612a198dc213f2dbd13b5df404bfbe | refs/heads/main | 2023-08-28T05:22:56.970894 | 2021-11-08T20:05:10 | 2021-11-08T20:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py | from dash import Dash, Input, Output, dcc, html
from selenium.webdriver.common.keys import Keys
def test_ddcf001_clearable_false_single(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Dropdown(
id="my-unclearable-dropdown",
options=[
{"label": "New York City", "value": "NYC"},
{"label": "Montreal", "value": "MTL"},
{"label": "San Francisco", "value": "SF"},
],
value="MTL",
clearable=False,
),
html.Div(id="dropdown-value", style={"height": "10px", "width": "10px"}),
]
)
@app.callback(
Output("dropdown-value", "children"),
[Input("my-unclearable-dropdown", "value")],
)
def update_value(val):
return val
dash_duo.start_server(app)
dropdown = dash_duo.find_element("#my-unclearable-dropdown input")
dropdown.send_keys(Keys.BACKSPACE)
dash_duo.find_element("#dropdown-value").click()
assert len(dash_duo.find_element("#dropdown-value").text) > 0
assert dash_duo.get_logs() == []
def test_ddcf002_clearable_false_multi(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Dropdown(
id="my-unclearable-dropdown",
options=[
{"label": "New York City", "value": "NYC"},
{"label": "Montreal", "value": "MTL"},
{"label": "San Francisco", "value": "SF"},
],
value=["MTL", "SF"],
multi=True,
clearable=False,
),
html.Div(id="dropdown-value", style={"height": "10px", "width": "10px"}),
]
)
@app.callback(
Output("dropdown-value", "children"),
[Input("my-unclearable-dropdown", "value")],
)
def update_value(val):
return ", ".join(val)
dash_duo.start_server(app)
dropdown = dash_duo.find_element("#my-unclearable-dropdown input")
dropdown.send_keys(Keys.BACKSPACE)
dropdown.send_keys(Keys.BACKSPACE)
dash_duo.find_element("#dropdown-value").click()
assert len(dash_duo.find_element("#dropdown-value").text) > 0
assert dash_duo.get_logs() == []
| [
"viannaandresouza@gmail.com"
] | viannaandresouza@gmail.com |
b54a31ae9d0c919895d7e4849e0c88033c079784 | 3fd8a3e3f37f9db258df63d8565239b8b8be0f24 | /basic_python/recursive1.py | 0f193c317a780debbb813484ea13ccb457dd86e4 | [] | no_license | raveena17/workout_problems | 713a3e1a6ec513c1ee8b878519171150c6858aa4 | 004812cb7abf096d6f5d20181a29c16f8daaac55 | refs/heads/master | 2021-03-12T19:27:08.013266 | 2017-09-08T16:11:32 | 2017-09-08T16:11:32 | 102,878,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | def fact(n):
if n==1:
return 1
else:
return n*fact(n-1)
#fact(8)-----40320
# fact(5)-----120
| [
"linuxuser@5gws004-linux.fifthgentech.local"
] | linuxuser@5gws004-linux.fifthgentech.local |
210cf835a8be8b9d470daf96cd61f49319e29fe2 | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/16_Web_Services/f_web_application/c_WSGI/a_simple_app.py | bbc45b452f311ac1047e899d9562b58b27dbb7e0 | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 530 | py | """
Purpose: To create a dynamic website
WSGI - Web Server Gateway Interface
- It is a specification that describes how web servers communicate with web applications.
- WSGI has been specified in PEP 3333.
"""
from wsgiref.simple_server import make_server
def hello_world_app(environ, start_response):
status = "200 OK"
headers = [("Content-type", "text/plain")]
start_response(status, headers)
return [b"Hello, World!"]
httpd = make_server("localhost", 8000, hello_world_app)
httpd.serve_forever()
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
1b6ed3df6f5825b6934d6c29dfdaf515c683f87e | c31c8095ce4d4e9686e3e7ad6b004342e49671fa | /forum/migrations/0003_perso_image.py | 86a65ba4537fa1611bb7668b648b1dbee17b6a3e | [] | no_license | Lionalisk/arrakambre | 7bcc96dea2ca2a471572bfb1646256f1382ce25b | 2caece9be5eebf21ddfa87a6c821c32b5d5019a2 | refs/heads/master | 2020-12-07T19:31:24.471090 | 2020-01-09T10:14:29 | 2020-01-09T10:14:29 | 232,782,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # Generated by Django 2.1.1 on 2018-09-16 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forum', '0002_remove_post_titre'),
]
operations = [
migrations.AddField(
model_name='perso',
name='image',
field=models.CharField(default='a', max_length=20),
preserve_default=False,
),
]
| [
"lionel.varaire@free.fr"
] | lionel.varaire@free.fr |
d39ace00459e163cdff4e3d6776bcfa7c4a230f4 | 6ae8717002f8fce4457cceb3375a114ddcb837df | /1-100/20. Valid Parentheses.py | b66f4412d2035e54f8a192c54000a27e0634fce7 | [] | no_license | SunnyMarkLiu/LeetCode | 31aea2954d5a84d11a1c4435f760c1d03c6c1243 | 852fad258f5070c7b93c35252f7404e85e709ea6 | refs/heads/master | 2020-05-30T07:17:33.992197 | 2018-03-29T03:57:51 | 2018-03-29T03:57:51 | 104,643,862 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | #!/home/sunnymarkliu/software/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
1. 用栈来操作,将所有的字符依次入栈,当栈顶的括号和正要入栈的括号匹配时将栈顶的括号弹出且不入栈,否则入栈新的括号。
最后,只有当栈里没有括号时,才表明输入是有效的。
2. 实际上,通过观察可以发现:如果正要入栈的是右括号,而栈顶元素不是能与之消去的相应左括号,那么该输入字符串一定是无效的。
于是,可以大大加快判断过程。
@author: MarkLiu
@time : 17-11-21 下午8:16
"""
class Solution(object):
def isValid2(self, s):
"""
beat 32.85%
:type s: str
:rtype: bool
"""
stack = []
# 括号匹配可以通过字典键值对来匹配
par_map = {')': '(',
']': '[',
'}': '{'}
for c in s:
# 新入栈的字符需要进行匹配
if len(stack) and c in par_map and par_map[c] == stack[-1]: # 所要匹配的括号和栈顶的元素相等, 匹配成功
stack.pop()
else:
stack.append(c)
return len(stack) == 0
def isValid(self, s):
"""
beat 60.91%
:type s: str
:rtype: bool
"""
stack = []
# 括号匹配可以通过字典键值对来匹配
par_map = {')': '(',
']': '[',
'}': '{'}
for c in s:
if c == ']' or c == '}' or c == ')':
if len(stack) == 0 or stack[-1] != par_map[c]:
return False
stack.pop()
else:
stack.append(c)
return len(stack) == 0
print Solution().isValid(']')
| [
"SunnyMarkLiu101@gmail.com"
] | SunnyMarkLiu101@gmail.com |
51b5ce98c2d3c454bb0212a899c1aa3f2f7b4148 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Heating_Supply_Air_Temperature_Proportional_Band_Setpoint.py | b48526f07713fd30d5de45bfc291f268cd171cad | [] | no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Supply_Air_Temperature_Heating_Setpoint import Supply_Air_Temperature_Heating_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Discharge_Air_Temperature_Heating_Setpoint import Discharge_Air_Temperature_Heating_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Proportional_Band_Setpoint import Proportional_Band_Setpoint
class Heating_Supply_Air_Temperature_Proportional_Band_Setpoint(Supply_Air_Temperature_Heating_Setpoint,Discharge_Air_Temperature_Heating_Setpoint,Proportional_Band_Setpoint):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Heating_Supply_Air_Temperature_Proportional_Band_Setpoint
| [
"Andre.Ponnouradjane@non.schneider-electric.com"
] | Andre.Ponnouradjane@non.schneider-electric.com |
200344d96cc290df7c5cf3b41775aa37cee6ea10 | 30150c7f6ed7a10ac50eee3f40101bc3165ebf9e | /src/ai/RemoveDeadTasks.py | 4f69a3614353cf178eb9fa00a568ccc56fbc93a1 | [] | no_license | toontown-restoration-project/toontown | c2ad0d552cb9d5d3232ae6941e28f00c11ca3aa8 | 9bef6d9f823b2c12a176b33518eaa51ddbe3fd2f | refs/heads/master | 2022-12-23T19:46:16.697036 | 2020-10-02T20:17:09 | 2020-10-02T20:17:09 | 300,672,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,692 | py | from . import RepairAvatars
from . import DatabaseObject
import time
# this removes all traces of a list of tasks from all avatars; if they are
# working on the task, it will disappear
class DeadTaskRemover(RepairAvatars.AvatarIterator):
# When we come to this many non-avatars in a row, assume we have
# reached the end of the database.
endOfListCount = 2000
def __init__(self, air, deadTaskIds):
# pass in list of questIds of tasks to remove
self.deadTaskIds = deadTaskIds
RepairAvatars.AvatarIterator.__init__(self, air)
def fieldsToGet(self, db):
return ['setName', 'setMoney',
'setQuests', 'setQuestHistory', 'setRewardHistory']
def processAvatar(self, av, db):
self.printSometimes(av)
# grab a copy of the av's quests
flatQuests = av.getQuests()
# unflatten the quests
questList = []
questLen = 5
for i in range(0, len(flatQuests), questLen):
questList.append(flatQuests[i:i+questLen])
# make list of quests to remove
toRemove = []
for quest in questList:
id = quest[0]
if id in self.deadTaskIds:
reward = quest[3]
toRemove.append([id, reward])
# remove the quests
questsChanged = (len(toRemove) > 0)
questHistoryChanged = 0
rewardHistoryChanged = 0
for questId, rewardId in toRemove:
av.removeQuest(questId)
if av.removeQuestFromHistory(questId):
questHistoryChanged = 1
if av.removeRewardFromHistory(rewardId):
rewardHistoryChanged = 1
# and store the changes in the DB
if questsChanged:
print("Fixing %s: %s" % (av.doId, av.name))
fields = ['setQuests']
if questHistoryChanged:
fields.append('setQuestHistory')
if rewardHistoryChanged:
fields.append('setRewardHistory')
db2 = DatabaseObject.DatabaseObject(self.air, av.doId)
db2.storeObject(av, fields)
def printSometimes(self, av):
now = time.time()
if now - self.lastPrintTime > self.printInterval:
print("Avatar %d: %s" % (av.doId, av.name))
self.lastPrintTime = now
from . import UtilityStart
f = DeadTaskRemover(simbase.air, (list(range(6979, 6999+1)) +
list(range(7979, 7999+1)) +
list(range(8979, 8999+1)) +
list(range(9979, 9999+1)) +
list(range(10979, 10999+1))))
f.start()
run()
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
fda15d4d9037a68453de5a43d5f6e76a8e7721f6 | a80e9eb7ade3d43ce042071d796c00dd10b93225 | /ch_3/L3_flexible.py | 17eca2a9450b5b8399b92c5c76d17a05ea4d8359 | [] | no_license | ksjpswaroop/python_primer | 69addfdb07471eea13dccfad1f16c212626dee0a | 99c21d80953be3c9dc95f3a316c04b0c5613e830 | refs/heads/master | 2020-07-14T17:37:45.923796 | 2014-06-06T22:30:48 | 2014-06-06T22:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # Exercise 3.31
def L3(x, n=None, epsilon=None, return_n=False):
if (n is None and epsilon is None) or \
(n is not None and epsilon is not None):
print 'Error: Either n or epsilon must be given (not both)'
term = x / (1. + x)
s = term
if n is not None:
for i in range(2, n + 1):
# recursive relation between ci and c(i-1)
term *= (i - 1.) / i * x / (1. + x)
s += term
return (s, n) if return_n is True else s
elif epsilon is not None:
i = 1
while abs(term) > epsilon:
i += 1
# recursive relation between ci and c(i-1)
term *= (i - 1.) / i * x / (1. + x)
s += term
return (s, i) if return_n is True else s
print L3(10, n=100)
print L3(10, n=1000, return_n=True)
print L3(10, epsilon=1e-10)
print L3(10, epsilon=1e-10, return_n=True)
"""
Sample run:
python L3_flexible.py
2.39788868474
(2.397895272798365, 1000)
2.39789527188
(2.397895271877886, 187
"""
| [
"noahwaterfieldprice@gmail.com"
] | noahwaterfieldprice@gmail.com |
c7eb200876e8053297a5a2b32fbd8ecfb0abd3fb | c857d225b50c5040e132d8c3a24005a689ee9ce4 | /problem270.py | 51fe86bde94fcb1af98644897b073459c97c4db5 | [] | no_license | pythonsnake/project-euler | 0e60a6bd2abeb5bf863110c2a551d5590c03201e | 456e4ef5407d2cf021172bc9ecfc2206289ba8c9 | refs/heads/master | 2021-01-25T10:44:27.876962 | 2011-10-21T00:46:02 | 2011-10-21T00:46:02 | 2,335,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | """
A square piece of paper with integer dimensions nn is placed with a corner at the origin and two of its sides along the x- and y-axes. then, we cut it up respecting the following rules:
we only make straight cuts between two points lying on different sides of the square, and having integer coordinates.
two cuts cannot cross, but several cuts can meet at the same border point.
proceed until no more legal cuts can be made.
counting any reflections or rotations as distinct, we call c(n) the number of ways to cut an nn square. for example, c(1) = 2 and c(2) = 30 (shown below).
what is c(30) mod 108 ?
""" | [
"pythonsnake98@gmail.com"
] | pythonsnake98@gmail.com |
3b382c712e6e9267fcf983b30958256927465787 | 3679daa10ea95e90889e07e96e6c98c98f3751ea | /ipu/dummy_company/migrations/0003_auto_20170802_2227.py | 1ab5a35cc8a29af8b8748f4991d6c9e2dded39f4 | [] | no_license | rmn5124/ggsipu-placement-cell-portal | 0a8fef69c75ea444588046fcc7b38d7cf5c8e8e5 | 11876c2171bb07308719b205a69cd8330eb08052 | refs/heads/master | 2023-09-01T12:01:47.475984 | 2019-09-02T21:49:01 | 2019-09-02T21:49:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-08-02 16:57
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dummy_company', '0002_auto_20170626_0952'),
]
operations = [
migrations.AlterField(
model_name='dummysession',
name='salary',
field=models.DecimalField(decimal_places=2, default=0, help_text="Salary to be offered in LPA. If it's an internship, leave this as 0, and mention salary in placement details.", max_digits=4, validators=[django.core.validators.MinValueValidator(Decimal('0'))], verbose_name='Salary (Lakhs P.A.)'),
),
]
| [
"pratulyabubna@outlook.com"
] | pratulyabubna@outlook.com |
be9452319160a2be77ed98aa146eabfbc389b6cf | a1cf8c06ad8717d06833e81fa2d1241c8fe095a0 | /Multi_output/util/gendataloader.py | 8b33fe0e97fd50803aa3ff87887cb9a3e689701c | [] | no_license | webstorage119/Multi_iris_pattern_classification | 2ea5381ac77397df4b5bf52215e66de757eb6a6d | 1c1d204cf4e9961a5dead61cc546d4eb0a3db0a8 | refs/heads/master | 2022-11-11T07:50:11.896482 | 2020-06-30T02:07:47 | 2020-06-30T02:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | from keras.preprocessing.image import ImageDataGenerator
from .constants import *
from tensorflow.python.keras.utils import Sequence
from sklearn.preprocessing import LabelBinarizer
from keras.preprocessing.image import img_to_array
import cv2
import os
import numpy as np
from CNNUtil import paths
class ImageGenerator(Sequence):
def __init__(self, data_dir= 'D:\Data\iris_pattern\Multi_output2_test40_train160', augmentations=None):
self.total_paths, self.defect_lbs, self.lacuna_lbs, self.spoke_lbs, self.spot_lbs = self.get_total_data_path(data_dir)
self.batch_size = FLG.BATCH_SIZE
self.indices = np.random.permutation(len(self.total_paths))
self.augment = augmentations
def get_total_data_path(self, data_dir):
total_paths, defect_lbs, lacuna_lbs, spoke_lbs, spot_lbs = [], [], [], [], [] # 이미지 path와 정답(label) 세트를 저장할 list
image_paths = sorted(list(paths.list_images(data_dir)))
for image_path in image_paths:
# a. 이미지 전체 파일 path 저장
total_paths.append(image_path)
# b. 이미지 파일 path에서 이미지의 정답(label) 세트 추출
(defect, lacuna, spoke, spot) = image_path.split(os.path.sep)[-2].split("_")
defect_lbs.append(defect)
lacuna_lbs.append(lacuna)
spoke_lbs.append(spoke)
spot_lbs.append(spot)
defect_lbs = np.array(defect_lbs)
lacuna_lbs = np.array(lacuna_lbs)
spoke_lbs = np.array(spoke_lbs)
spot_lbs = np.array(spot_lbs)
defect_lbs = LabelBinarizer().fit_transform(defect_lbs)
lacuna_lbs = LabelBinarizer().fit_transform(lacuna_lbs)
spoke_lbs = LabelBinarizer().fit_transform(spoke_lbs)
spot_lbs = LabelBinarizer().fit_transform(spot_lbs)
return total_paths, defect_lbs, lacuna_lbs, spoke_lbs, spot_lbs
def load_image(self, image_path):
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (FLG.HEIGHT, FLG.WIDTH))
if self.augment is not None:
image = self.augment(image=image)['image']
image = img_to_array(image)
return image
def __len__(self):
return len(self.total_paths) // self.batch_size
def __getitem__(self, idx):
batch_idx = self.indices[idx * self.batch_size: (idx + 1) * self.batch_size]
x_batch, defect_batch, lacuna_batch, spoke_batch, spot_batch = [], [], [], [], []
img_paths = [self.total_paths[i] for i in batch_idx]
defect_batch = [self.defect_lbs[i] for i in batch_idx]
lacuna_batch = [self.lacuna_lbs[i] for i in batch_idx]
spoke_batch = [self.spoke_lbs[i] for i in batch_idx]
spot_batch = [self.spot_lbs[i] for i in batch_idx]
for img_path in img_paths:
x_batch.append(self.load_image(img_path))
x_batch = np.array(x_batch, dtype="float") / 255.0
return [x_batch], [defect_batch, lacuna_batch, spoke_batch, spot_batch]
def on_epoch_end(self):
print(self.batch_size)
self.indices = np.random.permutation(len(self.total_paths))
| [
"jslee_314@naver.com"
] | jslee_314@naver.com |
98708c98148213ef45a019c8a4757116ef8de427 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_233/ch61_2020_03_11_10_58_49_976982.py | 168113a54572db24f2e8f462c85dc91c75d24631 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | def filtra_positivos(lista):
nova_lista = []
for i in lista:
if i > 0: nova_lista.append(i)
return nova_lista | [
"you@example.com"
] | you@example.com |
67ef7c330dffe15c88c23efd1072f43a6c9a5996 | d177addc1830153404c71fa115a5584f94a392c3 | /N941_ValidMountainArray.py | c19d2de2c95e45e68457fcbba8b25f8d89e4ebe1 | [] | no_license | zerghua/leetcode-python | 38a84452f60a360e991edf90c8156de03a949000 | 02726da394971ef02616a038dadc126c6ff260de | refs/heads/master | 2022-10-25T11:36:22.712564 | 2022-10-02T19:56:52 | 2022-10-02T19:56:52 | 61,502,010 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | #
# Create by Hua on 5/10/22.
#
"""
Given an array of integers arr, return true if and only if it is a valid mountain array.
Recall that arr is a mountain array if and only if:
arr.length >= 3
There exists some i with 0 < i < arr.length - 1 such that:
arr[0] < arr[1] < ... < arr[i - 1] < arr[i]
arr[i] > arr[i + 1] > ... > arr[arr.length - 1]
Example 1:
Input: arr = [2,1]
Output: false
Example 2:
Input: arr = [3,5,5]
Output: false
Example 3:
Input: arr = [0,3,2,1]
Output: true
Constraints:
1 <= arr.length <= 104
0 <= arr[i] <= 104
"""
class Solution(object):
def validMountainArray(self, arr):
"""
:type arr: List[int]
:rtype: bool
thought: find largest num/index in arr first, and check its left
and right should be strictly increasing and decreasing
05/10/2022 09:52 Accepted 236 ms 14.5 MB python
easy 10 min. some corner cases.
"""
n = len(arr)
if n < 3: return False
peak = arr.index(max(arr))
if peak == 0 or peak == n-1: return False
for i in range(1, peak+1): # check strictly increasing
if arr[i] <= arr[i-1]:
return False
for i in range(peak+1, n): # check strictly decreasing
if arr[i] >= arr[i-1]:
return False
return True | [
"zerghua@gmail.com"
] | zerghua@gmail.com |
36d15f5608287176a5a9980524a7100216b6b6ff | a835dc3f52df8a291fab22e64e9e3e5cbecf0c6e | /lovelive_crawlin_api/lovelive_api/migrations/0015_auto_20181012_0306.py | 90b0a3ec55126030aca51e537659ef0bcc5dfb3d | [] | no_license | tails5555/lovelive_crawlin_project | 99d334bb7e3c892a59ae738158e954f4ad322296 | d0bd42ab183a8a75a07f52c0cf7ab30cb634b513 | refs/heads/master | 2020-03-30T04:12:00.102091 | 2018-11-27T08:04:40 | 2018-11-27T08:04:40 | 150,729,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.1.2 on 2018-10-11 18:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lovelive_api', '0014_auto_20181012_0244'),
]
operations = [
migrations.AlterField(
model_name='cardimage',
name='img_file',
field=models.ImageField(upload_to='card_images'),
),
]
| [
"tails5555@naver.com"
] | tails5555@naver.com |
50baffaed091096a06ea1478956eb5c01a849c0e | aebe93e3c4d15f64b2611d9b8b5332ce05e37d72 | /Implement_Queue_Stacks.py | bdb5d00130ba64bf23c940741f172441f900585f | [] | no_license | liuwei881/leetcode | 37fa7d329df78f63a8aa48905096b46952533a95 | 33d329a7ce13aeeb1813b7471188cd689f91a1e8 | refs/heads/master | 2020-04-26T22:00:16.813430 | 2019-04-24T03:20:45 | 2019-04-24T03:20:45 | 173,858,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py | # coding=utf-8
"""
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Example:
MyQueue queue = new MyQueue();
queue.push(1);
queue.push(2);
queue.peek(); // returns 1
queue.pop(); // returns 1
queue.empty(); // returns false
Notes:
You must use only standard operations of a stack -- which means only push to top,
peek/pop from top, size, and is empty operations are valid.
Depending on your language, stack may not be supported natively.
You may simulate a stack by using a list or deque (double-ended queue),
as long as you use only standard operations of a stack.
You may assume that all operations are valid
(for example, no pop or peek operations will be called on an empty queue).
"""
class MyQueue:
def __init__(self):
"""
Initialize your data structure here.
"""
self.stack = []
def push(self, x: int) -> None:
"""
Push element x to the back of queue.
"""
swap = []
while self.stack:
swap.append(self.stack.pop())
swap.append(x)
while swap:
self.stack.append(swap.pop())
def pop(self) -> int:
"""
Removes the element from in front of queue and returns that element.
"""
return self.stack.pop()
def peek(self) -> int:
"""
Get the front element.
"""
if self.stack:
return self.stack[-1]
return False
def empty(self) -> bool:
"""
Returns whether the queue is empty.
"""
return len(self.stack) == 0
if __name__ =='__main__':
# Your MyQueue object will be instantiated and called as such:
x = 5
x1 = 6
obj = MyQueue()
obj.push(x)
obj.push(x1)
param_2 = obj.pop()
param_3 = obj.peek()
param_4 = obj.empty()
print(param_2, param_3, param_4)
| [
"liuweia@mail.open.com.cn"
] | liuweia@mail.open.com.cn |
5a4cb878245c61dc6dbbdffff2942db0a0164075 | 5ae98342461af8568aa03e6377f83efb649f6799 | /helper/javascript_completions/show_hint_parameters_command.py | fe014339f7e004793f69700b701748a9cc81fb11 | [
"MIT"
] | permissive | minimallinux/JavaScriptEnhancements | 30651ffb647848646c52138dc3c1a02577fe3210 | d9f578022d9cdf5ee6541319c6a129f896350bee | refs/heads/master | 2021-05-05T21:56:07.179554 | 2018-01-03T02:46:12 | 2018-01-03T02:46:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,253 | py | import sublime, sublime_plugin
class show_hint_parametersCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
view = self.view
scope = view.scope_name(view.sel()[0].begin()).strip()
meta_fun_call = "meta.function-call.method.js"
result = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call+" meta.group.js")
if not result :
meta_fun_call = "meta.function-call.js"
result = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call+" meta.group.js")
if result :
point = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call)["region"].begin()
sublime.set_timeout_async(lambda: on_hover_description_async(view, point, sublime.HOVER_TEXT, view.sel()[0].begin()))
def is_enabled(self) :
view = self.view
sel = view.sel()[0]
if not view.match_selector(
sel.begin(),
'source.js - comment'
):
return False
scope = view.scope_name(view.sel()[0].begin()).strip()
meta_fun_call = "meta.function-call.method.js"
result = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call+" meta.group.js")
if not result :
meta_fun_call = "meta.function-call.js"
result = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call+" meta.group.js")
if result :
point = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call)["region"].begin()
scope_splitted = scope.split(" ")
find_and_get_scope = Util.find_and_get_pre_string_and_matches(scope, meta_fun_call+" meta.group.js")
find_and_get_scope_splitted = find_and_get_scope.split(" ")
if (
(
len(scope_splitted) == len(find_and_get_scope_splitted) + 1
or scope == find_and_get_scope
or (
len(scope_splitted) == len(find_and_get_scope_splitted) + 2
and ( Util.get_parent_region_scope(view, view.sel()[0])["scope"].split(" ")[-1] == "string.quoted.double.js"
or Util.get_parent_region_scope(view, view.sel()[0])["scope"].split(" ")[-1] == "string.quoted.single.js"
or Util.get_parent_region_scope(view, view.sel()[0])["scope"].split(" ")[-1] == "string.template.js"
)
)
)
and not scope.endswith("meta.block.js")
and not scope.endswith("meta.object-literal.js")
) :
return True
return False
def is_visible(self) :
view = self.view
sel = view.sel()[0]
if not view.match_selector(
sel.begin(),
'source.js - comment'
):
return False
scope = view.scope_name(view.sel()[0].begin()).strip()
meta_fun_call = "meta.function-call.method.js"
result = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call+" meta.group.js")
if not result :
meta_fun_call = "meta.function-call.js"
result = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call+" meta.group.js")
if result :
point = Util.get_region_scope_last_match(view, scope, view.sel()[0], meta_fun_call)["region"].begin()
scope_splitted = scope.split(" ")
find_and_get_scope = Util.find_and_get_pre_string_and_matches(scope, meta_fun_call+" meta.group.js")
find_and_get_scope_splitted = find_and_get_scope.split(" ")
if (
(
len(scope_splitted) == len(find_and_get_scope_splitted) + 1
or scope == find_and_get_scope
or (
len(scope_splitted) == len(find_and_get_scope_splitted) + 2
and ( Util.get_parent_region_scope(view, view.sel()[0])["scope"].split(" ")[-1] == "string.quoted.double.js"
or Util.get_parent_region_scope(view, view.sel()[0])["scope"].split(" ")[-1] == "string.quoted.single.js"
or Util.get_parent_region_scope(view, view.sel()[0])["scope"].split(" ")[-1] == "string.template.js"
)
)
)
and not scope.endswith("meta.block.js")
and not scope.endswith("meta.object-literal.js")
) :
return True
return False | [
"pichillilorenzo@gmail.com"
] | pichillilorenzo@gmail.com |
b2f89bafaa88589c3f1df86671f16797c3be58f1 | 20930fa97c20bc41a363782446c8939be47f7f98 | /promus/command/_reset.py | e8e214aeb2cb85e56d1ca1d29ee207d038b789b0 | [
"BSD-3-Clause"
] | permissive | jmlopez-rod/promus | 1018214b502eb1dc0c7a7e5e7fcfaae7666c3826 | 2d5b8ac54afe721298f6326f45fd3587bc00d173 | refs/heads/master | 2016-09-10T03:35:01.710952 | 2014-08-22T22:01:22 | 2014-08-22T22:01:22 | 21,441,146 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | """_RESET
This is an axilary function to fix the old repositories created with
the beta version of promus.
"""
import textwrap
import promus.core as prc
DESC = """
Reset the git hooks. Note that this version does not create a backup
of the current hooks. Also you must be in the git repository.
"""
def add_parser(subp, raw):
"Add a parser to the main subparser. "
tmpp = subp.add_parser('_reset',
help='reset git hooks',
formatter_class=raw,
description=textwrap.dedent(DESC))
tmpp.add_argument('--bare', action='store_true',
help="Use this option for bare repositories")
def reset_hooks():
"""Command to rest hooks in a repository. """
print("resetting hooks:")
hooks = ['commit-msg', 'post-checkout', 'post-commit', 'post-merge',
'pre-commit', 'pre-rebase',
'prepare-commit-msg']
for hook in hooks:
print(" %s" % hook)
path = './.git/hooks'
prc.make_hook(hook, path)
def reset_hooks_bare():
"""Command to reset hooks in a bare repository. """
print("resetting hooks in bare repository:")
hooks = ['post-receive', 'update']
for hook in hooks:
print(" %s" % hook)
path = './hooks'
prc.make_hook(hook, path)
def run(arg):
"""Run command. """
if arg.bare:
reset_hooks_bare()
else:
reset_hooks()
| [
"jmlopez.rod@gmail.com"
] | jmlopez.rod@gmail.com |
203a44dd016c6c369edec9ac043147ed5e9b0d56 | 048df2b4dc5ad153a36afad33831017800b9b9c7 | /atcoder/abc003/abc003_2.py | 8d06191a63417901073e63aa87577afe758fc7c4 | [] | no_license | fluffyowl/past-submissions | a73e8f5157c647634668c200cd977f4428c6ac7d | 24706da1f79e5595b2f9f2583c736135ea055eb7 | refs/heads/master | 2022-02-21T06:32:43.156817 | 2019-09-16T00:17:50 | 2019-09-16T00:17:50 | 71,639,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | s = raw_input()
t = raw_input()
atcoder = set(list('atcoder@'))
for i in range(len(s)):
if s[i] == t[i]:
continue
if s[i] == '@' and t[i] in atcoder:
continue
if t[i] == '@' and s[i] in atcoder:
continue
print 'You will lose'
break
else:
print 'You can win'
| [
"nebukuro09@gmail.com"
] | nebukuro09@gmail.com |
d90c2a8eb4a3e0e16ee723fed99f987a006669e1 | 1af4ec17c474e5c05e4de1a471247b546f32c7c7 | /funcoes/gerador_html_v2.py | 4154f930334f5ab6d7483645c4e523224c07e1f7 | [] | no_license | ribeiro-rodrigo-exemplos/curso-python | 825d09bb4284e65c3ba67bed8dcf8b29ac1c90eb | 101c610161775d73b46cc64a668451dfa65abf7d | refs/heads/master | 2020-05-18T06:43:37.263995 | 2019-06-17T09:32:58 | 2019-06-17T09:32:58 | 184,241,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | #!/usr/bin/python3
def tag_bloco(texto, classe='success', inline=False):
tag = 'span' if inline else 'div'
return f'<{tag} class="{classe}">{texto}</{tag}>'
if __name__ == '__main__':
print(tag_bloco('bloco'))
print(tag_bloco('inline e classe', 'info', True))
print(tag_bloco('inline', inline=True))
print(tag_bloco(inline=True, texto='inline'))
print(tag_bloco('falhou', classe='error'))
| [
"ribeiro.rodrigo1989@gmail.com"
] | ribeiro.rodrigo1989@gmail.com |
42333d8e0b5efa7e7e6a23a038e8b16586645d8c | c80d264e1e7af78505af076da879c62cfa247d35 | /Vow_str.py | afa20822a95d536c5c957e93e792651fad8a9439 | [] | no_license | shanthivimalanataraajan01/Codekataplayer | f3679c365099d0f6dedf2fe6b93bd308c67bc485 | 2af9997a164daef213b1053f0becfc0327735f21 | refs/heads/master | 2020-05-23T12:00:19.855401 | 2019-05-05T07:20:07 | 2019-05-05T07:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | def vow(x):
c=0
v=['a','e','i','o','u']
for i in x:
if i in v:
c+=1
if c>=1:
a=1
else:
a=0
return a
n=int(input())
g=[]
d=0
for i in range(0,n):
g.append(input())
for i in g:
if vow(i)==1:
d+=1
if d==len(g):
print("yes")
else:
print("no")
| [
"noreply@github.com"
] | shanthivimalanataraajan01.noreply@github.com |
d7a436e6e55cf21fc9c78b8d1320284f36f44d69 | 37f1125544ac1b4800402d981ce030c82b4993d8 | /pythonitems/APIAutoTest20200304/demo/other/demo_unittest03.py | edd4364a6fec5462ef3dd21c08a57161179baf6c | [] | no_license | shixingjian/PythonItems | db6392451fda388ef4f4805aaf9991ec01bd36bd | 6b6a4e4bae2e727581c8805676422f28b8f6231f | refs/heads/master | 2022-11-21T01:17:23.607196 | 2020-07-22T08:37:44 | 2020-07-22T08:37:44 | 281,614,603 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | #-*-coding:utf-8-*-
#@Time:2020/4/910:02
#@Author:freya
#@File:dem0_unnittest03.py
from demo.other.demo_calculator import Calculator
import unittest
from ddt import ddt,data
from lib.excelManage import readExcel
import time
from lib.sendCourseRequest import SendCourceRequest
import json
"""
UnitTest结合DDT语法学习
"""
mydata=[[1,2,3],[3,4,7],[4,5,9]]
path=r'../../data/教管系统-测试用例V1.2.xls'
#2-读取测试用例
mydata2=readExcel(path,1)
@ddt
class UnittestDemo3(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.calculator=Calculator(12,3)
print('setUp方法运行了\r\n')
#1. 测试加法
def test_001(self):
print('>>>>用例1运行了')
result=self.calculator.add()
self.assertEqual(result,15)
#2. 测试减法
def test_002(self):
print('>>>>用例2运行了')
result=self.calculator.sub()
self.assertEqual(result,9)
#3. 测试乘法
def test_003(self):
print('>>>>用例3运行了')
result=self.calculator.mul()
self.assertEqual(result,36)
#4. 测试除法
def test_004(self):
print('>>>>用例4运行了')
result=self.calculator.div()
self.assertEqual(result,4)
# 5. 测试加法2
@data(*mydata)
def test_005(self,data):
result=self.calculator.add2(data[0],data[1])
self.assertEqual(result,data[2])
print('>>>>用例5运行了')
# 6. 批量执行excel测试用例
@data(*mydata2)
def test_006(self,row):
# print(row)
dictBody = SendCourceRequest(row)
time.sleep(0.001)
test = json.loads(row[6])
print('>>>>用例6运行了')
# if 'reason' in dictBody.keys():
# self.assertEqual(dictBody['retcode'], test['code'],dictBody['reason'])
# else:
self.assertEqual(dictBody['retcode'], test['code'])
| [
"1440799763@qq.com"
] | 1440799763@qq.com |
f6834b518da6ec85721a33f3d0411fe0f27e0ce3 | 336a5f79d935c277be6d4b22cf3cc58e48c1344d | /tensorflow_probability/python/math/psd_kernels/psd_kernel_properties_test.py | 08b1970db617e8472a618484d6c93464c2dd7aff | [
"Apache-2.0"
] | permissive | fearghus-moloco/probability | 9300328febe09f18bf84e31b7588106588a60fbd | 4b675da7c431d7bb20029f9fdd28db859e6c025f | refs/heads/master | 2023-08-06T08:36:03.041624 | 2021-09-10T22:34:54 | 2021-09-10T22:41:07 | 288,120,063 | 0 | 0 | Apache-2.0 | 2020-08-17T08:04:39 | 2020-08-17T08:04:38 | null | UTF-8 | Python | false | false | 6,236 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as hps
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math.psd_kernels import hypothesis_testlib as kernel_hps
# pylint is unable to handle @hps.composite (e.g. complains "No value for
# argument '...' in function call"), so disable this lint for the file.
# pylint: disable=no-value-for-parameter
EXTRA_TENSOR_CONVERSION_KERNELS = {
# The transformation is applied to each input individually.
'KumaraswamyTransformed': 1,
}
def assert_no_none_grad(kernel, method, wrt_vars, grads):
for var, grad in zip(wrt_vars, grads):
# For the GeneralizedMatern kernel, gradients with respect to `df` don't
# exist.
if tensor_util.is_ref(var) and var.name.strip('_0123456789:') == 'df':
continue
if grad is None:
raise AssertionError('Missing `{}` -> {} grad for kernel {}'.format(
method, var, kernel))
@test_util.test_all_tf_execution_regimes
class KernelPropertiesTest(test_util.TestCase):
@parameterized.named_parameters(
{'testcase_name': dname, 'kernel_name': dname}
for dname in sorted(list(kernel_hps.INSTANTIABLE_BASE_KERNELS.keys()) +
list(kernel_hps.SPECIAL_KERNELS)))
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings(
default_max_examples=10,
suppress_health_check=[
hp.HealthCheck.too_slow,
hp.HealthCheck.data_too_large])
def testKernelGradient(self, kernel_name, data):
event_dim = data.draw(hps.integers(min_value=2, max_value=3))
feature_ndims = data.draw(hps.integers(min_value=1, max_value=2))
feature_dim = data.draw(hps.integers(min_value=2, max_value=4))
batch_shape = data.draw(tfp_hps.shapes(max_ndims=2))
kernel, kernel_parameter_variable_names = data.draw(
kernel_hps.kernels(
batch_shape=batch_shape,
kernel_name=kernel_name,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=True))
# Check that variable parameters get passed to the kernel.variables
kernel_variables_names = [
v.name.strip('_0123456789:') for v in kernel.variables]
kernel_parameter_variable_names = [
n.strip('_0123456789:') for n in kernel_parameter_variable_names]
self.assertEqual(
set(kernel_parameter_variable_names),
set(kernel_variables_names))
example_ndims = data.draw(hps.integers(min_value=1, max_value=2))
input_batch_shape = data.draw(tfp_hps.broadcast_compatible_shape(
kernel.batch_shape))
xs = tf.identity(data.draw(kernel_hps.kernel_input(
batch_shape=input_batch_shape,
example_ndims=example_ndims,
feature_dim=feature_dim,
feature_ndims=feature_ndims)))
# Check that we pick up all relevant kernel parameters.
wrt_vars = [xs] + list(kernel.variables)
self.evaluate([v.initializer for v in kernel.variables])
max_permissible = 2 + EXTRA_TENSOR_CONVERSION_KERNELS.get(kernel_name, 0)
with tf.GradientTape() as tape:
with tfp_hps.assert_no_excessive_var_usage(
'method `apply` of {}'.format(kernel),
max_permissible=max_permissible
):
tape.watch(wrt_vars)
with tfp_hps.no_tf_rank_errors():
diag = kernel.apply(xs, xs, example_ndims=example_ndims)
grads = tape.gradient(diag, wrt_vars)
assert_no_none_grad(kernel, 'apply', wrt_vars, grads)
# Check that copying the kernel works.
with tfp_hps.no_tf_rank_errors():
diag2 = self.evaluate(kernel.copy().apply(
xs, xs, example_ndims=example_ndims))
self.assertAllClose(diag, diag2)
@parameterized.named_parameters(
{'testcase_name': dname, 'kernel_name': dname}
for dname in sorted(list(kernel_hps.INSTANTIABLE_BASE_KERNELS.keys()) +
list(kernel_hps.SPECIAL_KERNELS)))
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings(
default_max_examples=10,
suppress_health_check=[
hp.HealthCheck.too_slow,
hp.HealthCheck.data_too_large])
def testCompositeTensor(self, kernel_name, data):
kernel, _ = data.draw(
kernel_hps.kernels(
kernel_name=kernel_name,
event_dim=2,
feature_dim=2,
feature_ndims=1,
enable_vars=True))
self.assertIsInstance(kernel, tf.__internal__.CompositeTensor)
xs = tf.identity(data.draw(kernel_hps.kernel_input(
batch_shape=[],
example_ndims=1,
feature_dim=2,
feature_ndims=1)))
with tfp_hps.no_tf_rank_errors():
diag = kernel.apply(xs, xs, example_ndims=1)
# Test flatten/unflatten.
flat = tf.nest.flatten(kernel, expand_composites=True)
unflat = tf.nest.pack_sequence_as(kernel, flat, expand_composites=True)
# Test tf.function.
@tf.function
def diag_fn(k):
return k.apply(xs, xs, example_ndims=1)
self.evaluate([v.initializer for v in kernel.variables])
with tfp_hps.no_tf_rank_errors():
self.assertAllClose(diag, diag_fn(kernel))
self.assertAllClose(diag, diag_fn(unflat))
if __name__ == '__main__':
test_util.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
37a541c1f5362364dd6c285b52b15dead97246ab | b7137589ec15f08f687fe4a56693be59e95e550f | /backend/blank_test_19535/wsgi.py | bb7e46c39a2636de4f8d656cfd59e9732578eb66 | [] | no_license | crowdbotics-apps/blank-test-19535 | 25391cac32119702065fcd1070fcc6bb32379138 | 849e0186882cf69fd9008bb58685fc0e187b4b77 | refs/heads/master | 2022-12-07T03:07:26.627979 | 2020-08-24T14:30:14 | 2020-08-24T14:30:14 | 287,266,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for blank_test_19535 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blank_test_19535.settings")
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
3265659206a20735c52f8a36a3d204d9ed935475 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02850/s650053544.py | 67d112724c3be08773f6d9bb68cb786210ef37d7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | from collections import Counter, defaultdict
import sys
sys.setrecursionlimit(10 ** 5 + 10)
# input = sys.stdin.readline
from math import factorial
import heapq, bisect
import math
import itertools
import queue
from collections import deque
from fractions import Fraction
def main():
num = int(input())
data = [list(map(int, input().split())) for i in range(num - 1)]
node_data = defaultdict(set)
for i in range(num - 1):
a, b = data[i]
node_data[a].add(b)
node_data[b].add(a)
ans_num = 0
for i in range(1, num + 1):
ans_num = max(ans_num, len(node_data[i]))
edge_data = defaultdict(int)
ans_count = 0
for i in range(1, num + 1):
now_list = list(node_data[i])
now_list.sort()
count = bisect.bisect_left(now_list, i) + 1
use_set = set()
for ele in now_list[:count - 1]:
use_set.add(edge_data[(i, ele)])
now_indx = 1
for ele in now_list[count - 1:]:
while now_indx in use_set:
now_indx += 1
edge_data[(i, ele)] = now_indx
edge_data[(ele, i)] = now_indx
use_set.add(now_indx)
ans_count = max(ans_count, now_indx)
print(ans_count)
for i in range(num - 1):
a, b = data[i]
ans = edge_data[(a, b)]
print(ans)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
85ced45bf2ef40af67c5cbb064e9619f7baac7cf | e063c54c9e73b393e6092d2d1510d1f942904528 | /src/cryptoadvance/specter/devices/generic.py | b4f00abda29ec6b7039f21dd6d5cddcd9ea04b63 | [
"MIT"
] | permissive | zndtoshi/specter-desktop | bd1633dc273c2a6a4eedae7f545fb715155bb8ec | 00c32b0fed4b49380de8e311d79a790756dacba5 | refs/heads/master | 2022-11-25T01:06:49.869265 | 2020-08-05T20:37:14 | 2020-08-05T20:37:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from ..device import Device
class GenericDevice(Device):
def __init__(self, name, alias, device_type, keys, fullpath, manager):
super().__init__(name, alias, 'other', keys, fullpath, manager)
self.sd_card_support = True
self.qr_code_support = True
def create_psbts(self, base64_psbt, wallet):
psbts = {
'qrcode': base64_psbt,
'sdcard': base64_psbt,
}
return psbts
| [
"snigirev.stepan@gmail.com"
] | snigirev.stepan@gmail.com |
d4dd4d3745bb28e8388a448e33d8e736a4a10859 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/335/usersdata/303/99133/submittedfiles/matriz1.py | 8d6f119a5e174147ca17c1bd61a5ad1fbb6d2d1f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | # -*- coding: utf-8 -*-
a=[]
m=int(input(''))
n=int(input(''))
for i in range(0,m,1):
linha=[]
for j in range(0,n,1):
linha.append(int(input('')))
a.append(linha)
#ANALISE SUPERIOR/INFERIOR
c=[]
for i in range(0,m,1):
if sum(a[i])>=1:
c.append(i)
break
for i in range(m-1,-1,-1):
if sum(a[i])>=1:
c.append(i)
break
#LIMITES LATERAIS
lat=[]
for i in range(0,m,1):
for j in range(0,n,1):
if a[i][j]==1:
lat.append(j)
lat=sorted(lat)
#LIMITES INTEIROS
c1=int(c[0])
c2=int(c[1])
x1=int(lat[0])
x2=int(lat[len(lat)-1])
#CRIAÇÃO DA NOVA MATRIZ
n=[]
for i in range(c1,c2+1,1):
for j in range(x1,x2+1,1):
n.append(a[i][j])
print(n) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
cd7f1750274d2977bd85d9c9c294caeca2215804 | 0649b25962931fe2b659e64b6580640b647b675c | /watson/auth/providers/abc.py | 2059353119b6f8ca6cbfee271ac4ed48938fc4b1 | [
"BSD-2-Clause"
] | permissive | watsonpy/watson-auth | 117994ef9bb2b45817955a002d77be2462be0337 | 44057e21e24f272f39414ff15a275cf6f9f009e2 | refs/heads/master | 2022-05-26T23:52:17.507355 | 2019-10-03T04:28:38 | 2019-10-03T04:28:38 | 16,093,607 | 0 | 0 | BSD-3-Clause | 2022-05-25T00:34:21 | 2014-01-21T05:29:44 | Python | UTF-8 | Python | false | false | 3,673 | py | import abc
from sqlalchemy.orm import exc
from watson.auth import crypto
from watson.auth.providers import exceptions
from watson.common import imports
from watson.common.decorators import cached_property
class Base(object):
config = None
session = None
def __init__(self, config, session):
self._validate_configuration(config)
self.config = config
self.session = session
# Configuration
def _validate_configuration(self, config):
if 'class' not in config['model']:
raise exceptions.InvalidConfiguration(
'User model not specified, ensure "class" key is set on provider["model"].')
common_keys = [
'system_email_from_address',
'reset_password_route',
'forgotten_password_route']
for key in common_keys:
if key not in config:
raise exceptions.InvalidConfiguration(
'Ensure "{}" key is set on the provider.'.format(key))
# User retrieval
@property
def user_model_identifier(self):
return self.config['model']['identifier']
@cached_property
def user_model(self):
return imports.load_definition_from_string(
self.config['model']['class'])
@property
def user_query(self):
return self.session.query(self.user_model)
def get_user(self, username):
"""Retrieves a user from the database based on their username.
Args:
username (string): The username of the user to find.
"""
user_field = getattr(self.user_model, self.user_model_identifier)
try:
return self.user_query.filter(user_field == username).one()
except exc.NoResultFound:
return None
def get_user_by_email_address(self, email_address):
email_column = getattr(
self.user_model, self.config['model']['email_address'])
try:
return self.user_query.filter(email_column == email_address).one()
except exc.NoResultFound:
return None
# Authentication
def authenticate(self, username, password):
"""Validate a user against a supplied username and password.
Args:
username (string): The username of the user.
password (string): The password of the user.
"""
password_config = self.config['password']
if len(password) > password_config['max_length']:
return None
user = self.get_user(username)
if user:
if crypto.check_password(password, user.password, user.salt,
self.config['encoding']):
return user
return None
def user_meets_requirements(self, user, requires):
for require in requires or []:
if not require(user):
return False
return True
# Authorization
def is_authorized(self, user, roles=None, permissions=None, requires=None):
no_role = roles and not user.acl.has_role(roles)
no_permission = permissions and not user.acl.has_permission(
permissions)
no_requires = self.user_meets_requirements(user, requires)
return False if no_role or no_permission or not no_requires else True
# Actions
@abc.abstractmethod
def logout(self, request):
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def login(self, user, request):
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def handle_request(self, request):
raise NotImplementedError # pragma: no cover
| [
"simon.coulton@gmail.com"
] | simon.coulton@gmail.com |
d5dc3061eb7cbe5b2f99bf91cb705fc72aec5fe0 | 5ad0dace59e449fb0928d7b302f386a84d634e05 | /MERFISH_probe_design/probe_design/quality_check.py | 5fe0f510f97bb6cde2f80a95246af7eb9d347da1 | [
"MIT"
] | permissive | r3fang/merfish_designer | 69784fcadebfb912b0a5557081f120abcc16f2d8 | c8ca3a1c825e6b381e0bbd9654ef7d3c9107bb9c | refs/heads/main | 2023-09-03T17:16:08.554990 | 2021-10-21T17:33:33 | 2021-10-21T17:33:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,471 | py | #!/usr/bin/env python3
import numpy as np
import pandas as pd
def check_and_standardize_transcriptome(transcriptome:pd.core.frame.DataFrame,
remove_non_standard_columns:bool=False):
'''Check the quality of the transcriptome and standardize it.
Return a standardized transcriptome.'''
standard_transcriptome = transcriptome
# Check the existance of standarad columns
standard_columns = ['transcript_id', 'sequence', 'gene_id', 'gene_short_name', 'FPKM']
for sc in standard_columns:
if not sc in transcriptome.columns:
print(f'\033[91m ERROR: missing the standard column {sc}!')
# Remove non-standard columns
if remove_non_standard_columns:
nscs = [c for c in transcriptome.columns if c not in standard_columns]
standard_transcriptome = transcriptome.drop(columns=nscs)
# Check that the transcript ids are unique
t_ids, counts = np.unique(np.array(standard_transcriptome['transcript_id']), return_counts=True)
for i in range(len(t_ids)):
if counts[i] > 1:
print(f'\033[91m ERROR: the transcript {t_ids[i]} have {counts[i]} entries!')
return standard_transcriptome
def barcode_str_to_array(bc_str:str):
return np.array([int(c) for c in bc_str])
def barcode_array_to_str(bc_array:np.ndarray):
return ''.join(['1' if i > 0 else '0' for i in bc_array])
def coverage_string(probe_bit_counts:np.ndarray):
return ':'.join([str(i) for i in probe_bit_counts if i > 0])
def max_N_non_overlapping_probes(shifts:list, target_length:int):
ss = sorted(shifts)
N_accepted = 0
tail = -1
for s in ss:
if s > tail:
N_accepted += 1
tail = s + target_length - 1
return N_accepted
def generate_transcript_level_report(probe_dict:dict, transcriptome:pd.core.frame.DataFrame):
'''Generate a data frame of transcript level metrics.
'''
metrics = {'gene_id':[], 'gene_short_name':[], 'transcript_id':[], 'FPKM':[],
'length':[], 'barcode':[], 'N_probes':[], 'probe_bit_coverage':[],
'max_N_non_overlapping_probes':[]}
for gk in probe_dict.keys():
for tk in probe_dict[gk].keys():
transcript_df = transcriptome[transcriptome['transcript_id'] == tk]
# Add the basic metrics
metrics['gene_id'].append(transcript_df.iloc[0]['gene_id'])
metrics['gene_short_name'].append(transcript_df.iloc[0]['gene_short_name'])
metrics['transcript_id'].append(transcript_df.iloc[0]['transcript_id'])
metrics['FPKM'].append(transcript_df.iloc[0]['FPKM'])
metrics['length'].append(len(transcript_df.iloc[0]['sequence']))
metrics['N_probes'].append(probe_dict[gk][tk].shape[0])
# Calculate barcode related metrics
probe_barcodes = [barcode_str_to_array(bc_str) for bc_str in probe_dict[gk][tk]['probe_barcode']]
probe_bit_counts = np.sum(probe_barcodes, axis=0)
metrics['barcode'].append(barcode_array_to_str(probe_bit_counts))
metrics['probe_bit_coverage'].append(coverage_string(probe_bit_counts))
# Calculate the overlapping matric
target_length = len(probe_dict[gk][tk].iloc[0]['target_sequence'])
metrics['max_N_non_overlapping_probes'].append(max_N_non_overlapping_probes(
list(probe_dict[gk][tk]['shift']), target_length))
return pd.DataFrame(metrics)
| [
"xingjiepan@gmail.com"
] | xingjiepan@gmail.com |
aff49207e5329af6de37a0e3f5893be8c2d66c42 | e523652e0379f291f675e5cba4c1f667a3ac3b19 | /commands/on | 9338a7cc67189251954edfa16efecf5b8cb3426d | [
"Apache-2.0"
] | permissive | sbp/saxo | 735bac23c8d214b85ca48c5c43bc12b1531ce137 | 27030c57ed565db1aafd801576555ae64893d637 | refs/heads/master | 2023-09-01T09:08:13.633734 | 2023-08-29T12:51:40 | 2023-08-29T12:51:40 | 9,411,794 | 25 | 13 | Apache-2.0 | 2021-06-19T15:09:44 | 2013-04-13T10:06:52 | Python | UTF-8 | Python | false | false | 1,287 | #!/usr/bin/env python3
# http://inamidst.com/saxo/
# Created by Sean B. Palmer
import calendar
import datetime
import random
import re
import time
import saxo
months = "(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)"
r_arg = re.compile("(\d?\d %s)(?: (.*))?$" % months)
@saxo.pipe
def main(arg):
if not arg:
return "Set a reminder at a certain time. Must match " + r_arg.pattern
match = r_arg.match(arg)
if not match:
return "Sorry, input must match " + r_arg.pattern
now = datetime.datetime.now()
t = datetime.datetime.strptime(match.group(1), "%d %b")
t = t.replace(hour=8)
t = t.replace(year=now.year)
t = t.replace(second=random.randint(1, 60))
if t < now:
t = t.replace(year=t.year + 1)
message = match.group(2)
unixtime = calendar.timegm(t.utctimetuple())
nick = saxo.env("nick")
sender = saxo.env("sender")
if not (nick or sender):
return "Sorry, couldn't set a reminder!"
if message:
message = nick + ": " + message
else:
message = nick + "!"
args = (unixtime, "msg", (sender, message))
saxo.client("schedule", *args)
when = time.strftime("%d %b %Y %H:%M:%S UTC", time.gmtime(unixtime))
return "%s: Will remind at %s" % (nick, when)
| [
"sean@miscoranda.com"
] | sean@miscoranda.com | |
be586f969f0d8cca5ea9db4244f816c8061ed23c | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /opencvlib/test/imgpipes/test_digikamlib.py | 90d8375bb93b17004e2cf311cbea6861fabd1708 | [] | no_license | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 2,074 | py | # pylint: disable=C0103, too-few-public-methods, locally-disabled, no-self-use, unused-argument
'''unit tests for digikamlib which
is used to interact with the digikam
sqlite database'''
import unittest
from inspect import getsourcefile as _getsourcefile
import os.path as _path
import opencvlib.imgpipes.digikamlib as digikamlib
import funclib.iolib as iolib
class Test(unittest.TestCase):
'''unittest for keypoints'''
def setUp(self):
'''setup variables etc for use in test cases
'''
self.pth = iolib.get_file_parts2(_path.abspath(_getsourcefile(lambda: 0)))[0]
self.modpath = _path.normpath(self.pth)
#@unittest.skip("Temporaily disabled while debugging")
def test_images_by_tags_or(self):
'''test_imagesbytags'''
ImgP = digikamlib.ImagePaths('C:/Users/Graham Monkman/OneDrive/Documents/PHD/images/digikam4.db')
lst = ImgP.images_by_tags_or(
album_label='images',
relative_path='bass/angler',
species=['bass', 'pollock'])
print(str(len(lst)))
#@unittest.skip("Temporaily disabled while debugging")
def test_images_by_tags_outer_and_inner_or(self):
'''test_imagesbytags'''
ImgP = digikamlib.ImagePaths('C:/Users/Graham Monkman/OneDrive/Documents/PHD/images/digikam4.db')
lst = ImgP.images_by_tags_outerAnd_innerOr(
album_label='images',
relative_path='bass/angler',
species=['bass', 'pollock'], pitch=['0', '45', '180'], roll='0', yaw=['0', '180'])
print(str(len(lst)))
#@unittest.skip("Temporaily disabled while debugging")
def test_images_by_tags_outer_and(self):
'''test_imagesbytags'''
ImgP = digikamlib.ImagePaths('C:/Users/Graham Monkman/OneDrive/Documents/PHD/images/digikam4.db')
lst = ImgP.images_by_tags_outerAnd_innerOr(
album_label='images',
relative_path='bass/angler',
species='bass', pitch='0')
print(str(len(lst)))
if __name__ == '__main__':
unittest.main()
| [
"gmonkman@mistymountains.biz"
] | gmonkman@mistymountains.biz |
d3d58a4b31205e7f350a10f5cafce63d2264d44f | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/01_netCDF_extraction/erafive902TG/417-tideGauge.py | ab7076eb96dc93a1d051f0c632732ff5642fe508 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,595 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
ERA5 netCDF extraction script
@author: Michael Tadesse
"""
import time as tt
import os
import pandas as pd
from d_define_grid import Coordinate, findPixels, findindx
from c_read_netcdf import readnetcdf
from f_era5_subsetV2 import subsetter
def extract_data(delta= 1):
"""
This is the master function that calls subsequent functions
to extract uwnd, vwnd, slp for the specified
tide gauges
delta: distance (in degrees) from the tide gauge
"""
print('Delta = {}'.format(delta), '\n')
#defining the folders for predictors
nc_path = {'slp' : "/lustre/fs0/home/mtadesse/era_five/slp",\
"wnd_u": "/lustre/fs0/home/mtadesse/era_five/wnd_u",\
'wnd_v' : "/lustre/fs0/home/mtadesse/era_five/wnd_v"}
surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
csv_path = "/lustre/fs0/home/mtadesse/erafive_localized"
#cd to the obs_surge dir to get TG information
os.chdir(surge_path)
tg_list = os.listdir()
#################################
#looping through the predictor folders
#################################
for pf in nc_path.keys():
print(pf, '\n')
os.chdir(nc_path[pf])
####################################
#looping through the years of the chosen predictor
####################################
for py in os.listdir():
os.chdir(nc_path[pf]) #back to the predictor folder
print(py, '\n')
#get netcdf components - give predicor name and predictor file
nc_file = readnetcdf(pf, py)
lon, lat, time, pred = nc_file[0], nc_file[1], nc_file[2], \
nc_file[3]
x = 417
y = 418
#looping through individual tide gauges
for t in range(x, y):
#the name of the tide gauge - for saving purposes
# tg = tg_list[t].split('.mat.mat.csv')[0]
tg = tg_list[t]
#extract lon and lat data from surge csv file
print("tide gauge", tg, '\n')
os.chdir(surge_path)
if os.stat(tg).st_size == 0:
print('\n', "This tide gauge has no surge data!", '\n')
continue
surge = pd.read_csv(tg, header = None)
#surge_with_date = add_date(surge)
#define tide gauge coordinate(lon, lat)
tg_cord = Coordinate(float(surge.iloc[1,4]), float(surge.iloc[1,5]))
print(tg_cord)
#find closest grid points and their indices
close_grids = findPixels(tg_cord, delta, lon, lat)
ind_grids = findindx(close_grids, lon, lat)
ind_grids.columns = ['lon', 'lat']
#loop through preds#
#subset predictor on selected grid size
print("subsetting \n")
pred_new = subsetter(pred, ind_grids, time)
#create directories to save pred_new
os.chdir(csv_path)
#tide gauge directory
tg_name = tg.split('.csv')[0]
try:
os.makedirs(tg_name)
os.chdir(tg_name) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(tg_name)
#predictor directory
pred_name = pf
try:
os.makedirs(pred_name)
os.chdir(pred_name) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(pred_name)
#time for saving file
print("saving as csv")
yr_name = py.split('_')[-1]
save_name = '_'.join([tg_name, pred_name, yr_name])\
+ ".csv"
pred_new.to_csv(save_name)
#return to the predictor directory
os.chdir(nc_path[pf])
#run script
extract_data(delta= 1) | [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
b3bbf241bc800e52afdc11e43aaefdfc56e938a5 | c9acc2aebb8ba0aa285c38a75e920073923670f9 | /tianyayingshi/src/sentiment/snownlp/normal/__init__.py | a4231e2b2e06164135a4b518fa72a3893c6fa28d | [] | no_license | taojy123/BySM | 4821d92b73c8d9d2bb38356e2d0fc1893fd8497a | 6988d7b33085f2d4ae8e4773f494ff18190318a5 | refs/heads/master | 2020-04-05T14:35:58.520256 | 2016-09-06T07:18:55 | 2016-09-06T07:18:55 | 27,417,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import re
import codecs
from . import zh
from . import pinyin
stop_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'stop-words_chinese_1_zh.txt')
pinyin_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'pinyin.txt')
stop = set()
fr = codecs.open(stop_path, 'r', 'utf-8')
for word in fr:
stop.add(word.strip())
fr.close()
pin = pinyin.PinYin(pinyin_path)
def filter_stop(words):
return list(filter(lambda x: x not in stop, words))
def zh2hans(sent):
return zh.transfer(sent)
def get_sentences(doc):
line_break = re.compile('[\r\n]')
delimiter = re.compile('[,。?!;]')
sentences = []
for line in line_break.split(doc):
line = line.strip()
if not line:
continue
for sent in delimiter.split(line):
sent = sent.strip()
if not sent:
continue
sentences.append(sent)
return sentences
def get_pinyin(sentence):
return pin.get(sentence)
| [
"taojy123@163.com"
] | taojy123@163.com |
0899c62e03a18f7bd82ee50637c5476d2490ef39 | 641ff82ed3dd80c20dcfffd44fd183350eb11836 | /PyFunceble/core/__init__.py | f80a742a6bb57d657e7de163d8e97846d22b84da | [
"MIT"
] | permissive | cargo12/PyFunceble | 3d338b0b6d8b4dbff30090dc694c54457cf1e65b | d6033601909e88495f5b102e45ee7d5a48dbe46a | refs/heads/master | 2021-02-05T08:20:34.229758 | 2020-02-02T20:27:42 | 2020-02-02T20:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,194 | py | """
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the core interfaces.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .api import APICore as API
from .cli import CLICore as CLI
from .file import FileCore as File
from .multiprocess import MultiprocessCore as Multiprocess
from .simple import SimpleCore as Simple
| [
"contact@funilrys.com"
] | contact@funilrys.com |
79aa3263058778bc9d862d6e547ac28284d77218 | e073d58c135e4b27b861946a6e84aa5b2e0ae7f2 | /datastructure/double_pointer/ReverseString.py | aaa9f0f09a857e1e8fb20514e15fbc435fca632e | [] | no_license | yinhuax/leet_code | c4bdb69752d441af0a3bcc0745e1133423f60a7b | 9acba92695c06406f12f997a720bfe1deb9464a8 | refs/heads/master | 2023-07-25T02:44:59.476954 | 2021-09-04T09:07:06 | 2021-09-04T09:07:06 | 386,097,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Mike
# @Contact : 597290963@qq.com
# @Time : 2021/1/25 23:25
# @File : ReverseString.py
from typing import List
"""
编写一个函数,其作用是将输入的字符串反转过来。输入字符串以字符数组 char[] 的形式给出。
不要给另外的数组分配额外的空间,你必须原地修改输入数组、使用 O(1) 的额外空间解决这一问题。
你可以假设数组中的所有字符都是 ASCII 码表中的可打印字符。
作者:力扣 (LeetCode)
链接:https://leetcode-cn.com/leetbook/read/array-and-string/cacxi/
来源:力扣(LeetCode)
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
"""
class ReverseString(object):
def __init__(self):
pass
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
i, j = len(s) - 1, 0
while i > j:
s[i], s[j] = s[j], s[i]
i -= 1
j += 1
if __name__ == '__main__':
print(ReverseString().reverseString(["h", "e", "l", "l", "o"]))
| [
"597290963@qq.com"
] | 597290963@qq.com |
e8885c3766b279cf42727722c441609806e35ff1 | c988a8856d2d3fb7771417b4c7810e528a197d2b | /restaurant py.py | 6e141d75b64eaccbbd3ff3c4f0ded38e66fb6987 | [] | no_license | arunekuriakose/MyPython | 0c8a9161fef20bf77f7ba31149ec4ba0fa79b0bd | 19f44819612a8490d430bafec0616f68ce109776 | refs/heads/master | 2022-01-20T07:56:48.505226 | 2019-07-22T06:26:52 | 2019-07-22T06:26:52 | 198,158,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,350 | py |
def Ref():
x=random.randint(10908,500876)
randomRef=str(x)
rand.set(randomRef)
if (Fries.get()==""):
CoFries=0
else:
CoFries=float(Fries.get())
if (Noodles.get()==""):
CoNoodles=0
else:
CoNoodles=float(Noodles.get())
if (Soup.get()==""):
CoSoup=0
else:
CoSoup=float(Soup.get())
if (Burger.get()==""):
CoBurger=0
else:
CoBurger=float(Burger.get())
if (Sandwich.get()==""):
CoSandwich=0
else:
CoSandwich=float(Sandwich.get())
if (Drinks.get()==""):
CoD=0
else:
CoD=float(Drinks.get())
CostofFries =CoFries * 140
CostofDrinks=CoD * 65
CostofNoodles = CoNoodles* 90
CostofSoup = CoSoup * 140
CostBurger = CoBurger* 260
CostSandwich=CoSandwich * 300
CostofMeal= "Rs", str('%.2f' % (CostofFries+CostofDrinks+CostofNoodles+CostofSoup+CostBurger+CostSandwich))
PayTax=((CostofFries+CostofDrinks+CostofNoodles+CostofSoup+CostBurger+CostSandwich) * 0.2)
TotalCost=(CostofFries+CostofDrinks+CostofNoodles+CostofSoup+CostBurger+CostSandwich)
Ser_Charge= ((CostofFries+CostofDrinks+CostofNoodles+CostofSoup+CostBurger+CostSandwich)/99)
Service = "Rs", str ('%.2f' % Ser_Charge)
OverAllCost ="Rs", str ('%.2f' % (PayTax+TotalCost+Ser_Charge))
PaidTax= "Rs", str ('%.2f' % PayTax)
Service_Charge.set(Service)
Cost.set(CostofMeal)
Tax.set(PaidTax)
SubTotal.set(CostofMeal)
Total.set(OverAllCost)
def qExit():
root.destroy()
def Reset():
rand.set("")
Fries.set("")
Noodles.set("")
Soup.set("")
SubTotal.set("")
Total.set("")
Service_Charge.set("")
Drinks.set("")
Tax.set("")
Cost.set("")
Burger.set("")
Sandwich.set("")
"""
#====================================Restaraunt Info 1===========================================================
rand = StringVar()
Fries=StringVar()
Noodles=StringVar()
Soup=StringVar()
SubTotal=StringVar()
Total=StringVar()
Service_Charge=StringVar()
Drinks=StringVar()
Tax=StringVar()
Cost=StringVar()
Burger=StringVar()
Sandwich=StringVar()
root.mainloop()
"""
| [
"noreply@github.com"
] | arunekuriakose.noreply@github.com |
1f2eee3ef88caeef047310f977d6057f636ec90e | 7f60d03d7326a2768ffb103b7d635260ff2dedd7 | /system/base/expat/actions.py | a7eacf5e14263dd9faa35ca3c6069079237186fe | [] | no_license | haneefmubarak/repository | 76875e098338143852eac230d5232f4a7b3c68bd | 158351499450f7f638722667ffcb00940b773b66 | refs/heads/master | 2021-01-17T17:51:22.358811 | 2014-03-24T19:41:12 | 2014-03-24T19:41:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |
#!/usr/bin/python
# Created For SolusOS
from pisi.actionsapi import shelltools, get, autotools, pisitools
def setup():
autotools.configure ("--prefix=/usr --disable-static")
def build():
autotools.make ()
def install():
autotools.rawInstall ("DESTDIR=%s" % get.installDIR())
# Add the documentation
pisitools.dodoc ("doc/*.css", "doc/*.png", "doc/*.html")
| [
"ikey@solusos.com"
] | ikey@solusos.com |
e308b90da0b42e67a6db5c0131db265389a98250 | 567ecf4ea5afbd7eb3003f7e14e00c7b9289b9c6 | /ax/utils/common/tests/test_testutils.py | c86a3a1a299a37c71651c9a594ba17df307b3a09 | [
"MIT"
] | permissive | danielrjiang/Ax | f55ef168a59381b5a03c6d51bc394f6c72ed0f39 | 43014b28683b3037b5c7307869cb9b75ca31ffb6 | refs/heads/master | 2023-03-31T12:19:47.118558 | 2019-12-02T16:47:39 | 2019-12-02T16:49:36 | 225,493,047 | 0 | 0 | MIT | 2019-12-03T00:09:52 | 2019-12-03T00:09:51 | null | UTF-8 | Python | false | false | 2,254 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import io
import sys
from ax.utils.common.testutils import TestCase
def _f():
e = RuntimeError("Test")
raise e
def _g():
_f() # Lines along the path are matched too
class TestTestUtils(TestCase):
def test_raises_on(self):
with self.assertRaisesOn(RuntimeError, "raise e"):
_f()
# Check that we fail if the source line is not what we expect
with self.assertRaisesRegex(Exception, "was not found in the traceback"):
with self.assertRaisesOn(RuntimeError, 'raise Exception("Test")'):
_f()
# Check that the exception passes through if it's not the one we meant to catch
with self.assertRaisesRegex(RuntimeError, "Test"):
with self.assertRaisesOn(AssertionError, "raise e"):
_f()
with self.assertRaisesOn(
RuntimeError, "_f() # Lines along the path are matched too"
):
_g()
# Use this as a context manager to get the position of an error
with self.assertRaisesOn(RuntimeError) as cm:
_f()
self.assertEqual(cm.filename, __file__)
self.assertEqual(cm.lineno, 12)
def test_silence_warning_normal(self):
new_stderr = io.StringIO()
old_err = sys.stderr
try:
sys.stderr = new_stderr
with self.silence_stderr():
print("A message", file=sys.stderr)
finally:
sys.stderr = old_err
self.assertEqual(new_stderr.getvalue(), "")
def test_silence_warning(self):
new_stderr = io.StringIO()
old_err = sys.stderr
with self.assertRaises(AssertionError):
try:
sys.stderr = new_stderr
with self.silence_stderr():
print("A message", file=sys.stderr)
raise AssertionError()
finally:
sys.stderr = old_err
self.assertTrue(new_stderr.getvalue().startswith("A message\n"))
def test_fail_deprecated(self):
self.assertEqual(1, 1)
with self.assertRaises(RuntimeError):
self.assertEquals(1, 1)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
da4ba7f8e2bcf8f18cb88f0f33133479d6f3c350 | eb4119dda59e44fc418be51a33c11e5d32f29fd7 | /src/ssadmin/migrations/0002_auto_20161111_1238.py | ad31ef9ffd6873708716cc6852985b4aad46eac4 | [
"MIT"
] | permissive | daimon99/ssadmin | 4ee08f4d56bc8f27099f1e1caa72a3ca8b8b1b57 | 9a1470712bdca5b0db17895d4c8215555d6b1b04 | refs/heads/master | 2020-12-24T12:29:49.070231 | 2016-11-11T13:26:34 | 2016-11-11T13:26:34 | 72,992,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-11 12:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ssadmin', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='flow_add_history',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('port', models.IntegerField()),
],
),
migrations.CreateModel(
name='flow_update_history',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='FlowUseHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('port', models.IntegerField()),
('limit', models.BigIntegerField()),
('remaining', models.BigIntegerField()),
('used', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='password_change_history',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RemoveField(
model_name='ssuser',
name='balance',
),
migrations.RemoveField(
model_name='ssuser',
name='up_user',
),
migrations.AddField(
model_name='flow_add_history',
name='ssuser',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ssadmin.SSUser'),
),
]
| [
"daijian1@qq.com"
] | daijian1@qq.com |
900b433e08f9ae6f6f1b68885f1633a7b7c47e66 | 3c0104047e826816f7e945434b718b33b7499629 | /sandbox/MP/fun.py | b08d78db857ed97a1ecad1a1fa8188ba5907855b | [] | no_license | larsendt/xhab-spot | 8080d6a213e407fb75cc246702e1dbbcd94eae57 | bd2bdfcb0414f8ed0cbb3be95a5ba13d9baf6b4b | refs/heads/master | 2021-01-19T11:02:49.250480 | 2014-05-06T17:29:30 | 2014-05-06T17:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | import lcddriver
import time
lcd=lcddriver.lcd()
lcd.lcd_print_char('X',2, 7)
lcd.lcd_print_char('-',2, 8)
lcd.lcd_print_char('H',2, 9)
lcd.lcd_print_char('A',2, 10)
lcd.lcd_print_char('B',2, 11)
lcd.lcd_print_char('2',3, 7)
lcd.lcd_print_char('0',3, 8)
lcd.lcd_print_char('1',3, 9)
lcd.lcd_print_char('4',3, 10)
lcd.lcd_print_char('!',3, 11)
lcd.lcd_cursor_placement(4,19)
| [
"dane.t.larsen@gmail.com"
] | dane.t.larsen@gmail.com |
7c85e2f48091128f9c0e0f945e742828363d5869 | 9bcba8f3162eacea872dbadc9990a164f945f70a | /Packages/comandos/node_npm_install.py | 5c253d9822e9ba351460bbabafd8c34996310aff | [] | no_license | programadorsito/Packages | a4cb568219dbc10a69e15a2832ef52d19eb83061 | af748327f128ed90bb146dc12bb53b76ccb609fd | refs/heads/master | 2021-01-10T04:41:38.676159 | 2016-04-06T07:52:45 | 2016-04-06T07:52:45 | 55,560,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py |
import sublime, os, platform
import sublime_plugin
import subprocess
import webbrowser
from subprocess import PIPE, Popen
class NodeNpmInstallCommand(sublime_plugin.TextCommand):
def run(self, edit):
window=sublime.active_window()
view=window.active_view()
view.run_command("ejecutar_comando", {"comando":'npm install -g @package'})
| [
"Mac@MacBook-de-Mac.local"
] | Mac@MacBook-de-Mac.local |
7bfd8413070f139e0e99eef5b6377b13be31dc6e | 61f8733c7e25610d04eaccd59db28aa65897f846 | /dot blog/Built-in functions/sorted.py | 27db07a9356814e0017a97afc970ff639092283a | [] | no_license | masato932/study-python | 7e0b271bb5c8663fad1709e260e19ecb2d8d4681 | 03bedc5ec7a9ecb3bafb6ba99bce15ccd4ae29cc | refs/heads/main | 2023-03-27T13:32:05.605593 | 2021-03-21T10:45:16 | 2021-03-21T10:45:16 | 342,985,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # x = [5, 8, 4, 1, 3, 2, 7, 6]
# y = sorted(x)
# print(y)
# x = [5, 8, 4, 1, 3, 2, 7, 6]
# y = sorted(x, reverse=True)
# print(y)
# x = (5, 8, 4, 1, 3, 2, 7, 6)
# y = sorted(x, reverse=True)
# print(y)
# x = {'b':20, 'c':30, 'a':9, 'd':10}
# y = sorted(x, reverse=True)
# print(y)
# x = {'b':20, 'c':20, 'a':40, 'd':10}
# y = sorted(x.values(), reverse=True)
# print(y)
# x = {'b':20, 'c':30, 'a':40, 'd':10}
# y = sorted(x.items(), reverse=True)
# print(y)
# x = 'cbazdtRMLNKII'
# y = sorted(x, reverse=True)
# print(y)
# x = ['apple', 'Apple', 'amazon', 'Amazon', 'windows', 'Windows', 'walmart', 'Walmart']
# y = sorted(x, reverse=True)
# print(y)
x = ['apple', 'Apple', 'amazon', 'Amazon', 'windows', 'Windows', 'walmart', 'Walmart']
y = sorted(x, key=len, reverse=True)
print(y) | [
"masatowada66@gmail.com"
] | masatowada66@gmail.com |
233fa2bcb9aae97ffdfc0577a2228c2fc29f90a1 | eb5e319b2e7052a007b645d200f810241a2dd07c | /backend/wma_22660/urls.py | 8ad2fe276469de71f56428bdadfbabe0cd4afacc | [] | no_license | crowdbotics-apps/wma-22660 | 206bb7c670385a65528a9b14f0783f354397a1a6 | da2a25ff937242eed2b2ab3dbec4fd690b3db0a1 | refs/heads/master | 2023-01-14T07:49:09.570432 | 2020-11-16T04:23:15 | 2020-11-16T04:23:15 | 313,189,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,578 | py | """wma_22660 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("task.api.v1.urls")),
path("task/", include("task.urls")),
path("api/v1/", include("task_profile.api.v1.urls")),
path("task_profile/", include("task_profile.urls")),
path("api/v1/", include("tasker_business.api.v1.urls")),
path("tasker_business/", include("tasker_business.urls")),
path("api/v1/", include("location.api.v1.urls")),
path("location/", include("location.urls")),
path("api/v1/", include("wallet.api.v1.urls")),
path("wallet/", include("wallet.urls")),
path("api/v1/", include("task_category.api.v1.urls")),
path("task_category/", include("task_category.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "WMA"
admin.site.site_title = "WMA Admin Portal"
admin.site.index_title = "WMA Admin"
# swagger
api_info = openapi.Info(
title="WMA API",
default_version="v1",
description="API documentation for WMA App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
396dc83dbf9b54aab58f727e635fc95ce30d5f00 | 4216a5fbdea90359abd5597589a12876f02d002c | /2016-March/python/set_container.py | 8494241f9abdcf75a9b8283ae792f865c21e94eb | [] | no_license | tammachari/big-datascience | 014bfa26f37bb99e933a645aec0b2e8b2f74061f | 6e4d2cfffca73297e1284ea6c1498fb08637f641 | refs/heads/master | 2020-07-04T15:11:32.579219 | 2018-10-07T05:11:48 | 2018-10-07T05:11:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | #set container creation and access
animals = set(['cat', 'dog'])
'cat' in animals
'fish' in animals
animals.add('fish')
'fish' in animals
len(animals)
animals.add('cat')
len(animals)
animals.remove('cat')
len(animals)
#loops
animals = set(['cat', 'dog', 'fish'])
for animal in animals:
print animal
for idx, animal in enumerate(animals):
print '#%d: %s' % (idx + 1, animal)
| [
"info@algorithmica.co.in"
] | info@algorithmica.co.in |
28f42444746b3833cfc883c113d802f3661801af | 115c43a6b9bb198d07003684692a37fc97bb626d | /tests/test_utils.py | 7d33d71bed289559ec953c02022d218261e27e48 | [
"BSD-3-Clause"
] | permissive | rodriguez-facundo/pyecore | c79fe8a900fdf4cd8b6ccad6253c2bd57f96ac9f | 22b67ad8799594f8f44fd8bee497583d4f12ed63 | refs/heads/master | 2020-05-17T11:43:04.227860 | 2019-04-19T15:27:00 | 2019-04-19T15:27:00 | 183,692,299 | 0 | 0 | BSD-3-Clause | 2019-07-08T20:14:49 | 2019-04-26T20:48:15 | Python | UTF-8 | Python | false | false | 3,133 | py | import pytest
from pyecore.ecore import *
from pyecore.utils import DynamicEPackage, original_issubclass, alias
import builtins
@pytest.fixture(scope='module')
def simplemm():
A = EClass('A')
B = EClass('B')
Root = EClass('Root')
pack = EPackage('pack', nsURI='http://pack/1.0', nsPrefix='pack')
pack.eClassifiers.extend([Root, A, B])
return pack
@pytest.fixture(scope='module')
def complexmm():
A = EClass('A')
B = EClass('B')
Root = EClass('Root')
pack = EPackage('pack', nsURI='http://pack/1.0', nsPrefix='pack')
pack.eClassifiers.extend([Root, A, B])
innerpackage = EPackage('inner', nsURI='http://inner', nsPrefix='inner')
C = EClass('C')
D = EClass('D')
innerpackage.eClassifiers.extend([C, D])
pack.eSubpackages.append(innerpackage)
return pack
def test_dynamic_access_eclasses(simplemm):
SimpleMM = DynamicEPackage(simplemm)
assert SimpleMM.A
assert SimpleMM.B
def test_dynamic_access_innerpackage(complexmm):
ComplexMM = DynamicEPackage(complexmm)
assert ComplexMM.A
assert ComplexMM.B
assert ComplexMM.inner.C
assert ComplexMM.inner.D
def test_dynamic_addition_eclasses(complexmm):
ComplexMM = DynamicEPackage(complexmm)
E = EClass('E')
complexmm.eClassifiers.append(E)
assert ComplexMM.E
F = EClass('F')
complexmm.eSubpackages[0].eClassifiers.append(F)
assert ComplexMM.inner.F
G = EClass('G')
H = EClass('H')
complexmm.eClassifiers.extend([G, H])
assert ComplexMM.G
assert ComplexMM.H
def test_dynamic_removal_eclasses(complexmm):
ComplexMM = DynamicEPackage(complexmm)
assert ComplexMM.Root
complexmm.eClassifiers.remove(ComplexMM.Root)
with pytest.raises(AttributeError):
ComplexMM.Root
assert ComplexMM.A
complexmm.eClassifiers[0].delete()
with pytest.raises(AttributeError):
ComplexMM.A
def test_original_issubclass():
issub = builtins.issubclass
with original_issubclass():
assert builtins.issubclass is not issub
assert builtins.issubclass is issub
def test_alias_function_static():
@EMetaclass
class A(object):
from_ = EAttribute(eType=EString)
a = A()
assert getattr(a, 'from', -1) == -1
alias('from', A.from_, eclass=A)
assert getattr(a, 'from') is None
@EMetaclass
class B(object):
as_ = EAttribute(eType=EInt)
b = B()
assert getattr(b, 'as', -1) == -1
alias('as', B.as_)
assert getattr(b, 'as') is 0
b.as_ = 4
assert b.as_ == 4
assert getattr(b, 'as') == 4
def test_alias_function_dynamic():
A = EClass('A')
A.eStructuralFeatures.append(EAttribute('from', EString))
a = A()
assert getattr(a, 'from_', -1) == -1
alias('from_', A.findEStructuralFeature('from'), eclass=A)
assert a.from_ is None
B = EClass('B')
B.eStructuralFeatures.append(EAttribute('as', EInt))
b = B()
assert getattr(b, 'as_', -1) == -1
alias('as_', B.findEStructuralFeature('as'))
assert b.as_ is 0
b.as_ = 4
assert b.as_ == 4
assert getattr(b, 'as') == 4
| [
"vincent.aranega@gmail.com"
] | vincent.aranega@gmail.com |
bf0a7bbad9c3796914a9d05b6776b5e21c20e70f | 86137c14ebfef6f3cf0bc3af92f443f93f48ae7d | /dvrl/main_domain_adaptation.py | d5406d61435059b04741b615df7b809730994c99 | [
"Apache-2.0"
] | permissive | jisungyoon/google-research | 692a51a91df36022dba9eb9e439fb69c1b6035e7 | 8e4097cc7c9b7cc57d85fd2f57d0684e737c3fc3 | refs/heads/master | 2020-08-23T14:37:43.706773 | 2019-10-21T18:20:41 | 2019-10-21T18:24:43 | 216,641,313 | 1 | 0 | Apache-2.0 | 2019-10-21T18:46:49 | 2019-10-21T18:46:49 | null | UTF-8 | Python | false | false | 5,391 | py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main experiment for domain adaptation.
Main experiment of a domain adaptation application
using "Data Valuation using Reinforcement Learning (DVRL)"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import lightgbm
import numpy as np
import tensorflow as tf
from dvrl import data_loading
from dvrl import dvrl
from dvrl import dvrl_metrics
def main(args):
"""Main function of DVRL for domain adaptation experiment.
Args:
args: train_no, valid_no,
normalization, network parameters
"""
# Data loading
# The number of training and validation samples
dict_no = dict()
dict_no['source'] = args.train_no
dict_no['valid'] = args.valid_no
# Setting and target store type
setting = 'train-on-rest'
target_store_type = 'B'
# Network parameters
parameters = dict()
parameters['hidden_dim'] = args.hidden_dim
parameters['comb_dim'] = args.comb_dim
parameters['iterations'] = args.iterations
parameters['activation'] = tf.nn.tanh
parameters['layer_number'] = args.layer_number
parameters['batch_size'] = args.batch_size
parameters['learning_rate'] = args.learning_rate
# Checkpoint file name
checkpoint_file_name = args.checkpoint_file_name
# Data loading
data_loading.load_rossmann_data(dict_no, setting, target_store_type)
print('Finished data loading.')
# Data preprocessing
# Normalization methods: 'minmax' or 'standard'
normalization = args.normalization
# Extracts features and labels. Then, normalizes features
x_source, y_source, x_valid, y_valid, x_target, y_target, _ = \
data_loading.preprocess_data(normalization,
'source.csv', 'valid.csv', 'target.csv')
print('Finished data preprocess.')
# Run DVRL
# Resets the graph
tf.reset_default_graph()
problem = 'regression'
# Predictor model definition
pred_model = lightgbm.LGBMRegressor()
# Flags for using stochastic gradient descent / pre-trained model
flags = {'sgd': False, 'pretrain': False}
# Initializes DVRL
dvrl_class = dvrl.Dvrl(x_source, y_source, x_valid, y_valid,
problem, pred_model, parameters,
checkpoint_file_name, flags)
# Trains DVRL
dvrl_class.train_dvrl('rmspe')
print('Finished dvrl training.')
# Outputs
# Data valuation
dve_out = dvrl_class.data_valuator(x_source, y_source)
print('Finished date valuation.')
# Evaluations
# Evaluation model
eval_model = lightgbm.LGBMRegressor()
# DVRL-weighted learning
dvrl_perf = dvrl_metrics.learn_with_dvrl(dve_out, eval_model,
x_source, y_source,
x_valid, y_valid,
x_target, y_target, 'rmspe')
# Baseline prediction performance (treat all training samples equally)
base_perf = dvrl_metrics.learn_with_baseline(eval_model,
x_source, y_source,
x_target, y_target, 'rmspe')
print('Finish evaluation.')
print('DVRL learning performance: ' + str(np.round(dvrl_perf, 4)))
print('Baseline performance: ' + str(np.round(base_perf, 4)))
return
if __name__ == '__main__':
# Inputs for the main function
parser = argparse.ArgumentParser()
parser.add_argument(
'--normalization',
help='data normalization method',
default='minmax',
type=str)
parser.add_argument(
'--train_no',
help='number of training samples',
default=667027,
type=int)
parser.add_argument(
'--valid_no',
help='number of validation samples',
default=8443,
type=int)
parser.add_argument(
'--hidden_dim',
help='dimensions of hidden states',
default=100,
type=int)
parser.add_argument(
'--comb_dim',
help='dimensions of hidden states after combinding with prediction diff',
default=10,
type=int)
parser.add_argument(
'--layer_number',
help='number of network layers',
default=5,
type=int)
parser.add_argument(
'--iterations',
help='number of iterations',
default=1000,
type=int)
parser.add_argument(
'--batch_size',
help='number of batch size for RL',
default=50000,
type=int)
parser.add_argument(
'--learning_rate',
help='learning rates for RL',
default=0.001,
type=float)
parser.add_argument(
'--checkpoint_file_name',
help='file name for saving and loading the trained model',
default='./tmp/model.ckpt',
type=str)
args_in = parser.parse_args()
# Calls main function
main(args_in)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f45993035beacee89af6f73781deef6bd4d70eb8 | 81d0bfe1262008587ddf5ac12ae034d6922b9747 | /.history/Smart/admin/routes_20201121105136.py | f9e3333b2e15b0050561af767aad2f7d7b4be435 | [] | no_license | elvinyeka/Smart-Mobile | 525fffac14b8c460e85002bbf154bf54b4a341fe | a32f557306ae1bfe3ae01f5a8beef93727cfbc47 | refs/heads/master | 2023-06-09T09:52:18.446572 | 2021-07-06T11:35:34 | 2021-07-06T11:35:34 | 313,988,596 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | from flask import render_template,session,request,redirect, flash, url_for, flash
from Smart import app, db, bcrypt
from .forms import RegistrationForm, LoginForm
from .models import User
@app.route('/')
def index():
return render_template('index.html', title='home')
@app.route('/admin')
def admin():
return render_template('admin/index.html', title='Admin Page')
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm(request.form)
if request.method == 'POST' and form.validate():
hash_password = bcrypt.generate_password_hash(form.password.data)
user = User(name=form.name.data, username=form.username.data, email=form.email.data, password=hash_password)
db.session.add(user)
db.session.commit()
flash(f'{form.name.data} Qeydiyyat uğurlu oldu', 'success')
return redirect(url_for('login'))
return render_template('admin/register.html', form=form,title='Registeration Page')
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if request.method == 'POST' and form.validate():
user = User.query.filter_by(email = form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
session['email'] = form.email.data
flash(f'Xoş gəldiniz {form.email.data} Girişiniz uğurlu oldu.', 'success')
return redirect(request.args.get('next') or url_for('admin'))
else:
flash('Şifrə səhvdir. Yenidən cəhd edin', 'danger')
return render_template('admin/login.html',form=form, title='login')
@app.route('/contact')
def contact():
return render_template('contact.html', title='contact')
@app.route('/compare')
def compare():
return render_template('compare.html', title='compare')
@app.route('/single-product')
def single_product():
return render_template('single-product.html',title='product')
@app.route('/brand-product')
def brand_product():
return render_template('brand-product.html',title='brand') | [
"elvinyeka@gmail.com"
] | elvinyeka@gmail.com |
0168af70be6863c8cd4c801977ae32fd19efe41d | c210b5111fbe0bafda441bdc8fa12cd8eb77a20b | /vframe/src/settings/app_cfg.py | dd37e85e67f9ddcd6cec0a47ec42e1e41b7b9db6 | [
"MIT"
] | permissive | noticeable/vframe_workshop | 345fc214bf6f755042a5f8470d002fd9a244c1a7 | 7adf822a0687b5621b04bf790d2c9e77f3371284 | refs/heads/master | 2020-04-28T19:59:32.826001 | 2019-01-29T20:28:20 | 2019-01-29T20:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py |
from os.path import join
import logging
from src.settings import types
from src.utils import click_utils
# -----------------------------------------------------------------------------
# Enun lists used for custom Click Params
# -----------------------------------------------------------------------------
LogLevelVar = click_utils.ParamVar(types.LogLevel)
# # data_store
DATA_STORE_WORKSHOP = 'data_store_workshop'
DIR_MODELS = join(DATA_STORE_WORKSHOP,'models')
# Frameworks
DIR_MODELS_CAFFE = join(DIR_MODELS,'caffe')
# -----------------------------------------------------------------------------
# click chair settings
# -----------------------------------------------------------------------------
DIR_COMMANDS_WORKSHOP = 'commands/workshop'
# -----------------------------------------------------------------------------
# Logging options exposed for custom click Params
# -----------------------------------------------------------------------------
LOGGER_NAME = 'vframe'
LOGLEVELS = {
types.LogLevel.DEBUG: logging.DEBUG,
types.LogLevel.INFO: logging.INFO,
types.LogLevel.WARN: logging.WARN,
types.LogLevel.ERROR: logging.ERROR,
types.LogLevel.CRITICAL: logging.CRITICAL
}
LOGLEVEL_OPT_DEFAULT = types.LogLevel.DEBUG.name
LOGFILE_FORMAT = "%(log_color)s%(levelname)-8s%(reset)s %(cyan)s%(filename)s:%(lineno)s:%(bold_cyan)s%(funcName)s() %(reset)s%(message)s"
| [
"adam@ahprojects.com"
] | adam@ahprojects.com |
054bdccad3c52d4833d907224a62b801552fde4e | 4d892dc51e2dda0fcce246ac608fc4e0ce98c52b | /FirstStepsInPython/Fundamentals/Exercice/Lists Basics/01. Invert Values.py | 94485830d6ba2cdf174235b9aeba6a63d40b5521 | [
"MIT"
] | permissive | inovei6un/SoftUni-Studies-1 | 510088ce65e2907c2755a15e427fd156909157f0 | 3837c2ea0cd782d3f79353e61945c08a53cd4a95 | refs/heads/main | 2023-08-14T16:44:15.823962 | 2021-10-03T17:30:48 | 2021-10-03T17:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | str_nums = input()
some_list = str_nums.split(input())
result = []
for el in some_list:
result.append((int(el) * -1))
print(result)
# INPUT 1
# 1 2 -3 -3 5
# INPUT 2
# -4 0 2 57 -101
# INPUT END
| [
"lazar_off@yahoo.com"
] | lazar_off@yahoo.com |
43a0237619f79a2a4564f43277d436d2cd99c135 | ea24104edfb276f4db780638fcc8e6cf7f7dceb8 | /setup.py | fb5b5953ca8e448d8bacb6b06e3d65a80473b5a4 | [
"MIT"
] | permissive | jmoujaes/dpaste | 6e195dc7f3a53ae2850aa4615514876597b6564d | 27d608e5da4b045ea112823ec8d271add42fd89d | refs/heads/master | 2021-01-25T14:33:06.648359 | 2018-03-03T19:57:51 | 2018-03-03T19:57:51 | 123,708,048 | 0 | 0 | MIT | 2018-03-03T16:08:54 | 2018-03-03T16:08:54 | null | UTF-8 | Python | false | false | 1,704 | py | #!/usr/bin/env python
from sys import exit
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(self.test_args)
exit(errno)
long_description = u'\n\n'.join((
open('README.rst').read(),
open('CHANGELOG').read()
))
setup(
name='dpaste',
version='2.13',
description='dpaste is a Django based pastebin. It\'s intended to run '
'separately but its also possible to be installed into an '
'existing Django project like a regular app.',
long_description=long_description,
author='Martin Mahner',
author_email='martin@mahner.org',
url='https://github.com/bartTC/dpaste/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Framework :: Django',
],
packages=find_packages(),
package_data={
'dpaste': ['static/*.*', 'templates/*.*'],
'docs': ['*'],
},
include_package_data=True,
install_requires=[
'django>=1.8,<2.0',
'pygments>=1.6',
],
tests_require=[
'tox>=1.6.1'
],
cmdclass={
'test': Tox
},
)
| [
"martin@mahner.org"
] | martin@mahner.org |
de0326f435ffc96d79b974c8650e7f1ab6479947 | cdbb5bb7cbf188f8773a3a1425c2043f0af8a423 | /Attributes_and_Methods/gym_04E/project/gym.py | a1be11e84e7dfcd76026101a0fc75df9d371e352 | [
"MIT"
] | permissive | MihailMarkovski/Python-OOP-2020 | 72ac4f150907a6fa2fb93393cfcd2f36656e7256 | f0ac610bb758c2882e452684c8d7f533fcc33fe4 | refs/heads/main | 2023-01-31T18:24:53.184269 | 2020-12-17T09:39:01 | 2020-12-17T09:39:01 | 321,456,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,507 | py | # from Attributes_and_Methods.gym_04E.project.equipment import Equipment
# from Attributes_and_Methods.gym_04E.project.exercise_plan import ExercisePlan
# from Attributes_and_Methods.gym_04E.project.subscription import Subscription
# from Attributes_and_Methods.gym_04E.project.customer import Customer
# from Attributes_and_Methods.gym_04E.project.trainer import Trainer
class Gym:
def __init__(self):
self.customers = []
self.trainers = []
self.equipment = []
self.plans = []
self.subscriptions = []
def add_customer(self, customer):
if customer not in self.customers:
self.customers.append(customer)
def add_trainer(self, trainer):
if trainer not in self.trainers:
self.trainers.append(trainer)
def add_equipment(self, equipment):
if equipment not in self.equipment:
self.equipment.append(equipment)
def add_plan(self, plan):
if plan not in self.plans:
self.plans.append(plan)
def add_subscription(self, subscription):
if subscription not in self.subscriptions:
self.subscriptions.append(subscription)
def subscription_info(self, subscription_id: int):
subscription = [s for s in self.subscriptions if s.id == subscription_id][0]
customer = [c for c in self.customers if subscription.customer_id == c.id][0]
trainer = [t for t in self.trainers if t.id == subscription.trainer_id][0]
plan = [p for p in self.plans if trainer.id == p.trainer_id][0]
equipment = [e for e in self.equipment if e.id == plan.equipment_id][0]
result = f'{subscription}\n{customer}\n{trainer}\n{equipment}\n{plan}'
return result
# all_objects = [subscription, customer, trainer, equipment, plan]
# result = ''
# for idx, obj in enumerate(all_objects):
# if idx == len(all_objects) - 1:
# result += f'{obj}'
# else:
# result += f'{obj}\n'
# return result
# customer = Customer("John", "Maple Street", "john.smith@gmail.com")
# equipment = Equipment("Treadmill")
# trainer = Trainer("Peter")
# subscription = Subscription("14.05.2020", 1, 1, 1)
# plan = ExercisePlan(1, 1, 20)
#
# gym = Gym()
#
# gym.add_customer(customer)
# gym.add_equipment(equipment)
# gym.add_trainer(trainer)
# gym.add_plan(plan)
# gym.add_subscription(subscription)
#
# print(Customer.get_next_id())
#
# print(gym.subscription_info(1))
| [
"misho5071@gmail.com"
] | misho5071@gmail.com |
4d7318f876ee78f727ddf1f05e561ff13553e914 | 26b66f2d11b28bc5f859021e011d3b5d1e1ed8ee | /old-stuff-for-reference/nightjar-base/nightjar-src/python-src/nightjar/entry/central_configurator/process.py | e72b185f4fca333d13e8607059cce6c75f42dbc8 | [
"MIT"
] | permissive | groboclown/nightjar-mesh | 8f12c8b90a0b4dd5b6f871123e2d3d0c89001db2 | 3655307b4a0ad00a0f18db835b3a0d04cb8e9615 | refs/heads/master | 2022-12-13T13:04:02.096054 | 2020-08-12T23:30:30 | 2020-08-12T23:30:30 | 207,360,091 | 3 | 1 | MIT | 2022-12-13T11:13:30 | 2019-09-09T16:59:02 | Python | UTF-8 | Python | false | false | 1,516 | py |
"""
Process templates.
"""
from typing import Iterable
from ...backend.api.deployment_map import AbcDeploymentMap
from .content_gen import (
generate_content,
)
from .input_data_gen import load_service_color_data, load_namespace_data
from ...backend.api.data_store import (
AbcDataStoreBackend,
CollectorDataStore,
ConfigurationReaderDataStore,
ConfigurationWriterDataStore,
)
from ...protect import RouteProtection
def process_templates(
backend: AbcDataStoreBackend,
deployment_map: AbcDeploymentMap,
namespaces: Iterable[str],
namespace_protections: Iterable[RouteProtection],
service_protection: RouteProtection,
) -> None:
"""
Process all the namespace and service/color templates into the envoy proxy content.
This can potentially take up a lot of memory. It's performing a trade-off of minimizing
service calls for memory.
"""
namespace_data = load_namespace_data(deployment_map, namespaces, namespace_protections)
service_color_data = load_service_color_data(deployment_map, namespaces, service_protection)
with CollectorDataStore(backend) as collector:
with ConfigurationReaderDataStore(backend) as config_reader:
with ConfigurationWriterDataStore(backend) as config_writer:
generate_content(
collector, config_reader, config_writer,
namespace_data, service_color_data,
namespace_protections,
)
| [
"matt@groboclown.net"
] | matt@groboclown.net |
645e37cf5ff4fd8435a05cd045daca41fdaa7ae3 | cd0068a04a706aa902947d5f1233a7f07bec150c | /Python/function_practice/p1.py | d6bf1b18c9618753d720d712b4fdb9a3dd78f7ea | [] | no_license | eLtronicsVilla/Miscellaneous | b0c579347faad12cee64408c2a112b33b38a6201 | e83f44af985faad93336c1307aeae8694905c3e3 | refs/heads/master | 2021-11-28T04:22:08.278055 | 2021-08-14T19:53:29 | 2021-08-14T19:53:29 | 161,993,103 | 0 | 0 | null | 2021-08-14T19:53:29 | 2018-12-16T11:11:10 | C++ | UTF-8 | Python | false | false | 462 | py | #def myfuncs(a,b,c=0,d=0):
# return sum((a,b,c,d))*0.05
#print(myfuncs(1,2,3,4))
#def myfunc(*args):
# return sum(args) * 0.05
#print(myfunc(10,20,30,40))
'''
def myfunc(*args):
for item in args:
print(item)
print(myfunc(20,30,40,50,60))
'''
def myfunc(**kwargs):
print(kwargs)
if 'fruit' in kwargs:
print('My fruit of choice is {}'.format(kwargs['fruit']))
else:
print('I did not find any fruit here')
myfunc(fruit='apple',veggie = 'lettuce')
| [
"eltronicsvilla17@gmail.com"
] | eltronicsvilla17@gmail.com |
263693b9ffd66f4793977371b94af2600527b9f6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2411/60610/283132.py | 096e0adccc969e68233bd1a8ca9eecae10d02711 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | num=int(input())
mid=[]
for i in range(num):
mid.append(list(map(int,input().split(","))));
count=0;
if(mid[0][0]!=mid[1][0]):
k0=(mid[0][1]-mid[1][1])/(mid[0][0]-mid[1][0]);
for i in range(2,len(mid)):
k1=(mid[0][1]-mid[i][1])/(mid[0][0]-mid[i][0]);
if(k0!=k1):
count=1;
break;
else:
for i in range(2, len(mid)):
if(mid[i][0]!=mid[0][0]):
count=1;
break;
if(count==0):
print("True");
else:
print("False"); | [
"1069583789@qq.com"
] | 1069583789@qq.com |
b4d8cc119dc15ee7f84c18bfe77ddafd53fcd24d | 645050387b6d65986490045696a23b99484bfb07 | /week-15/course_management_system/course_management_system/lectures/urls.py | 073ec02e00ef9c31e4bba1d08f76b5ca8b9c2033 | [] | no_license | ViktorBarzin/HackBulgaria-Python | 73fd36299afd3fcbf5601c4a932780252db90001 | 7bae4528ded8ca3a33bb12c2a67123cc621d8b8f | refs/heads/master | 2020-12-25T15:08:23.345972 | 2017-06-07T01:19:36 | 2017-06-07T01:19:36 | 67,701,776 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from django.conf.urls import url, include
from django.contrib import admin
from course_management_system.lectures import views
urlpatterns = [
url(r'new/$', views.create_lecture),
url(r'edit/(?P<lecture_id>[0-9]+)', views.edit_lecture),
url(r'(?P<lecture_id>[0-9]+)', views.show_lecture)
]
| [
"vbarzin@gmail.com"
] | vbarzin@gmail.com |
958567a0c2066e663ecf210b5eb2888606d39a19 | b6aa9768dbac327943e0220df1c56ce38adc6de1 | /127_word-ladder.py | 9008bcaef54814bc92db08d538d8421e074a2a43 | [] | no_license | Khrystynka/LeetCodeProblems | f86e4c1e46f70f874924de137ec5efb2f2518766 | 917bd000c2a055dfa2633440a61ca4ae2b665fe3 | refs/heads/master | 2021-03-17T00:51:10.102494 | 2020-09-28T06:31:03 | 2020-09-28T06:31:03 | 246,954,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | # Problem Title: Word Ladder
from collections import defaultdict, deque
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: List[str]
:rtype: int
"""
L = len(beginWord)
d = defaultdict(list)
for word in wordList:
for i in range(L):
d[word[:i]+'*'+word[i+1:]] += [word]
visited = set(beginWord)
queu = deque([(beginWord, 1)])
next_words = []
while queu:
(word, level) = queu.popleft()
if word == endWord:
return level
next_words = []
for i in range(L):
intermid_word = word[:i]+'*'+word[i+1:]
next_words += (d[intermid_word])
d[intermid_word] = []
# print next_words, word
for word in next_words:
if word not in visited:
visited.add(word)
queu.append((word, level+1))
# print queu
return 0
| [
"khrystyna@Khrystynas-MacBook-Pro.local"
] | khrystyna@Khrystynas-MacBook-Pro.local |
59483881abc2212735a5b71e41b3c45117bcc327 | c34308d9e283d3689baeade246b69dad13eea0c1 | /cn/wensi/file/osDemo.py | 02f4fc3e0382fce16dfa0eab109f7456893718ad | [] | no_license | michaelChen07/studyPython | d19fe5762cfbccdff17248d7d5574939296d3954 | 11a2d9dd0b730cad464393deaf733b4a0903401f | refs/heads/master | 2021-01-19T00:20:27.347088 | 2017-05-13T08:43:44 | 2017-05-13T08:43:44 | 73,004,133 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | #encoding=utf-8
import os
#获取当前工作目录
print os.getcwd()
#更改当前目录
os.chdir(r"D:\workspace\PycharmProjects\studyPython\cn\wensi\practice")
print os.getcwd()
#当前工作目录下的文件
print os.listdir(os.getcwd())
#指定目录下的文件
print os.listdir(r"D:\test")
#返回当前目录
print os.curdir
#创建指定级数的目录
def create_multiple_dir(dir_path,depth,dir_name):
try:
os.chdir(dir_path)
except Exception,e:
return -1
for i in range(depth):
os.mkdir(dir_name+str(i))
os.chdir(dir_name+str(i))
with open(dir_name+str(i)+".txt","w") as fp:
fp.write(dir_name+str(i))
return 0
print create_multiple_dir("e:\\test",5,"gloryroad")
| [
"286522215@qq.com"
] | 286522215@qq.com |
90940afeec0ca4c91817d149ca18e38d03b93486 | 2781ffdb7dd131c43d5777d33ee002643c839c28 | /DataCamp Practice/Supervised Learning With Scikit Learn/03-fine-tuning-your-model/01-metrics-for-classification.py | 0a3ce40f7a5a2daa30330b0f25b204d57c35c593 | [] | no_license | AbuBakkar32/Python-Essential-Practice | b5e820d2e27e557b04848b5ec63dd78ae5b554c4 | 8659cf5652441d32476dfe1a8d90184a9ae92b3b | refs/heads/master | 2022-11-13T23:07:16.829075 | 2020-06-27T18:46:52 | 2020-06-27T18:46:52 | 275,432,093 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | '''
Metrics for classification
In Chapter 1, you evaluated the performance of your k-NN classifier based on its accuracy. However, as Andy discussed, accuracy is not always an informative metric. In this exercise, you will dive more deeply into evaluating the performance of binary classifiers by computing a confusion matrix and generating a classification report.
You may have noticed in the video that the classification report consisted of three rows, and an additional support column. The support gives the number of samples of the true response that lie in that class - so in the video example, the support was the number of Republicans or Democrats in the test set on which the classification report was computed. The precision, recall, and f1-score columns, then, gave the respective metrics for that particular class.
Here, you'll work with the PIMA Indians dataset obtained from the UCI Machine Learning Repository. The goal is to predict whether or not a given female patient will contract diabetes based on features such as BMI, age, and number of pregnancies. Therefore, it is a binary classification problem. A target value of 0 indicates that the patient does not have diabetes, while a value of 1 indicates that the patient does have diabetes. As in Chapters 1 and 2, the dataset has been preprocessed to deal with missing values.
The dataset has been loaded into a DataFrame df and the feature and target variable arrays X and y have been created for you. In addition, sklearn.model_selection.train_test_split and sklearn.neighbors.KNeighborsClassifier have already been imported.
Your job is to train a k-NN classifier to the data and evaluate its performance by generating a confusion matrix and classification report.
INSTRUCTIONS
100XP
Import classification_report and confusion_matrix from sklearn.metrics.
Create training and testing sets with 40% of the data used for testing. Use a random state of 42.
Instantiate a k-NN classifier with 6 neighbors, fit it to the training data, and predict the labels of the test set.
Compute and print the confusion matrix and classification report using the confusion_matrix() and classification_report() functions.
'''
# Import necessary modules
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# Create training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,random_state=42)
# Instantiate a k-NN classifier: knn
knn = KNeighborsClassifier(n_neighbors=6)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
# Predict the labels of the test data: y_pred
y_pred = knn.predict(X_test)
# Generate the confusion matrix and classification report
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
| [
"abu35-1994@diu.edu.bd"
] | abu35-1994@diu.edu.bd |
734fb833af5a331b634893629da08f294d27f864 | 3953a4cf5dee0667c08e1fe1250a3067090e3f24 | /mural/config/urls.py | cdebfe9af3dc61e698750dae88078701104bb273 | [] | no_license | DigitalGizmo/msm_mural_project | 41960242c84050ee578da90afabcb7f9bc1923df | 5566a2b6f7445dc53d8aaf96cf7d24236fd5ed96 | refs/heads/master | 2020-03-07T11:04:49.633149 | 2019-02-13T18:10:44 | 2019-02-13T18:10:44 | 127,447,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | """mural URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
from panels import views
urlpatterns = [
# path('', TemplateView.as_view(template_name="index.html")),
path('', views.PanelListView.as_view(), name='index.html'),
path('panels/', include('panels.urls', namespace='panels')),
path('pops/', include('pops.urls', namespace='pops')),
path('admin/', admin.site.urls),
]
| [
"donpublic@digitalgizmo.com"
] | donpublic@digitalgizmo.com |
bd186ab2c1021b3099528a8ac1bc3737cae828d1 | 7738e950c103fb23b48d5e004eddcf108ea71fa1 | /Cursoemvideo/Mundo1/exercise022.py | b16615322ed621abdab7671f36f3fe2f08ba44ed | [] | no_license | pedrottoni/Studies-Python | c9bdbaf4b4aaa209bf32aa93d6ee4814a0a39c53 | f195bcb4c6868689ec0cf05c34cd4d5a6c7b3ea1 | refs/heads/master | 2021-09-26T05:23:02.398552 | 2020-02-12T04:48:23 | 2020-02-12T04:48:23 | 203,452,221 | 0 | 0 | null | 2021-09-22T18:21:51 | 2019-08-20T20:48:07 | Python | UTF-8 | Python | false | false | 899 | py | '''
Crie um programa que leia o nome completo de uma pessoa e mostre:
- O nome com todas as letras maiúsculas e minúsculas.
- Quantas letras ao todo(sem considerar espaços).
- Quantas letras tem o primeiro nome.
'''
name = str(input("Digite seu nome completo: "))
name_up = name.upper()
name_low = name.lower()
name_len_replace = len(name.replace(" ", ""))
name_len_count = len(name) - name.count(" ")
name_firstname_len_split = name.split()
name_firstname_len_find = name.find(" ")
print(
f"Seu nome em letras maiúsculas é: {name_up}\nSeu nome em letras minúsculas é: {name_low}\nSeu nome sem espaçoes possui {name_len_replace} letras, usando o replace\nSeu nome sem espaçoes possui {name_len_count} letras, usando o count\nSeu primeiro nome possui {len(name_firstname_len_split[0])} letras, usando o split\nSeu primeiro nome possui {name_firstname_len_find} letras, usando o find"
)
| [
"pedrottoni@outlook.com"
] | pedrottoni@outlook.com |
61669adcae1d0c4f10ad7a70ae68b63f3087dc03 | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_exchange_online/fn_exchange_online/components/exchange_online_move_message_to_folder.py | 61835d74a362f1bd66d5ce67124b3febefa59fb1 | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 4,764 | py | # (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_lib import validate_fields, RequestsCommon, ResultPayload
from fn_exchange_online.lib.ms_graph_helper import MSGraphHelper, MAX_RETRIES_TOTAL, MAX_RETRIES_BACKOFF_FACTOR, MAX_BATCHED_REQUESTS
CONFIG_DATA_SECTION = 'fn_exchange_online'
LOG = logging.getLogger(__name__)
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'exchange_online_move_message_to_folder"""
def load_options(self, opts):
""" Get app.config parameters and validate them. """
self.opts = opts
self.options = opts.get(CONFIG_DATA_SECTION, {})
required_fields = ["microsoft_graph_token_url", "microsoft_graph_url", "tenant_id", "client_id",
"client_secret", "max_messages", "max_users"]
validate_fields(required_fields, self.options)
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.load_options(opts)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.load_options(opts)
@function("exchange_online_move_message_to_folder")
def _exchange_online_move_message_to_folder_function(self, event, *args, **kwargs):
"""Function: This function will move an Exchange Online message to the specified folder in the users mailbox."""
try:
# Initialize the results payload
rp = ResultPayload(CONFIG_DATA_SECTION, **kwargs)
# Validate fields
validate_fields(['exo_email_address', 'exo_messages_id', 'exo_destination_mailfolder_id'], kwargs)
# Get the function parameters:
email_address = kwargs.get("exo_email_address") # text
message_id = kwargs.get("exo_messages_id") # text
mailfolders_id = kwargs.get("exo_mailfolders_id") # text
destination_id = kwargs.get("exo_destination_mailfolder_id") # text
LOG.info(u"exo_email_address: %s", email_address)
LOG.info(u"exo_messages_id: %s", message_id)
LOG.info(u"exo_mailfolders_id: %s", mailfolders_id)
LOG.info(u"exo_destination_id: %s", destination_id)
yield StatusMessage(u"Starting move message for email address: {} to mail folder {}".format(email_address, destination_id))
# Get the MS Graph helper class
MS_graph_helper = MSGraphHelper(self.options.get("microsoft_graph_token_url"),
self.options.get("microsoft_graph_url"),
self.options.get("tenant_id"),
self.options.get("client_id"),
self.options.get("client_secret"),
self.options.get("max_messages"),
self.options.get("max_users"),
self.options.get("max_retries_total", MAX_RETRIES_TOTAL),
self.options.get("max_retries_backoff_factor", MAX_RETRIES_BACKOFF_FACTOR),
self.options.get("max_batched_requests", MAX_BATCHED_REQUESTS),
RequestsCommon(self.opts, self.options).get_proxies())
# Call MS Graph API to get the user profile
response = MS_graph_helper.move_message(email_address, mailfolders_id, message_id, destination_id)
# If message was deleted a 201 code is returned.
if response.status_code == 201:
success = True
new_message_id = response.json().get('id')
new_web_link = response.json().get('webLink')
response_json = {'new_message_id': new_message_id,
'new_web_link': new_web_link}
else:
success = False
response_json = response.json()
results = rp.done(success, response_json)
yield StatusMessage(u"Returning delete results for email address: {}".format(email_address))
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception as err:
LOG.error(err)
yield FunctionError(err)
| [
"shane.curtin@ie.ibm.com"
] | shane.curtin@ie.ibm.com |
ecb1ca3c00d1db51f7132f218cca24f0f9be33af | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/clouds_20200703185904.py | 374d79f890156c551577f693e911a567575c824b | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | def jumpingClouds(c):
i = 0
jumps = 0
lastCloud = len(c)-1
while i < lastCloud:
# if i+1 == lastCloud:
# jumps +=1
# i+=1
# elif c[i+2] == 0:
# jumps +=1
# i = i + 2
# else:
# jumps +=1
# i +=1
# print(jumps)
if :
print('here')
jumps +=1
i =i+2
print('c---->',c[i],'i-->',i,'jumps',jumps)
elif c[i+1] == 0:
print('here2')
jumps +=1
i +=1
print('c---->',c[i],'i-->',i,'jumps',jumps)
print(jumps)
jumpingClouds([0, 0, 0, 1, 0, 0]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
2f4e1ef39e3ec5bc5cfffda8e34746dd2360f9c5 | 7c3116ca951c1c989fcc6cd673993ce6b1d4be5a | /modules/Eventlog/lv1_os_win_event_logs_registry_handling.py | caa2035832bf5a5e3814ef9a90fbad9d90f81c82 | [
"Apache-2.0"
] | permissive | Kimwonkyung/carpe | c8c619c29350d6edc464dbd9ba85aa3b7f847b8a | 58a8bf7a7fc86a07867890c2ce15c7271bbe8e78 | refs/heads/master | 2022-12-15T13:51:47.678875 | 2020-09-11T05:25:43 | 2020-09-11T05:25:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys, re
from datetime import datetime
from utility import database
class Registry_Handling_Information:
    """Container for one Windows 'registry value modified' event (ID 4657).

    The class-level empty strings act as shared defaults;
    EVENTLOGREGISTRYHANDLING creates one instance per event row and
    overwrites the relevant fields on the instance.
    """
    par_id = ''                 # partition identifier
    case_id = ''                # case identifier
    evd_id = ''                 # evidence identifier
    task = ''                   # high-level action, e.g. 'Modified'
    time = ''                   # event creation time as stored in the DB
    registry_path = ''          # path of the modified registry object
    registry_value_name = ''    # name of the modified registry value
    old_value = ''              # value data before the change ('' if absent)
    new_value = ''              # value data after the change ('' if absent)
    user_sid = ''               # SID of the account that made the change
    event_id = ''               # Windows event identifier ('4657')
    source = ''                 # source log file (e.g. ...Security.evtx)
    event_id_description = ''   # human-readable description of the event id
def EVENTLOGREGISTRYHANDLING(configuration):
    """Collect 'registry value modified' events (ID 4657) from Security.evtx.

    Queries the lv1_os_win_evt_total table through the supplied configuration
    object and parses each event's XML-ish `data` payload with regular
    expressions.

    Args:
        configuration: object exposing `cursor.execute_query_mul` (only that
            attribute is used here).

    Returns:
        list of Registry_Handling_Information, one per 4657 event row.
    """
    #db = database.Database()
    #db.open()
    registry_list = []
    registry_count = 0
    query = r"SELECT data, event_id, time_created, source, user_sid FROM lv1_os_win_evt_total where event_id like '4657' and source like '%Security.evtx%'"
    #result_query = db.execute_query_mul(query)
    result_query = configuration.cursor.execute_query_mul(query)
    for result_data in result_query:
        registry_handling_information = Registry_Handling_Information()
        try:
            # The SQL already filters on 4657; this re-check is defensive.
            if result_data[1] == '4657':
                registry_list.append(registry_handling_information)
                registry_list[registry_count].task = 'Modified'
                registry_list[registry_count].event_id = result_data[1]
                registry_list[registry_count].time = result_data[2]
                registry_list[registry_count].source = result_data[3]
                registry_list[registry_count].user_sid = result_data[4]
                registry_list[registry_count].event_id_description = 'A registry value was modified'
                # Pull individual fields out of the event XML with regexes.
                if 'SubjectUserName' in result_data[0]:
                    dataInside = r"SubjectUserName\">(.*)<"
                    m = re.search(dataInside, result_data[0])
                    # NOTE(review): `.user` is not declared on
                    # Registry_Handling_Information (other code uses
                    # user_sid) — looks unintentional; confirm.
                    registry_list[registry_count].user = m.group(1)
                if 'ObjectName' in result_data[0]:
                    dataInside = r"ObjectName\">(.*)<"
                    m = re.search(dataInside, result_data[0])
                    registry_list[registry_count].registry_path = m.group(1)
                if 'ObjectValueName' in result_data[0]:
                    dataInside = r"ObjectValueName\">(.*)<"
                    m = re.search(dataInside, result_data[0])
                    registry_list[registry_count].registry_value_name = m.group(1)
                # An empty OldValue attribute (OldValue\" ) maps to ' '.
                if r"OldValue\" " in result_data[0]:
                    registry_list[registry_count].old_value = ' '
                if r"OldValue\">" in result_data[0]:
                    dataInside = r"OldValue\">(.*)<"
                    m = re.search(dataInside, result_data[0])
                    registry_list[registry_count].old_value = m.group(1)
                    # NOTE(review): this second assignment overwrites
                    # registry_value_name with the OldValue text — looks like
                    # a copy-paste slip; confirm against the event schema.
                    registry_list[registry_count].registry_value_name = m.group(1)
                if r"NewValue\" " in result_data[0]:
                    registry_list[registry_count].new_value = ' '
                if 'NewValue' in result_data[0]:
                    dataInside = r"NewValue\">(.*)<"
                    m = re.search(dataInside, result_data[0])
                    registry_list[registry_count].new_value = m.group(1)
            registry_count = registry_count + 1
        # NOTE(review): bare except swallows every parse error (including
        # regex misses where m is None); only a message is printed.
        except:
            print("EVENT LOG REGISTRY HANDLING ERROR")
    #db.close()
    return registry_list
| [
"jbc0729@gmail.com"
] | jbc0729@gmail.com |
6ca185cdacbd5790f016b989c1bb9f28545a4402 | bd185738ea6a74d1e76d9fc9d8cbc59f94990842 | /helpline/migrations/0012_hotdesk_user.py | 9440e21db2fcbf2a8064e17c7132565c9c0001c3 | [
"BSD-2-Clause"
] | permissive | aondiaye/myhelpline | c4ad9e812b3a13c6c3c8bc65028a3d3567fd6a98 | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | refs/heads/master | 2020-12-22T05:32:59.576519 | 2019-10-29T08:52:55 | 2019-10-29T08:52:55 | 236,683,448 | 1 | 0 | NOASSERTION | 2020-01-28T07:50:18 | 2020-01-28T07:50:17 | null | UTF-8 | Python | false | false | 667 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-28 13:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional `user` foreign key to the
    `hotdesk` model."""
    dependencies = [
        # Depends on whichever model is configured as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('helpline', '0011_auto_20180828_1231'),
    ]
    operations = [
        migrations.AddField(
            model_name='hotdesk',
            name='user',
            # blank/null=True keeps existing rows valid; SET_NULL detaches
            # hotdesk rows instead of deleting them when a user is removed.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"patrickmithamo@gmail.com"
] | patrickmithamo@gmail.com |
95ab68d99f08e0ae94963ea25c02f5a805fb9b4c | 5b058d332251bcf5fe6eafb8e98c0132ab9a4e36 | /pimr/studentapp/views.py | add5bec511e507ce3b58c30a09de43b00a7c6c6a | [] | no_license | shivaconceptsolution/DJANGO6-7-BATCH-PALAASIA | fcfa708c212966f57445361a0d23eafdcd4fcbcb | 8b1a0707cd2837efa6ccfac2ccc6385084e69770 | refs/heads/master | 2020-06-28T13:02:04.798925 | 2019-08-26T13:44:31 | 2019-08-26T13:44:31 | 200,240,920 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,240 | py | from django.shortcuts import redirect,render
from django.http import HttpResponse
from .models import Student,Reg
def index(request):
    """Render the landing page."""
    return render(request,"studentapp/index.html")
def reg(request):
    """Create a Student from query-string fields and return to the index.

    NOTE(review): uses GET for a state-changing operation and stores the
    raw, unvalidated parameters — should be a validated POST.
    """
    s = Student(rno=request.GET['txtrno'],sname=request.GET['txtname'],branch=request.GET['txtbranch'],fees=request.GET['txtfees'])
    s.save()
    return render(request,"studentapp/index.html")
def login(request):
    """Render the login form."""
    return render(request,"studentapp/login.html")
def loginlogic(request):
    """Check the POSTed credentials against the Reg table.

    NOTE(review): passwords are compared in plain text; the field name
    'passsword' matches the model's spelling, so it is kept as-is.
    """
    e = request.POST["txtemail"]
    pa = request.POST["txtpass"]
    s = Reg.objects.filter(email=e,passsword=pa)
    if(s.count()==1):
        return redirect('viewstudent')
    else:
        return render(request,"studentapp/login.html",{"key":"invalid userid password"})
def about(request):
    """Render the static 'about' page."""
    return render(request,"studentapp/about.html")
def contact(request):
    """Render the static 'contact' page."""
    return render(request,"studentapp/contact.html")
def viewstudent(request):
    """List every Student record."""
    res = Student.objects.all()
    return render(request,"studentapp/viewstudent.html",{'sturec':res})
def editstu(request):
    """Render the edit form for the student whose pk arrives in the 'q'
    query parameter.  (Leftover debug print removed.)"""
    sid= request.GET["q"]
    res = Student.objects.get(pk=sid)
    return render(request,"studentapp/editstu.html",{'s':res})
def deletestu(request):
    """Delete the student whose pk arrives in the 'q' query parameter.

    NOTE(review): deletion via GET is unsafe (prefetchers/crawlers can
    trigger it); should be POST.
    """
    sid= request.GET["q"]
    res = Student.objects.get(pk=sid)
    res.delete()
    return redirect('viewstudent')
def updatestu(request):
    """Overwrite every field of the student identified by POST 'txtid'."""
    sid= request.POST["txtid"]
    res = Student.objects.get(pk=sid)
    res.rno=request.POST["txtrno"]
    res.sname=request.POST["txtsname"]
    res.branch=request.POST["txtbranch"]
    res.fees=request.POST["txtfees"]
    res.save()
    return redirect('viewstudent')
def si(request):
    """Return the simple interest for fixed demo values as plain text.

    Fix: the result variable no longer shadows the view function's own
    name `si`; locals are named for readability.  Output is unchanged
    ("Result is 528.0").
    """
    principal = 12000
    rate = 2.2   # percent per year
    years = 2
    interest = (principal * rate * years) / 100
    return HttpResponse("Result is " + str(interest))
def add(request):
    """Render the calculator form."""
    return render(request,"studentapp/add.html")
def addlogic(request):
    """Apply the arithmetic operation named by the submit button to the two
    POSTed numbers and re-render the calculator with the result.

    Robustness fix: division by zero now reports a message instead of
    crashing with a server error; the int() conversions are done once.
    """
    a = int(request.POST["txtnum1"])
    b = int(request.POST["txtnum2"])
    op = request.POST['btnsubmit']
    if op == "ADD":
        c = a + b
    elif op == "SUB":
        c = a - b
    elif op == "MULTI":
        c = a * b
    else:
        if b == 0:
            return render(request,"studentapp/add.html",{'key':'cannot divide by zero'})
        c = a / b
    return render(request,"studentapp/add.html",{'key':'result is '+str(c)})
| [
"noreply@github.com"
] | shivaconceptsolution.noreply@github.com |
dbdf6c8369d0743ee83ab81a07ca6a51dbbe400f | b4a1037c86a1ef04f3172746b98a68bfb42e8361 | /fileupload/views.py | 094ad790b7db4c5ec490959f931475b1a7120508 | [] | no_license | Gathiira/authentication-system- | b51abb074d9d6f526857cbed98417ab1e42f53be | 5b0ceebc5bf0b7ca9f3eb00835a3e4f3e16ab31f | refs/heads/main | 2023-06-11T21:30:36.960821 | 2021-06-08T11:14:15 | 2021-06-08T11:14:15 | 367,470,745 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | from rest_framework import generics, permissions, status
from rest_framework.response import Response
from fileupload.models import UploadFile
from fileupload.serializers import FileDetailSerializer
import os
class UploadFileView(generics.CreateAPIView):
    """Accept one or more uploaded files, validate size and extension, and
    persist each one as an UploadFile row.

    Returns a list of {"id", "filename"} dicts for the stored files.
    """
    queryset = UploadFile.objects.all()
    serializer_class = FileDetailSerializer
    permission_classes = (permissions.IsAuthenticated, )

    # Validation limits, hoisted out of the per-file loop.
    MAX_SIZE_BYTES = 15360 * 1024  # 15 MiB
    ALLOWED_EXTENSIONS = ('.jpeg', '.jpg', '.png', '.pdf')

    def create(self, request):
        files = request.FILES.getlist("file")
        all_files = []
        for file in files:
            if file.size > self.MAX_SIZE_BYTES:
                return Response(
                    {'details': 'File is too large'},
                    status=status.HTTP_400_BAD_REQUEST)
            # Bug fix: compare the extension case-insensitively so that
            # e.g. 'photo.JPG' is accepted like 'photo.jpg'.
            extension = os.path.splitext(file.name)[1].lower()
            if extension not in self.ALLOWED_EXTENSIONS:
                return Response(
                    {"details": f"Invalid file format. Kindly upload {','.join(self.ALLOWED_EXTENSIONS)} only"},
                    status=status.HTTP_400_BAD_REQUEST)
            file_inst = UploadFile.objects.create(file=file, filename=file.name)
            all_files.append({
                "id": file_inst.id,
                "filename": file_inst.filename,
            })
        return Response(all_files)
class FileDetailsView(generics.ListAPIView):
    """List UploadFile rows whose ids arrive in the comma-separated 'file'
    query parameter, newest first."""
    serializer_class = FileDetailSerializer
    def get_queryset(self):
        # .dict() flattens the QueryDict; multi-valued keys keep only the
        # last value.
        payload = self.request.query_params.dict()
        _files = payload.get('file', None)
        if not _files:
            # No 'file' parameter: empty result set.
            return []
        files = _files.split(',')
        file_query = UploadFile.objects.filter(
            id__in=files).order_by('-date_created')
        return file_query
| [
"mwangyjose@gmail.com"
] | mwangyjose@gmail.com |
9a6833424861ef79177af3855dd81b325de3b4de | 685f4474699d769dae88537c69f5517ac13a8431 | /EL64.py | 0e76bdbf387850b49ca63c3ad464c238a486b1da | [] | no_license | Pumafied/Project-Euler | 7466f48e449b7314598c106398c0be0424ae72d5 | 0c3e80a956893ce1881a9694131d52b156b9d3d8 | refs/heads/master | 2016-09-05T22:45:09.733696 | 2013-04-20T04:46:48 | 2013-04-20T04:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | # All square roots are periodic when written as continued fractions and can be written in the form:
# N = a0 +
# 1
# a1 +
# 1
# a2 +
# 1
# a3 + ...
# For example, let us consider 23:
# 23 = 4 + 23 — 4 = 4 +
# 1
# = 4 +
# 1
# 1
# 23—4
# 1 +
# 23 – 3
# 7
# If we continue we would get the following expansion:
# 23 = 4 +
# 1
# 1 +
# 1
# 3 +
# 1
# 1 +
# 1
# 8 + ...
# The process can be summarised as follows:
# a0 = 4,
# 1
# 23—4
# =
# 23+4
# 7
# = 1 +
# 23—3
# 7
# a1 = 1,
# 7
# 23—3
# =
# 7(23+3)
# 14
# = 3 +
# 23—3
# 2
# a2 = 3,
# 2
# 23—3
# =
# 2(23+3)
# 14
# = 1 +
# 23—4
# 7
# a3 = 1,
# 7
# 23—4
# =
# 7(23+4)
# 7
# = 8 + 23—4
# a4 = 8,
# 1
# 23—4
# =
# 23+4
# 7
# = 1 +
# 23—3
# 7
# a5 = 1,
# 7
# 23—3
# =
# 7(23+3)
# 14
# = 3 +
# 23—3
# 2
# a6 = 3,
# 2
# 23—3
# =
# 2(23+3)
# 14
# = 1 +
# 23—4
# 7
# a7 = 1,
# 7
# 23—4
# =
# 7(23+4)
# 7
# = 8 + 23—4
# It can be seen that the sequence is repeating. For conciseness, we use the notation √23 = [4;(1,3,1,8)], to indicate that the block (1,3,1,8) repeats indefinitely.
# The first ten continued fraction representations of (irrational) square roots are:
# √2=[1;(2)], period=1
# √3=[1;(1,2)], period=2
# √5=[2;(4)], period=1
# √6=[2;(2,4)], period=2
# √7=[2;(1,1,1,4)], period=4
# √8=[2;(1,4)], period=2
# √10=[3;(6)], period=1
# √11=[3;(3,6)], period=2
# √12=[3;(2,6)], period=2
# √13=[3;(1,1,1,1,6)], period=5
# Exactly four continued fractions, for N ≤ 13, have an odd period.
# How many continued fractions for N ≤ 10000 have an odd period?
"pumafied@gmail.com"
] | pumafied@gmail.com |
e51dfa0c0743c4bb60091b16980b697214e98e70 | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /AGC033/AGC033b.py | 6f0b0176bcb0b9a0e4cfa3997f1de464dd155b2e | [
"Unlicense"
] | permissive | yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | #AGC033b
def main():
    """Stub for AtCoder AGC033 problem B — no logic yet; the commented line
    shows the intended input parsing."""
    import sys
    input=sys.stdin.readline  # shadows builtin input with the faster readline
    sys.setrecursionlimit(10**6)
    # map(int, input().split())
if __name__ == '__main__':
    main()
"kurvan1112@gmail.com"
] | kurvan1112@gmail.com |
25dc6353c69fd55923f4ed203e084e1c14265a36 | cbcdf195338307b0c9756549a9bffebf3890a657 | /django-stubs/contrib/contenttypes/fields.pyi | 1b80e2e17b7a9b55545f5f6df39adf35106323a8 | [
"MIT"
] | permissive | mattbasta/django-stubs | bc482edf5c6cdf33b85005c2638484049c52851b | 8978ad471f2cec0aa74256fe491e2e07887f1006 | refs/heads/master | 2020-04-27T08:38:22.694104 | 2019-03-06T09:05:08 | 2019-03-06T09:05:24 | 174,178,933 | 1 | 0 | MIT | 2019-03-06T16:18:01 | 2019-03-06T16:18:00 | null | UTF-8 | Python | false | false | 4,573 | pyi | from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, Generic
from django.contrib.contenttypes.models import ContentType
from django.core.checks.messages import Error
from django.db.models.base import Model
from django.db.models.fields.related import ForeignObject
from django.db.models.fields.related_descriptors import ReverseManyToOneDescriptor
from django.db.models.fields.reverse_related import ForeignObjectRel
from django.db.models.fields import Field, PositiveIntegerField
from django.db.models.fields.mixins import FieldCacheMixin
from django.db.models.query import QuerySet
from django.db.models.query_utils import FilteredRelation, PathInfo
from django.db.models.sql.where import WhereNode
class GenericForeignKey(FieldCacheMixin):
auto_created: bool = ...
concrete: bool = ...
editable: bool = ...
hidden: bool = ...
is_relation: bool = ...
many_to_many: bool = ...
many_to_one: bool = ...
one_to_many: bool = ...
one_to_one: bool = ...
related_model: Any = ...
remote_field: Any = ...
ct_field: str = ...
fk_field: str = ...
for_concrete_model: bool = ...
rel: None = ...
column: None = ...
def __init__(self, ct_field: str = ..., fk_field: str = ..., for_concrete_model: bool = ...) -> None: ...
name: Any = ...
model: Any = ...
def contribute_to_class(self, cls: Type[Model], name: str, **kwargs: Any) -> None: ...
def get_filter_kwargs_for_object(self, obj: Model) -> Dict[str, Optional[ContentType]]: ...
def get_forward_related_filter(self, obj: Model) -> Dict[str, int]: ...
def check(self, **kwargs: Any) -> List[Error]: ...
def get_cache_name(self) -> str: ...
def get_content_type(
self, obj: Optional[Model] = ..., id: Optional[int] = ..., using: Optional[str] = ...
) -> ContentType: ...
def get_prefetch_queryset(
self, instances: Union[List[Model], QuerySet], queryset: Optional[QuerySet] = ...
) -> Tuple[List[Model], Callable, Callable, bool, str, bool]: ...
def __get__(
self, instance: Optional[Model], cls: Type[Model] = ...
) -> Optional[Union[GenericForeignKey, Model]]: ...
def __set__(self, instance: Model, value: Optional[Model]) -> None: ...
class GenericRel(ForeignObjectRel):
field: GenericRelation
limit_choices_to: Optional[Union[Dict[str, Any], Callable[[], Any]]]
model: Type[Model]
multiple: bool
on_delete: Callable
parent_link: bool
related_name: str
related_query_name: None
symmetrical: bool
def __init__(
self,
field: GenericRelation,
to: Union[Type[Model], str],
related_name: None = ...,
related_query_name: Optional[str] = ...,
limit_choices_to: Optional[Union[Dict[str, Any], Callable[[], Any]]] = ...,
) -> None: ...
class GenericRelation(ForeignObject):
auto_created: bool = ...
many_to_many: bool = ...
many_to_one: bool = ...
one_to_many: bool = ...
one_to_one: bool = ...
rel_class: Any = ...
mti_inherited: bool = ...
object_id_field_name: Any = ...
content_type_field_name: Any = ...
for_concrete_model: Any = ...
to_fields: Any = ...
def __init__(
self,
to: Union[Type[Model], str],
object_id_field: str = ...,
content_type_field: str = ...,
for_concrete_model: bool = ...,
related_query_name: Optional[str] = ...,
limit_choices_to: Optional[Union[Dict[str, Any], Callable[[], Any]]] = ...,
**kwargs: Any
) -> None: ...
def check(self, **kwargs: Any) -> List[Error]: ...
def resolve_related_fields(self) -> List[Tuple[PositiveIntegerField, Field]]: ...
def get_path_info(self, filtered_relation: Optional[FilteredRelation] = ...) -> List[PathInfo]: ...
def get_reverse_path_info(self, filtered_relation: None = ...) -> List[PathInfo]: ...
def value_to_string(self, obj: Model) -> str: ...
model: Any = ...
def set_attributes_from_rel(self) -> None: ...
def get_internal_type(self) -> str: ...
def get_content_type(self) -> ContentType: ...
def get_extra_restriction(
self, where_class: Type[WhereNode], alias: Optional[str], remote_alias: str
) -> WhereNode: ...
def bulk_related_objects(self, objs: List[Model], using: str = ...) -> QuerySet: ...
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
field: GenericRelation
rel: GenericRel
def related_manager_cls(self): ...
def create_generic_related_manager(superclass: Any, rel: Any): ...
| [
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
6640f7ae8b1855e37ca2bb49587ede0bf5f2a525 | 6a41dd36ddd3e501b62ff253b40bf9bbbaa722c2 | /코딩오답/오답03.py | a9f6c42b3f113758b38a0c35737da719f644327e | [] | no_license | skysamer/first_python | 9ba79b194d838e0cdeab6f2e7a4207d71c73ed63 | 638622f51434eda65ef3300e3ce5db3a2a79db2a | refs/heads/master | 2023-02-03T08:21:23.370285 | 2020-12-27T13:39:20 | 2020-12-27T13:39:20 | 307,953,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | list_of_list=[
[1, 2, 3],
[4, 5, 6, 7],
[8, 9],
]
for line in list_of_list:
for item in line:
print(item)
# 1
# 2
# 3
# 4
# 5
# 6
# 7
# 8
# 9
| [
"skyslayer123@naver.com"
] | skyslayer123@naver.com |
5db710d5daf2c06e17c9d537bf382a6aaf8987dc | 1e7a2a03fef545619945e2dd881d9405c0959b31 | /labman/gui/handlers/auth.py | aae0833e0d7ccc4c86cabf6106b3981e712cc1e2 | [
"BSD-3-Clause"
] | permissive | nreeve17/labman | bca300ee3ce52f96d83a866e5ede455fab70c2b8 | cc643228f601236bbd7348f8215b00daa3e61358 | refs/heads/master | 2021-09-06T18:58:13.890245 | 2018-02-05T22:48:16 | 2018-02-05T22:48:16 | 120,044,962 | 0 | 0 | null | 2018-02-03T00:12:41 | 2018-02-03T00:12:41 | null | UTF-8 | Python | false | false | 1,490 | py | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from tornado.escape import json_encode
from labman.gui.handlers.base import BaseHandler
from labman.db.user import User
from labman.db.exceptions import LabmanUnknownIdError, LabmanLoginError
class LoginHandler(BaseHandler):
    """Authenticate a user against the labman database and start a session
    on success."""

    def get(self):
        # The login form lives on the index page; nothing to render here.
        self.redirect('/')

    def post(self):
        username = self.get_argument('username', '').strip().lower()
        password = self.get_argument('password', '')
        user = None
        error_msg = ""
        try:
            user = User.login(username, password)
        except LabmanUnknownIdError:
            error_msg = "Unknown user name"
        except LabmanLoginError:
            error_msg = "Incorrect password"
        if not user:
            # Covers both login exceptions and a falsy return value.
            self.render("index.html", message=error_msg, level='danger')
            return
        self.set_current_user(username)
        self.redirect("/")

    def set_current_user(self, user):
        # A truthy value becomes the secure session cookie; anything falsy
        # clears the session instead.
        if not user:
            self.clear_cookie("user")
            return
        self.set_secure_cookie("user", json_encode(user))
class LogoutHandler(BaseHandler):
    """Clear the session cookie and send the user back to the index page."""
    def get(self):
        self.clear_cookie("user")
        self.redirect("/")
| [
"josenavasmolina@gmail.com"
] | josenavasmolina@gmail.com |
a61f8c7141e4d1f4ac5757a1ba565e2c14914c9f | c2a03f1cdc338c9078534d8eb2213b214a251e73 | /Pollapp/views.py | 356b37b17d72c90fd4396def27f7776d9614820c | [] | no_license | risification/onlinde_test | 9a2db6a945734cc74ee8bc8408ac0ce39fa9d3b3 | 3e1e7e5aca4fa59db08f6394c85ce00652c0871b | refs/heads/master | 2023-03-14T08:24:53.574738 | 2021-03-05T17:22:08 | 2021-03-05T17:22:08 | 344,850,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | from django.shortcuts import render
from .models import Poll, Question
from .forms import AnswerForm
from django.http import HttpResponse
# Create your views here.
def poll_page(request):
    """List every Poll."""
    poll = Poll.objects.all()
    return render(request, 'Poll/poll.html', {'poll': poll})
def question_page(request, id_poll):
    """List the questions belonging to the poll with primary key id_poll."""
    poll = Poll.objects.get(id=id_poll)
    question = poll.question_set.all()
    return render(request, 'Poll/question.html', {'questions': question})
def choice_answer_page(request, id_question):
    """Show the answer choices for one question and grade a submission.

    On a valid POST the answer is saved; a correct answer adds 5 points to
    the owning poll.  The plain-text responses are Russian for
    'correct answer' / 'incorrect'.
    """
    question = Question.objects.get(id=id_question)
    choice = question.choiceansw_set.all()
    form = AnswerForm(initial={'question': question})
    if request.method == 'POST':
        form = AnswerForm(request.POST)
        if form.is_valid():
            # The answer row is stored regardless of correctness.
            form.save()
            if question.true_answer == form.cleaned_data['answer']:
                question.poll.points+= 5
                question.poll.save()
                return HttpResponse('правильный ответ ')
            else:
                return HttpResponse('не правильно')
    # GET, or invalid POST: (re-)render the form with its choices.
    return render(request, 'Poll/choice_answer.html',
                  {'choices': choice, 'questions': question, 'form': form,})
| [
"sultangaziev01@bk.ru"
] | sultangaziev01@bk.ru |
6c93ea7887c227aa05f4563d5391ae3dc80941a3 | 0eaab1305900d8e70dd746d676126d1667d9c314 | /winregrc/collector.py | 00cd86f31d6a14c554b9132fd0852c63cb4f0d42 | [
"Apache-2.0"
] | permissive | scudette/winreg-kb | 89ffc7f63c2630b266bed41d1c66dff64fd1d32d | f81b8bcaef8365d0c52bf3c87af2bccb4274bece | refs/heads/master | 2020-06-08T20:51:37.427445 | 2019-06-14T06:47:16 | 2019-06-14T06:47:16 | 193,304,780 | 1 | 0 | null | 2019-06-23T04:07:02 | 2019-06-23T04:07:02 | null | UTF-8 | Python | false | false | 3,913 | py | # -*- coding: utf-8 -*-
"""Windows volume collector."""
from __future__ import unicode_literals
from dfvfs.helpers import volume_scanner as dfvfs_volume_scanner
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as dfvfs_path_spec_factory
from dfvfs.resolver import resolver as dfvfs_resolver
from dfwinreg import interface as dfwinreg_interface
from dfwinreg import regf as dfwinreg_regf
from dfwinreg import registry as dfwinreg_registry
class CollectorRegistryFileReader(dfwinreg_interface.WinRegistryFileReader):
  """Collector-based Windows Registry file reader."""
  def __init__(self, volume_scanner):
    """Initializes a Windows Registry file reader object.
    Args:
      volume_scanner (dfvfs.WindowsVolumeScanner): Windows volume scanner.
    """
    super(CollectorRegistryFileReader, self).__init__()
    self._volume_scanner = volume_scanner
  def Open(self, path, ascii_codepage='cp1252'):
    """Opens the Windows Registry file specified by the path.
    Args:
      path (str): path of the Windows Registry file. The path is a Windows
          path relative to the root of the file system that contains the
          specific Windows Registry file, such as:
          C:\\Windows\\System32\\config\\SYSTEM
      ascii_codepage (Optional[str]): ASCII string codepage.
    Returns:
      WinRegistryFile: Windows Registry file or None the file does not exist or
          cannot be opened.
    """
    file_object = self._volume_scanner.OpenFile(path)
    if file_object is None:
      return None
    registry_file = dfwinreg_regf.REGFWinRegistryFile(
        ascii_codepage=ascii_codepage)
    try:
      registry_file.Open(file_object)
    except IOError:
      # Close the file object ourselves on failure; on success the registry
      # file object takes ownership of it.
      file_object.close()
      return None
    return registry_file
class WindowsRegistryCollector(dfvfs_volume_scanner.WindowsVolumeScanner):
  """Windows Registry collector.
  Attributes:
    registry (dfwinreg.WinRegistry): Windows Registry.
  """
  def __init__(self, mediator=None):
    """Initializes a Windows Registry collector.
    Args:
      mediator (Optional[dfvfs.VolumeScannerMediator]): a volume scanner
          mediator.
    """
    super(WindowsRegistryCollector, self).__init__(mediator=mediator)
    # True when the source is a single Registry file rather than a volume.
    self._single_file = False
    registry_file_reader = CollectorRegistryFileReader(self)
    self.registry = dfwinreg_registry.WinRegistry(
        registry_file_reader=registry_file_reader)
  def IsSingleFileRegistry(self):
    """Determines if the Registry consists of a single file.
    Returns:
      bool: True if the Registry consists of a single file.
    """
    return self._single_file
  def OpenFile(self, windows_path):
    """Opens the file specified by the Windows path.
    Args:
      windows_path (str): Windows path to the file.
    Returns:
      dfvfs.FileIO: file-like object or None if the file does not exist.
    """
    if not self._single_file:
      return super(WindowsRegistryCollector, self).OpenFile(windows_path)
    # Single-file mode: windows_path is ignored and the source file itself
    # is opened.
    # TODO: check name of single file.
    path_spec = dfvfs_path_spec_factory.Factory.NewPathSpec(
        dfvfs_definitions.TYPE_INDICATOR_OS, location=self._source_path)
    if path_spec is None:
      return None
    return dfvfs_resolver.Resolver.OpenFileObject(path_spec)
  def ScanForWindowsVolume(self, source_path):
    """Scans for a Windows volume.
    Args:
      source_path (str): source path.
    Returns:
      bool: True if a Windows volume was found.
    Raises:
      ScannerError: if the source path does not exists, or if the source path
          is not a file or directory, or if the format of or within
          the source file is not supported.
    """
    result = super(WindowsRegistryCollector, self).ScanForWindowsVolume(
        source_path)
    # A plain-file source is treated as a single Registry file.
    if self._source_type == dfvfs_definitions.SOURCE_TYPE_FILE:
      self._single_file = True
      return True
    return result
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
a6e3e7723b1c6314f82564eb711a6401b7bd795b | e7af0c1de1185bdda5ff43669ca465e828332581 | /public/fit.py | ffb1a4f76c2b4db8583c2b0b60173fb8995105b8 | [
"MIT"
] | permissive | KoshikawaShinya/ppwa | 511f7dbe9818039bafb8cce4b469b3c3f7349423 | b5278a9775ee12d1621021bebdcae2b271474958 | refs/heads/master | 2022-12-09T15:51:52.956493 | 2020-08-19T10:41:03 | 2020-08-19T10:41:03 | 287,700,134 | 0 | 0 | null | 2020-08-15T07:33:45 | 2020-08-15T07:33:44 | null | UTF-8 | Python | false | false | 1,854 | py | import glob
import os
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.python import keras as K
img_size = 300  # edge length (px) the resized training images must have
img_num = 2     # number of output classes (one per image folder)
# Load the persisted run counter used to version the saved model files.
with open('count.pickle', 'rb') as f:
    count = pickle.load(f)
label = 0
datas = []
labels = []
# Turn the images into tensors: one integer label per source folder.
for fold_path in glob.glob('../storage/app/public/photo_images/resized/*'):
    imgs = glob.glob(fold_path + '/*')
    for img_path in imgs:
        # NOTE(review): cv2.imread returns None for unreadable files and
        # nothing checks for that here — a bad file would poison np.array
        # below; confirm upstream guarantees readable images.
        img = cv2.imread(img_path)
        datas.append(img)
        labels.append(label)
    label += 1
image_datas = np.array(datas)
image_labels = np.array(labels)
x_train, x_test, y_train, y_test = train_test_split(image_datas, image_labels)
# Scale pixel values into [0, 1].
x_train = x_train / 255
x_test = x_test / 255
model = K.Sequential([
    # K.layers.Conv2D(number of filters, kernel size (a, a), input shape, activation)
    K.layers.Conv2D(32, kernel_size=(3, 3), strides=1, input_shape=(img_size, img_size, 3), activation="relu"),
    K.layers.MaxPooling2D(pool_size=(2,2)),
    K.layers.Conv2D(64, (3, 3), strides=1, activation="relu"),
    K.layers.MaxPooling2D(pool_size=(2,2)),
    K.layers.Conv2D(64, (3, 3), strides=1, activation="relu"),
    # Flatten to a one-dimensional vector
    K.layers.Flatten(),
    K.layers.Dense(64, activation="relu"),
    K.layers.Dense(img_num, activation="softmax")
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, epochs=5)
predicts = model.predict(x_test)
predicts = np.argmax(predicts, axis=1)
print(classification_report(y_test, predicts))
# Bump the version counter, save the model, then persist the counter.
count += 1
model.save('saved_model/PredictFruit_' + str(count) + '.h5')
with open('count.pickle', 'wb') as f:
    pickle.dump(count, f)
"satowimp0319@gmail.com"
] | satowimp0319@gmail.com |
56618462619a5fbdf553629f377f72a79e0f0732 | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py | 944e403f7d222fde30692a4be7ee8280576fe50b | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:7e5b35f67d58e239f0381b19cfff45d358b4f311f68195c4232c8399677e98a5
size 33591
| [
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] | seokinj@jangseog-in-ui-MacBook-Pro.local |
218858f59480e0196ac56ea89486303734741c50 | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1903/python01/day04/randpass.py | 42d359e1d3b8ae650b80e350b9b857d65d06d6d2 | [] | no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 191 | py | from random import choice
# Candidate characters: digits plus lower- and upper-case letters.
all_chs = '1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'
# Build the 8-character password with str.join instead of quadratic += in a
# loop.  NOTE: random.choice is fine for this teaching demo; real passwords
# should come from the `secrets` module.
result = ''.join(choice(all_chs) for _ in range(8))
print(result)
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
1838ac019d3d1931bbc379fca3daf7b5a624bd1c | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayMerchantComplainReconciliationQueryRequest.py | 9ca5de878179b1a08db1e17407bb49c9a9377f13 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,045 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMerchantComplainReconciliationQueryModel import AlipayMerchantComplainReconciliationQueryModel
class AlipayMerchantComplainReconciliationQueryRequest(object):
    """OpenAPI request wrapper for alipay.merchant.complain.reconciliation.query.

    Standard SDK boilerplate: every request field is exposed as a property
    and get_params() flattens the populated fields into the wire-format dict.
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayMerchantComplainReconciliationQueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMerchantComplainReconciliationQueryModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        # Lazily create the user-defined parameter dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Serialize the populated request fields into the API parameter dict."""
        params = dict()
        params[P_METHOD] = 'alipay.merchant.complain.reconciliation.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """This request carries no file parts; always returns an empty dict."""
        multipart_params = dict()
        return multipart_params
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
786edfe6db7cbf98fc8b5771ee273030a79fe00a | 79e19819aec49b500825f82a7de149eb6a0ba81d | /leetcode/27.py | 2d8bf84dbb95a493a05cbdcf8d0945cb7dd76212 | [] | no_license | seoyeonhwng/algorithm | 635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26 | 90406ee75de69996e666ea505ff5d9045c2ad941 | refs/heads/master | 2023-05-03T16:51:48.454619 | 2021-05-26T00:54:40 | 2021-05-26T00:54:40 | 297,548,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # 삭제할 원소를 맨 뒤로 몰아넣고, 배열의 크기를 줄인다!
class Solution:
    def removeElement(self, nums: List[int], val: int) -> int:
        """Remove every occurrence of val in-place and return the new length.

        Each element slated for removal is overwritten with the current last
        element and the logical size shrinks by one — element order is not
        preserved, which keeps the algorithm O(n) with O(1) extra space.
        """
        size = len(nums)
        idx = 0
        while idx < size:
            if nums[idx] != val:
                idx += 1
            else:
                size -= 1
                nums[idx] = nums[size]
        return size
"seoyeon@nowbusking.com"
] | seoyeon@nowbusking.com |
aa6356be04c2d65cffc046602f1a25268f5b78cf | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_supply.py | 3f1e672b7dd59f95b8e9b7d5379b7b0f045b0c6b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py |
#calss header
class _SUPPLY():
def __init__(self,):
self.name = "SUPPLY"
self.definitions = [u'to provide something that is wanted or needed, often in large quantities and over a long period of time: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
663771fc38f04f61b54985175f6ebf9fecc4190c | 35a398d96c8433eeb8d807f155504e072a098a04 | /hwilib/devices/trezorlib/log.py | 50f778a12dabeb0f0459e24400dbadbc90bc2acd | [
"MIT"
] | permissive | Sjors/HWI | a98ea8dcd8655fc65d8a4225e1c0bf09462525d2 | b3b9f8818d9a851e9a88368f83de77ce504c522c | refs/heads/master | 2021-11-29T04:08:26.538699 | 2021-01-29T03:13:44 | 2021-01-29T03:14:02 | 148,819,675 | 2 | 0 | MIT | 2018-09-14T17:15:17 | 2018-09-14T17:15:17 | null | UTF-8 | Python | false | false | 1,799 | py | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import logging
from typing import Optional, Set, Type
from . import protobuf
# Message types whose payload is elided from debug logs; only the byte
# size is printed for them (see PrettyProtobufFormatter.format).
OMITTED_MESSAGES = set()  # type: Set[Type[protobuf.MessageType]]
class PrettyProtobufFormatter(logging.Formatter):
    """Log formatter that appends a pretty-printed protobuf to each record."""

    def format(self, record: logging.LogRecord) -> str:
        """Render *record* as '[time] source LEVEL: msg' plus any protobuf."""
        rendered = super().format(record)
        message = "[{time}] {source} {level}: {msg}".format(
            time=self.formatTime(record),
            level=record.levelname.upper(),
            source=record.name,
            msg=rendered,
        )
        try:
            pb = record.protobuf
        except AttributeError:
            # No protobuf attached to this record.
            return message
        if type(pb) in OMITTED_MESSAGES:
            # Large/noisy messages: show only their size.
            return message + " ({} bytes)".format(pb.ByteSize())
        return message + "\n" + protobuf.format_message(pb)
def enable_debug_output(handler: Optional[logging.Handler] = None):
    """Route trezorlib DEBUG logging through the pretty protobuf formatter.

    When *handler* is omitted, a stderr StreamHandler is created.
    The handler is added to (not replacing) the 'trezorlib' logger's handlers.
    """
    target = handler if handler is not None else logging.StreamHandler()
    target.setFormatter(PrettyProtobufFormatter())
    logger = logging.getLogger("trezorlib")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(target)
| [
"achow101-github@achow101.com"
] | achow101-github@achow101.com |
ed9f248c4a3a90143c4c4a9fa970e44a0d7b69c9 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/network/azure_stack/profile_2019_03_01_hybrid/operations/_util.py | 7f0f515e93c51f2802407cbe7f176fe90553fb6c | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 592 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import importlib
def import_aaz_by_profile(module_name):
    """Load an aaz command module for this profile.

    The 2019-03-01-hybrid profile reuses the 2018-03-01-hybrid aaz modules
    because the underlying APIs are the same.
    """
    package = "azure.cli.command_modules.network.aaz.profile_2018_03_01_hybrid"
    return importlib.import_module("{}.{}".format(package, module_name))
| [
"noreply@github.com"
] | Azure.noreply@github.com |
cedc36ff24c7551d29d2d758e7ec05c63bdeb54f | 860c31e414c4c280b70ec0872042d715a2d56978 | /torch_ecg/augmenters/random_flip.py | 8e568a5e53279bd331a03438f99ca8d76c73c9f6 | [
"MIT"
] | permissive | DeepPSP/torch_ecg | 255e49ff436e13044a1f049141f982680e56970e | a40c65f4fefa83ba7d3d184072a4c05627b7e226 | refs/heads/master | 2023-09-01T06:47:17.153216 | 2023-08-31T18:00:47 | 2023-08-31T18:00:47 | 298,482,237 | 111 | 16 | MIT | 2023-08-21T11:25:07 | 2020-09-25T06:03:17 | Python | UTF-8 | Python | false | false | 3,705 | py | """
"""
from numbers import Real
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from .base import Augmenter
__all__ = [
"RandomFlip",
]
class RandomFlip(Augmenter):
    """Randomly flip the ECGs along the voltage axis.

    Parameters
    ----------
    fs : int, optional
        Sampling frequency of the ECGs to be augmented
        (stored but not used by the flipping logic itself).
    per_channel : bool, default True
        Whether to flip each channel independently.
    prob : float or Sequence[float], default ``[0.4, 0.2]``
        Probability of performing flip,
        the first probality is for the batch dimension,
        the second probability is for the lead dimension.
    inplace : bool, default True
        If True, ECG signal tensors will be modified inplace.
    kwargs : dict, optional
        Additional keyword arguments.

    Examples
    --------
    .. code-block:: python

        rf = RandomFlip()
        sig = torch.randn(32, 12, 5000)
        sig, _ = rf(sig, None)

    """

    __name__ = "RandomFlip"

    def __init__(
        self,
        fs: Optional[int] = None,
        per_channel: bool = True,
        prob: Union[Sequence[float], float] = [0.4, 0.2],
        inplace: bool = True,
        **kwargs: Any
    ) -> None:
        super().__init__()
        self.fs = fs
        self.per_channel = per_channel
        self.inplace = inplace
        self.prob = prob
        # A scalar probability is applied to both the batch and the lead axis.
        if isinstance(self.prob, Real):
            self.prob = np.array([self.prob, self.prob])
        else:
            self.prob = np.array(self.prob)
        assert (self.prob >= 0).all() and (
            self.prob <= 1
        ).all(), "Probability must be between 0 and 1"

    def forward(
        self,
        sig: Tensor,
        label: Optional[Tensor],
        *extra_tensors: Sequence[Tensor],
        **kwargs: Any
    ) -> Tuple[Tensor, ...]:
        """Forward function of the RandomFlip augmenter.

        Parameters
        ----------
        sig : torch.Tensor
            The ECGs to be augmented, of shape ``(batch, lead, siglen)``.
        label : torch.Tensor, optional
            Label tensor of the ECGs.
            Not used, but kept for consistency with other augmenters.
        extra_tensors : Sequence[torch.Tensor], optional
            Not used, but kept for consistency with other augmenters.
        kwargs : dict, optional
            Additional keyword arguments.
            Not used, but kept for consistency with other augmenters.

        Returns
        -------
        sig : torch.Tensor
            The augmented ECGs.
        label : torch.Tensor
            The label tensor of the augmented ECGs, unchanged.
        extra_tensors : Sequence[torch.Tensor], optional
            Unchanged extra tensors.

        """
        batch, lead, siglen = sig.shape
        if not self.inplace:
            sig = sig.clone()
        # A zero batch probability disables the augmentation entirely.
        if self.prob[0] == 0:
            return (sig, label, *extra_tensors)
        if self.per_channel:
            # Build a per-(sample, lead) sign mask, then apply it in one multiply.
            # get_indices is inherited from Augmenter; presumably it draws a
            # random subset of indices with the given probability — confirm there.
            flip = torch.ones((batch, lead, 1), dtype=sig.dtype, device=sig.device)
            for i in self.get_indices(prob=self.prob[0], pop_size=batch):
                flip[i, self.get_indices(prob=self.prob[1], pop_size=lead), ...] = -1
            sig = sig.mul_(flip)
        else:
            # Flip all leads of the selected samples together.
            flip = torch.ones((batch, 1, 1), dtype=sig.dtype, device=sig.device)
            flip[self.get_indices(prob=self.prob[0], pop_size=batch), ...] = -1
            sig = sig.mul_(flip)
        return (sig, label, *extra_tensors)

    def extra_repr_keys(self) -> List[str]:
        # Attributes shown in repr(), in addition to the base class's keys.
        return [
            "per_channel",
            "prob",
            "inplace",
        ] + super().extra_repr_keys()
| [
"wenh06@gmail.com"
] | wenh06@gmail.com |
02477477bced69dfa14b2f606809e5fa1938461d | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/update_backend_instances_v2_request.py | 474669c5d89f50b1eda068f6274602635a9fff48 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,109 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateBackendInstancesV2Request:
    """Request wrapper for the UpdateBackendInstancesV2 API.

    Class attributes:
        openapi_types: maps attribute name -> attribute type name.
        attribute_map: maps attribute name -> JSON key in the wire format.
        sensitive_list: attributes masked as "****" in to_dict().
    """

    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'vpc_channel_id': 'str',
        'body': 'VpcMemberModify'
    }

    attribute_map = {
        'instance_id': 'instance_id',
        'vpc_channel_id': 'vpc_channel_id',
        'body': 'body'
    }

    def __init__(self, instance_id=None, vpc_channel_id=None, body=None):
        """Build an UpdateBackendInstancesV2Request.

        :param instance_id: gateway instance ID (from the console "instance info")
        :type instance_id: str
        :param vpc_channel_id: ID of the VPC channel
        :type vpc_channel_id: str
        :param body: request payload
        :type body: :class:`huaweicloudsdkapig.v2.VpcMemberModify`
        """
        self._instance_id = None
        self._vpc_channel_id = None
        self._body = None
        self.discriminator = None
        self.instance_id = instance_id
        self.vpc_channel_id = vpc_channel_id
        if body is not None:
            self.body = body

    @property
    def instance_id(self):
        """str: gateway instance ID."""
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        self._instance_id = instance_id

    @property
    def vpc_channel_id(self):
        """str: ID of the VPC channel."""
        return self._vpc_channel_id

    @vpc_channel_id.setter
    def vpc_channel_id(self, vpc_channel_id):
        self._vpc_channel_id = vpc_channel_id

    @property
    def body(self):
        """Request payload (:class:`huaweicloudsdkapig.v2.VpcMemberModify`)."""
        return self._body

    @body.setter
    def body(self, body):
        self._body = body

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively; sensitive attributes are masked.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"  # never leak sensitive fields
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`."""
        return self.to_str()

    def __eq__(self, other):
        """Two requests are equal when all their attributes match."""
        return isinstance(other, UpdateBackendInstancesV2Request) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
269a453e77012b0ffb441e1a0f07293fec6eb70c | afc85d75ac2115f33a00d535a0f08104571a1e4a | /Ex87.py | 404c5a22a285e0a880bf24c609529823cf1c801d | [] | no_license | BrunoCerberus/Algoritmo | aa80651920705a88248fa32d700555672964dae4 | 8fad13cf936eb7120f26a699bca4a8ad76d1a53f | refs/heads/master | 2016-09-08T01:23:09.210110 | 2015-07-08T06:39:52 | 2015-07-08T06:39:52 | 29,315,141 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | """
Programa que calcula o total de elementos separadamente
por linha, e os exibe.
"""
# Compute the element total of each row of a jagged 2-D list and print them.
a = [[1, 2], [3, 4, 5], [6, 7]]

# One sum() per row replaces the three duplicated index loops; this also
# generalizes correctly if the row lengths change.
total1, total2, total3 = (sum(row) for row in a)

print("Total da primeira linha e", total1)
print("Total da segunda linha e", total2)
print("Total da terceira linha e", total3)
| [
"bruno.guitarpro@gmail.com"
] | bruno.guitarpro@gmail.com |
a8a774633139c03938895ba6eb2851db9c02b02f | 495531870c08ea3495bb45393b05f907366f052e | /x7-src/dashboard/steer/steer/dashboards/engine/instances_and_volumes/tests.py | 7aca763ebbbf750b2f4f736e1d46d747c87480ad | [
"Apache-2.0"
] | permissive | wendy-king/x7_venv | 5fcb326cf3ecaa26d3b839af743b027d23af29e0 | d8266c1dc474935c54126ce36d1a6410a7e452f5 | refs/heads/master | 2021-01-01T06:33:24.605851 | 2012-01-19T15:54:44 | 2012-01-19T15:54:44 | 3,209,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from mox import IsA, IgnoreArg
from x7x.api import exceptions as api_exceptions
from steer import api
from steer import test
class InstancesAndVolumesViewTest(test.BaseViewTests):
    """View tests for the combined instances-and-volumes index page."""

    def setUp(self):
        # One fake server and one fake volume, shared by all tests below.
        super(InstancesAndVolumesViewTest, self).setUp()
        server = api.Server(None, self.request)
        server.id = 1
        server.name = 'serverName'
        volume = api.Volume(self.request)
        volume.id = 1
        self.servers = (server,)
        self.volumes = (volume,)

    def test_index(self):
        # Happy path: both API listings succeed and the template receives
        # the prepared servers as "instances".
        self.mox.StubOutWithMock(api, 'server_list')
        self.mox.StubOutWithMock(api, 'volume_list')
        api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers)
        api.volume_list(IsA(http.HttpRequest)).AndReturn(self.volumes)

        self.mox.ReplayAll()

        res = self.client.get(
            reverse('steer:engine:instances_and_volumes:index'))

        self.assertTemplateUsed(res,
            'engine/instances_and_volumes/index.html')
        self.assertItemsEqual(res.context['instances'], self.servers)

    def test_index_server_list_exception(self):
        # When the server listing raises an API exception, the page must
        # still render — just with an empty instance list.
        self.mox.StubOutWithMock(api, 'server_list')
        self.mox.StubOutWithMock(api, 'volume_list')
        exception = api_exceptions.ApiException('apiException')
        api.server_list(IsA(http.HttpRequest)).AndRaise(exception)
        api.volume_list(IsA(http.HttpRequest)).AndReturn(self.volumes)

        self.mox.ReplayAll()

        res = self.client.get(
            reverse('steer:engine:instances_and_volumes:index'))

        self.assertTemplateUsed(res,
            'engine/instances_and_volumes/index.html')
        self.assertEqual(len(res.context['instances']), 0)
| [
"king_wendy@sina.com"
] | king_wendy@sina.com |
e280b5a3a149a8222d07e449c825e541ea713970 | 3058fa7653137ea32b552d800377e19927dbc86b | /Subject4_Science/E3_Sci_StreetView/3_StreetView_Sci_CNN.py | 4ae55feeff5676dcf1e76772514b879b58452f22 | [] | no_license | heatherbaier/schoolCNN | 9b1a3301b8e49294f298c384e9f69fe25c1bf4eb | df1120d07b37881df801a2a828fc7715b1ea74af | refs/heads/master | 2020-12-09T04:31:40.800407 | 2020-01-27T08:24:40 | 2020-01-27T08:24:40 | 232,407,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,460 | py | from __future__ import print_function, division
from torchvision import datasets, models, transforms
from imgaug import parameters as iap
from imgaug import augmenters as iaa
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn as nn
import imgaug as ia
import numpy as np
import torchvision
import pickle
import joblib
import torch
import copy
import time
import os
plt.ion() # interactive mode
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
epoch_num = 0
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
# Save each epoch that achieves a higher accuracy than the current best_acc in case the model crashes mid-training
model_name = './clean/Subject4_Science/E3_Sci_StreetView/epochs/StreetViewResNeXt101_Sci_Epoch' + str(epoch_num) + '.sav'
pickle.dump(model, open(model_name, 'wb'))
epoch_num += 1
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def visualize_model(model, num_images=6):
    """Plot predictions for `num_images` validation samples in a 2-column grid.

    Restores the model's original train/eval mode before returning.
    Relies on the module-level `dataloaders`, `device` and `class_names`.
    """
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])

                # Stop as soon as the grid is full.
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
def imshow(inp, title=None):
    """Un-normalize a CHW image tensor and display it with matplotlib."""
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    # Tensor layout is (C, H, W); matplotlib wants (H, W, C).
    image = np.clip(std * inp.numpy().transpose((1, 2, 0)) + mean, 0, 1)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # brief pause so the figure actually refreshes
#### TRANSFORM DATA ####
class ImgAugTransform:
    """imgaug augmentation pipeline used as a torchvision-style transform.

    Resizes to 224x224 and applies a randomized mix of blur, brightness,
    inversion, replacement noise, poisson noise, flips, rotation, dropout
    and hue/saturation jitter.
    """

    def __init__(self):
        self.aug = iaa.Sequential([
            iaa.Scale((224, 224)),
            # 30%: gaussian blur with random sigma
            iaa.Sometimes(0.30, iaa.GaussianBlur(sigma=(0, 3.0))),
            # 25%: brightness scaling, sometimes per channel
            iaa.Sometimes(0.25, iaa.Multiply((0.5, 1.5), per_channel=0.5)),
            # 20%: partial inversion
            iaa.Sometimes(0.20, iaa.Invert(0.25, per_channel=0.5)),
            # 25%: coarse replacement noise drawn from a normal distribution
            iaa.Sometimes(0.25, iaa.ReplaceElementwise(
                iap.FromLowerResolution(iap.Binomial(0.1), size_px=8),
                iap.Normal(128, 0.4*128),
                per_channel=0.5)
            ),
            # 30%: poisson noise
            iaa.Sometimes(0.30, iaa.AdditivePoissonNoise(40)),
            iaa.Fliplr(0.5),  # horizontal flip half the time
            iaa.Affine(rotate=(-20, 20), mode='symmetric'),
            # 30%: fine or coarse dropout
            iaa.Sometimes(0.30,
                          iaa.OneOf([iaa.Dropout(p=(0, 0.1)),
                                     iaa.CoarseDropout(0.1, size_percent=0.5)])),
            iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True)
        ])

    def __call__(self, img):
        # imgaug operates on numpy arrays, so convert (e.g. from PIL) first.
        img = np.array(img)
        return self.aug.augment_image(img)
# Train/val preprocessing: imgaug pipeline, then tensor conversion and
# ImageNet mean/std normalization (required by the pretrained backbone).
data_transforms = {
    'train': transforms.Compose([
        ImgAugTransform(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        ImgAugTransform(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

# Expects the data/train/<class>/... and data/val/<class>/... folder layout.
data_dir = './clean/Subject4_Science/E3_Sci_StreetView/data/'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=100,
                                              shuffle=True, num_workers=0)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))

# Make a grid from batch and show it as a sanity check
out = torchvision.utils.make_grid(inputs)

imshow(out, title=[class_names[x] for x in classes])

# Pretrained ResNeXt-101 backbone with a fresh binary classification head.
model_ft = models.resnext101_32x8d(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=50)

visualize_model(model_ft)

# NOTE(review): the filename says "10epoch" but training runs 50 epochs —
# presumably a stale name; confirm before relying on it.
final_model_name = './clean/Subject4_Science/E3_Sci_StreetView/models/StreetViewResNeXt101_Sci_10epoch.sav'
pickle.dump(model_ft, open(final_model_name, 'wb'))
| [
"hmbaier@email.wm.edu"
] | hmbaier@email.wm.edu |
50f7434229ae157f44b1726a7e098f852970cab7 | 78f43f8bd07ae0fc91738a63cd7bbca08ae26066 | /leetcode/twopointer/two_sum_ii_input_array_is_sorted_twopointer.py | d3fbe0fe52b8997b0ce0589ddcc703213e1ff0cf | [] | no_license | hanrick2000/LeetcodePy | 2f3a841f696005e8f0bf4cd33fe586f97173731f | b24fb0e7403606127d26f91ff86ddf8d2b071318 | refs/heads/master | 2022-04-14T01:34:05.044542 | 2020-04-12T06:11:29 | 2020-04-12T06:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from typing import List
class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Two-pointer search over a sorted array; returns 1-based indices."""
        if len(numbers) <= 1:
            return []
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            pair_sum = numbers[lo] + numbers[hi]
            if pair_sum > target:
                hi -= 1      # sum too large: shrink from the right
            elif pair_sum < target:
                lo += 1      # sum too small: grow from the left
            else:
                return [lo + 1, hi + 1]
        return []
| [
"dofu@ebay.com"
] | dofu@ebay.com |
14f4eccc41465e450e48c6dc0efbf946375796c6 | 50f15c1d9bdd580dcd7d082d723a7f78aae696de | /ave/components/requirements.py | b859c2eab7dc231c498eb07a62aa651b0db0f099 | [
"MIT"
] | permissive | AVEgame/AVE | 78107942056eafd0dced09f3927d45b092727984 | d9292af865ddebe125ede1aabba9fb360bfa03f7 | refs/heads/master | 2021-01-22T07:07:22.164595 | 2020-07-24T13:40:16 | 2020-07-24T13:40:16 | 29,246,001 | 3 | 0 | MIT | 2020-07-24T13:40:18 | 2015-01-14T13:43:17 | Python | UTF-8 | Python | false | false | 3,483 | py | """Conditions that are required to show a line, option or item name."""
from .numbers import Constant
class Requirement:
    """Abstract base class for all display conditions."""

    def has(self, character):
        """Return True when *character* satisfies this requirement."""
        raise NotImplementedError

    def get_all(self):
        """Return every item involved in this requirement."""
        raise NotImplementedError
class RequiredItem(Requirement):
    """Condition: the character carries a given item."""

    def __init__(self, item):
        self.item = item

    def has(self, character):
        """True when *character* holds the item."""
        return character.has(self.item)

    def get_all(self):
        """The single item this condition refers to."""
        return [self.item]
class RequiredNumber(Requirement):
    """A numerical variable must satisfy a condition.

    Evaluates ``v1 <sign> v2`` against the character's current values;
    ``v2`` defaults to the constant 0.
    """

    def __init__(self, v1, sign=">", v2=None):
        """Make the requirement.

        :param v1: left-hand number (provides get_value/get_all_variables)
        :param sign: one of ">", "<", ">=", "<=", "=", "=="
        :param v2: right-hand number; defaults to ``Constant(0)``
        """
        self.v1 = v1
        self.sign = sign
        # Lazy default: avoids building a shared Constant instance at import
        # time while keeping the same behavior for callers that omit v2.
        self.v2 = Constant(0) if v2 is None else v2

    def has(self, character):
        """Check if the character satisifies this."""
        v1 = self.v1.get_value(character)
        v2 = self.v2.get_value(character)
        if self.sign == ">":
            return v1 > v2
        if self.sign == "<":
            return v1 < v2
        if self.sign == ">=":
            return v1 >= v2
        if self.sign == "<=":
            return v1 <= v2
        if self.sign == "=" or self.sign == "==":
            return v1 == v2
        # Unknown signs fall through and return None (falsy), as before.

    def get_all(self):
        """Get all items involved in this requirement.

        Bug fix: the second term previously re-used ``v1``, so variables
        referenced only by ``v2`` were silently dropped.
        """
        return self.v1.get_all_variables() + self.v2.get_all_variables()
class Or(Requirement):
    """One of a set of Requirements must be satisfied."""

    def __init__(self, *items):
        self.items = items

    def has(self, character):
        """True if any sub-requirement is satisfied (short-circuits)."""
        return any(item.has(character) for item in self.items)

    def get_all(self):
        """Items from every sub-requirement, concatenated."""
        collected = []
        for item in self.items:
            collected += item.get_all()
        return collected
class And(Requirement):
    """A set of Requirements must all be satisfied."""

    def __init__(self, *items):
        self.items = items

    def has(self, character):
        """True only when every sub-requirement is satisfied (short-circuits)."""
        return all(item.has(character) for item in self.items)

    def get_all(self):
        """Items from every sub-requirement, concatenated."""
        return [entry for item in self.items for entry in item.get_all()]
class Not(Requirement):
    """Logical negation of another Requirement."""

    def __init__(self, item):
        self.item = item

    def has(self, character):
        """True when the wrapped requirement is NOT satisfied."""
        return not self.item.has(character)

    def get_all(self):
        """Delegate to the wrapped requirement."""
        return self.item.get_all()
class Satisfied(Requirement):
    """Requirement that always holds."""

    def has(self, character):
        """Always True, regardless of the character."""
        return True

    def get_all(self):
        """No items are involved."""
        return []
| [
"matthew.w.scroggs@gmail.com"
] | matthew.w.scroggs@gmail.com |
7975bc63573e9c2bae7ca54b0b9772152873656a | 12ecd25d75023b7269e9eb103c5cab01b9798859 | /questions/migrations/0001_initial.py | 3fb589ac7a7434f86b4ca8c86b18895480b46ae1 | [
"MIT"
] | permissive | alexyvassili/otuspy-hasker | dfe8881cc6d150dd98298cd308af19bc3d06a068 | b094401fd863eab8006c6d4a92d2c08efb815f95 | refs/heads/master | 2023-02-17T18:28:00.002133 | 2022-06-17T21:10:49 | 2022-06-17T21:10:49 | 133,680,455 | 0 | 1 | MIT | 2023-02-15T18:45:59 | 2018-05-16T14:44:23 | JavaScript | UTF-8 | Python | false | false | 781 | py | # Generated by Django 2.0.5 on 2018-05-23 09:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: a one-to-one UserProfile extension of the auth user."""

    initial = True

    dependencies = [
        # Depend on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Defaults to a bundled placeholder image until the user uploads one.
                ('avatar', models.ImageField(default='users/avatar.jpg', upload_to='users/')),
                # Deleting the auth user cascades to the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"escantor@gmail.com"
] | escantor@gmail.com |
f2e2e84612c442fb0b08b491827aeed5390ab984 | bc539788b876773e294383863252c1637de9eb7f | /scrapy/PycharmProjects/Reptile/ven/6-python发送邮件-测试.py | cb5c6ed0e152de7335e4a7db2b59cd04278b645c | [] | no_license | umsung/scrapy | 4eb56bf74f3e617e49dcdec61cf77010eb912f4f | deacd9f289159c5af114b0dd3110448ad7eb43e8 | refs/heads/master | 2020-05-31T14:11:46.530793 | 2019-10-16T01:32:25 | 2019-10-16T01:32:25 | 190,321,772 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def send_mail(subject='subject', content='content'):
    """Send a plain-text mail through the Sina SMTP server.

    Account, password and addresses are left blank and must be filled in
    before use. SMTP protocol failures are printed rather than raised.
    """
    # --- server settings ---
    mail_host = 'smtp.sina.com'   # other common hosts: smtp.vip.sina.com,
    mail_port = 25                # smtp.sohu.com, smtp.126.com, smtp.139.com,
    mail_user = ''                # smtp.163.com; account name goes here
    mail_pwd = ''                 # password

    # --- build the message: body text, plain format, utf-8 encoding ---
    message = MIMEText(content, 'plain', 'utf-8')
    message['Subject'] = subject
    message['From'] = ''          # sender address
    message['To'] = ''            # recipient address(es); join multiple with ';'

    # --- deliver ---
    try:
        smtp_obj = smtplib.SMTP()
        smtp_obj.connect(mail_host, mail_port)   # SMTP default port is 25
        smtp_obj.login(mail_user, mail_pwd)
        # as_string() flattens the MIMEText object into RFC 2822 text.
        smtp_obj.sendmail(message['From'], message['To'], message.as_string())
        smtp_obj.quit()
    except smtplib.SMTPException as e:
        print(e)
# Demo: send a hard-coded subject/body pair when the script is run.
s = 'Please study hard'
c = 'My name is Teacher hou, I teach python'
send_mail(subject=s, content=c)
| [
"545699233@qq.com"
] | 545699233@qq.com |
23cac2a90471800dcb06f401d6accba6a9b068b5 | 49a6f4076d287af69834f22b9af0b4c05ad32556 | /docs/conf.py | 6005be4b757601f5a22187dc8949749ba13bfcea | [
"Zlib"
] | permissive | ywg121020/libusb | fcd9597c10a9a857fa4934cb4c94296813a64820 | 0755dc25c60bfb92fa41dfe460c1b9fa638be913 | refs/heads/master | 2020-06-11T00:34:04.840221 | 2019-02-15T13:47:05 | 2019-02-15T13:47:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,320 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import sys
import os
# Load package metadata by exec-ing __about__.py inside a throwaway class
# body; its module-level names become attributes of `about`.
top_dir = os.path.join(os.path.dirname(__file__), os.pardir)
with open(os.path.join(top_dir, "src", "libusb", "__about__.py")) as f:
    class about: exec(f.read(), None)
def setup(app):
    """Sphinx extension entry point; nothing custom is registered."""
    pass
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# All project metadata is sourced from the package's __about__.py (see above).
project = about.__title__
project_ = project.replace('.', '_')  # identifier-safe variant of the name
copyright = about.__copyright__
author = about.__author__

# The short X.Y version
version = '{0.major}.{0.minor}'.format(about.__version_info__)
# The full version, including alpha/beta/rc tags
release = about.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    #'sphinx.ext.todo',
    #'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    # napoleon: parse Google/NumPy-style docstrings for autodoc.
    'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------

# Builtin theme used for HTML and HTML Help pages; 'alabaster' is the Sphinx
# default and kept here as an alternative.
html_theme = 'sphinxdoc'  # 'alabaster'

# Theme-specific customisation — none needed for this project.
#
# html_theme_options = {}

# Custom static files (style sheets etc.); copied after the builtin static
# files, so a file named "default.css" would override the builtin one.
html_static_path = ['_static']

# Custom sidebar templates (document name -> template name). The theme's
# defaults ('localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html') are used when unset.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for the HTML help builder.
htmlhelp_basename = f'{project_}_doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, project_ + '.tex', project + ' Documentation',
     author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, project_.lower(), project + ' Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, project_, project + ' Documentation',
     author, project_, about.__summary__,
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
| [
"akarpierz@gmail.com"
] | akarpierz@gmail.com |
8d5788663ac2a8631c337da7c2251331c5de6e80 | 3fbd26091ebbc13913f9c7be1aaf10d477c79536 | /week01/zuoye/requests_maoyan/.history/maoyan_20200628012930.py | b4d45d5f82cd1ac6af42e71cf24f78dcfed08446 | [] | no_license | shuncon/Python001-class01 | d28faf3d5d8e9ea4cee93bcae7143a26fd8c472e | df19758181cdaf37f30d4b518600fc4612590499 | refs/heads/master | 2022-11-13T19:31:27.019214 | 2020-07-10T14:58:25 | 2020-07-10T14:58:25 | 273,135,541 | 0 | 0 | null | 2020-06-18T03:46:56 | 2020-06-18T03:46:55 | null | UTF-8 | Python | false | false | 1,583 | py | #-*-conding:utf-8 -*-
import requests
# BUG FIX: ``import lxml`` alone does not make ``lxml.etree`` available;
# the submodule must be imported explicitly before ``lxml.etree.HTML`` works.
import lxml.etree
from bs4 import BeautifulSoup as bfs

# Browser-like headers; the Cookie is required or maoyan serves a
# verification page instead of the listing.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
    'Cookie': '__mta=143555850.1593182389402.1593186588642.1593186951752.13; _lxsdk_cuid=172f111f51562-0c65d9babc0209-3a65460c-1fa400-172f111f516c8; uuid_n_v=v1; uuid=E1D4A130B7BA11EAB4E28FB161D5B82BB28615396FEA473FAA79466FF93A0ADC; _lxsdk=E1D4A130B7BA11EAB4E28FB161D5B82BB28615396FEA473FAA79466FF93A0ADC; mojo-uuid=8bd33533b8dd1a759d17cadf1be0eefb; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _csrf=0d6a79576c8af13e864ce4bc16256224b781a4cddb5105facfa01b80cc9314b6; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593182389,1593186370,1593276349; mojo-session-id={"id":"e7c501439a3b4a3b79e13dd84a3f3791","time":1593276349044}; mojo-trace-id=3; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593276558; __mta=143555850.1593182389402.1593186951752.1593276557953.14; _lxsdk_s=172f6abba85-f3d-41e-9d2%7C%7C6'
}
myurl = 'https://maoyan.com/films?showType=3'
response = requests.get(myurl, headers=headers)
print(f'返回状态码:{response.status_code}')
selector = lxml.etree.HTML(response.text)
bs_info = bfs(response.text, 'html.parser')

# BUG FIX: the original trailing line was a syntax error
# (``bs_info.find_all()('div',arrts={})`` — extra call, 'arrts' typo, no body).
# Reconstructed from the commented-out draft that preceded it: print the link
# and title of every movie entry on the listing page.
for title_div in bs_info.find_all('div', attrs={'class': 'movie-item-title'}):
    for anchor in title_div.find_all('a'):
        print(anchor.get('href'))
        print(anchor.get('title'))
"1428834423@qq.com"
] | 1428834423@qq.com |
7073414c50df76a49a5045cbf9c5f48b0a0da33a | 1bc41de1c91561f37284d7de25c01fb1ff0373a4 | /django_core_utils/constants.py | cbbf827350f7f9ca3dce81c4eabbe604652680e6 | [
"MIT"
] | permissive | ajaniv/django-core-utils | 7e36022956849cb0fc5afdc1440fd3413740cf02 | 4ae9fc325f4bc9e3a3e723207d48133f772804c3 | refs/heads/master | 2020-04-04T21:17:45.840269 | 2019-03-11T23:35:31 | 2019-03-11T23:35:31 | 52,732,860 | 1 | 0 | MIT | 2019-03-11T23:35:32 | 2016-02-28T17:12:35 | Python | UTF-8 | Python | false | false | 313 | py | """
.. module:: django_core_utils.constants
:synopsis: django_core_utils constants module.
django_core_utils constants module.
The *constants* module contains public constant definitions.
"""
# HTTP method names, for declaring/checking request handlers.
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
# Abbreviated site-label token — presumably used to tag per-site data;
# verify against callers elsewhere in the package.
SITE_LABEL = "sl"
# Generic placeholder for values that could not be determined.
UNKNOWN = "UNKNOWN"
| [
"amnon.janiv@ondalear.com"
] | amnon.janiv@ondalear.com |
76d55cda48cfc76fb1f21ee550827b7267464301 | 9c315e3762961668a1fe58ad811ae87c5fbf7539 | /apertium-tools/scraper/scrp-azathabar.py | 0f0d7586b0f74692b378218abc3e342939da4b40 | [] | no_license | frankier/apertium | f2b893115c413203b1194e5c0d4feb0adf2b1b3e | d3f5515bf2455f3046314a62ea564457bcf504b8 | refs/heads/gnulib | 2021-01-20T21:00:53.139135 | 2016-05-27T17:30:01 | 2016-05-27T17:30:01 | 59,847,975 | 0 | 1 | null | 2016-07-07T12:39:01 | 2016-05-27T16:21:14 | HTML | UTF-8 | Python | false | false | 5,564 | py | #!/usr/bin/env python3
from datetime import date, timedelta
from urllib import request
#from lxml import etree
import lxml.html
import lxml.html.clean
from scrapers import ScraperAzathabar
from scraper_classes import Source, Writer
import urllib.error
import http.client
import curses
import sys
import signal
# Host to scrape (name suggests RFE/RL's Turkmen-language service).
domain = "www.azathabar.com"
# Archive listing URL: /archive/<topic>/<year><month><day>/<id>/<id>.html
urltemplate = "/archive/%s/%s%02d%02d/%s/%s.html"
# Archive topic id -> (topic slug, listing granularity).
# "day": one listing page per day; "days": one page covers several days
# (fetched once per month with day=1). Commented-out entries are topics the
# original author chose to skip.
topics = { 2239: ("news", "day"),
           2246: ("Turkmenistan", "days"),
           2247: ("asia", "days"),
           2248: ("international", "days"),
           2275: ("special_programs", "days"),
           2289: ("Multimedia", "days"),
           2756: ("Blogs", "days"),
           #2240: ("commentary", "month"),
           #2244: ("interview", "month"),
           2241: ("politics", "days"),
           2242: ("economics", "days"),
           2243: ("society", "days"),
           #2245: ("culture", "month"),
           #2800: ("History", "year"),
           #2276: ("newscast", "month"),
           #2249: ("analytical", "month"),
           2250: ("neighbors", "days"),
           #2251: ("style", "month"),
           #2252: ("youth", "month"),
           #2253: ("economy", "month"),
           2254: ("spotlight", "days"),
           #2255: ("women", "month"),
           #2256: ("rights", "month"),
           2257: ("health", "days"),
           2258: ("time", "days"),
           #2259: ("review", "month"),
           #2261: ("music", "month"),
           #2263: ("personalities", "month"),
           #2984: ("No_Comment", "??"),
           3264: ("Voice_of_people", "days"),
           }
# Inclusive year/month range to scrape.
startyear = 2010
endyear = 2010
minmonth = 1
maxmonth = 12
# Not referenced below — presumably a leftover from an earlier version.
startnum = 1
def get_urls(monthurl, pagetype):
    """Fetch one archive listing page and extract its article links.

    :param monthurl: path of the listing page on ``domain``.
    :param pagetype: listing granularity ("day"/"days"); only "days" pages
        carry per-day date rows.
    :return: list of ``(url, title)`` tuples, or ``None`` when the server
        answers anything other than 200.
    """
    global domain
    sys.stdout.write("\rGetting %s." % monthurl)
    sys.stdout.flush()
    conn = http.client.HTTPConnection(domain)
    # BUG FIX: the original leaked the connection on the non-200 early
    # return; try/finally guarantees it is closed on every path.
    try:
        conn.request("GET", monthurl)
        res = conn.getresponse()
        if res.status != 200:
            print(monthurl, res.status, res.reason)
            return None
        contents = res.read().decode('utf-8')
        sys.stdout.write(".")
        sys.stdout.flush()
        doc = lxml.html.fromstring(contents)
        # Article links live under the single "middlePart" container.
        mid = doc.find_class("middlePart")[0]
        curdate = ""
        urls = []
        for el in mid.findall(".//li"):
            if "class" not in el.attrib:
                continue
            classes = el.attrib['class'].split()
            # "days" pages interleave date-header rows with article rows;
            # curdate is tracked but currently unused (kept for debugging).
            if pagetype == "days" and "archive_listrow_date" in classes:
                curdate = el.text
            if "zoomMe" in classes and "date" not in classes:
                # Prefer the unstyled anchor (the plain title link)...
                title = None
                url = None
                for a in el.findall(".//a"):
                    if "style" not in a.attrib:
                        title = a.text
                        url = a.attrib["href"]
                # ...and fall back to any anchor when none qualified.
                if title is None or url is None:
                    for a in el.findall(".//a"):
                        title = a.text
                        url = a.attrib["href"]
                if title is not None and url is not None:
                    urls.append((url, title))
        sys.stdout.write("%s urls" % len(urls))
        sys.stdout.write(".\n")
        sys.stdout.flush()
        return urls
    finally:
        conn.close()
def get_allurls(startyear, endyear, minmonth, maxmonth):
    """Collect (url, title) pairs for every configured topic over the
    inclusive year/month range."""
    collected = []
    for year in range(startyear, endyear + 1):
        for month in range(minmonth, maxmonth + 1):
            for num, (topic, pagetype) in topics.items():
                if pagetype == "day":
                    # One listing page per day-of-month.
                    days = range(1, 32)
                elif pagetype == "days":
                    # A single page covers the whole month; fetch day 1 only.
                    days = range(1, 2)
                else:
                    continue
                for day in days:
                    listing = urltemplate % (topic, year, month, day, num, num)
                    found = get_urls(listing, pagetype)
                    if found is not None:
                        collected.extend(found)
    return collected
def main():
    """Scrape every article in the configured date range into the archive.

    Installs a SIGTERM handler that flushes the Writer before exiting, and
    handles Ctrl-C gracefully.
    """
    global startyear, endyear, minmonth, maxmonth, domain
    sys.stdout.write("\rGenerating urls...\n")
    sys.stdout.flush()
    allurls = get_allurls(startyear, endyear, minmonth, maxmonth)
    sys.stdout.write("\r%d articles total\n" % len(allurls))
    conn = http.client.HTTPConnection(domain)
    # ids/root are produced by the first successfully scraped Source and
    # reused for all subsequent ones.
    ids = None
    root = None
    this = 0
    w = Writer()

    def term_handler(sigNum, frame):
        # Flush/close the writer before dying on SIGTERM.
        print("\nReceived a SIGTERM signal. Closing the program.")
        w.close()
        sys.exit(0)
    signal.signal(signal.SIGTERM, term_handler)
    try:
        for (url, title) in allurls:
            this += 1
            try:
                source = Source(url, title=title, scraper=ScraperAzathabar,
                                conn=conn)
                source.makeRoot("./", ids=ids, root=root, lang="tuk")
                msg = "(%s/%s)" % (this, len(allurls))
                source.add_to_archive(msg=msg)
                if ids is None:
                    ids = source.ids
                if root is None:
                    root = source.root
            except Exception as e:
                sys.stdout.write(" — %s \n" % e)
                sys.stdout.flush()
                raise
    except KeyboardInterrupt:
        print("\nReceived a keyboard interrupt. Closing the program.")
    finally:
        # BUG FIX: the original closed the Writer only on KeyboardInterrupt
        # and skipped conn.close() entirely when the re-raised per-article
        # exception propagated; close both on every exit path.
        w.close()
        conn.close()
def tryOneArticle(url):
    """Scrape a single article URL into the archive (debugging helper)."""
    global domain
    conn = http.client.HTTPConnection(domain)
    writer = Writer()
    # No pre-existing tree: start from a fresh root/ids pair.
    article = Source(url, title="", scraper=ScraperAzathabar, conn=conn)
    article.makeRoot("./", ids=None, root=None, lang="tuk")
    article.add_to_archive()
    writer.close()
    conn.close()
# Guard the entry point so importing this module (e.g. for testing) does not
# start a full scrape; behavior when run as a script is unchanged.
if __name__ == "__main__":
    main()
    # Single-article smoke tests kept for reference:
    #tryOneArticle("http://www.azathabar.com/archive/news/20111231/2239/2239.html?id=24439101")
    #tryOneArticle("http://www.azathabar.com/content/article/24437444.html")
    #tryOneArticle("http://www.azathabar.com/content/article/24425908.html")
    #tryOneArticle("http://www.azathabar.com//content/article/2306850.html")
| [
"unhammer@72bbbca6-d526-0410-a7d9-f06f51895060"
] | unhammer@72bbbca6-d526-0410-a7d9-f06f51895060 |
d736a4ccfdcc6273173962b84962e7197e204b18 | 866dee1b3d01b863c31332ec81330d1b5ef5c6fa | /openquake.hazardlib/openquake/hazardlib/tests/gsim/raghukanth_iyengar_2007_test.py | ade25256d8d0e00119510016353fae82d27aa2b9 | [
"MIT",
"AGPL-3.0-only"
] | permissive | rainzhop/ConvNetQuake | 3e2e1a040952bd5d6346905b83f39889c6a2e51a | a3e6de3f7992eac72f1b9883fec36b8c7fdefd48 | refs/heads/master | 2020-08-07T16:41:03.778293 | 2019-11-01T01:49:00 | 2019-11-01T01:49:00 | 213,527,701 | 0 | 0 | MIT | 2019-10-08T02:08:00 | 2019-10-08T02:08:00 | null | UTF-8 | Python | false | false | 4,430 | py | # The Hazard Library
# Copyright (C) 2012-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module
:mod:`openquake.hazardlib.gsim.raghukanth_iyengar_2007_test`
defines
:class:`RaghukanthIyengar2007TestCase`
:class:`RaghukanthIyengar2007KoynaWarnaTestCase`
:class:`RaghukanthIyengar2007SouthernTestCase`
:class:`RaghukanthIyengar2007WesternCentralTestCase`
for testing of
:class:`openquake.hazardlib.gsim.raghukanth_iyengar_2007.RaghukanthIyengar2007`
and subclasses of same.
"""
import warnings
import numpy as np
from openquake.hazardlib import gsim
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
from openquake.hazardlib.gsim.raghukanth_iyengar_2007 import (
RaghukanthIyengar2007,
RaghukanthIyengar2007KoynaWarna,
RaghukanthIyengar2007Southern,
RaghukanthIyengar2007WesternCentral,
)
class RaghukanthIyengar2007TestCase(BaseGSIMTestCase):
    """
    Mean value data obtained by digitizing Figure 5 using
    http://arohatgi.info/WebPlotDigitizer/app/ .
    """
    GSIM_CLASS = RaghukanthIyengar2007
    # Reference datasets and the tolerance (per cent) allowed against them.
    MEAN_FILE = 'RAIY07/RAIY07_PI_MEAN.csv'
    SIGMA_FILE = 'RAIY07/RAIY07_PI_STD_TOTAL.csv'
    TOL_PERCENT = 11.

    def test_mean(self):
        """Means must match the reference dataset."""
        self.check(self.MEAN_FILE, max_discrep_percentage=self.TOL_PERCENT)

    def test_std_total(self):
        """Total standard deviations must match the reference dataset."""
        self.check(self.SIGMA_FILE, max_discrep_percentage=self.TOL_PERCENT)

    def test_warning(self):
        """Any vs30 below the limit for NEHRP class D must raise a warning."""
        gmpe = self.GSIM_CLASS()
        rctx = gsim.base.RuptureContext()
        sctx = gsim.base.SitesContext()
        dctx = gsim.base.DistancesContext()
        # Reasonable default scenario...
        rctx.mag = np.array([6.5])
        dctx.rhypo = np.array([100.])
        # ...with a vs30 chosen to trigger the warning.
        sctx.vs30 = np.array([170.])
        im_type = sorted(gmpe.COEFFS_BEDROCK.sa_coeffs)[0]
        std_types = list(gmpe.DEFINED_FOR_STANDARD_DEVIATION_TYPES)
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            mean = gmpe.get_mean_and_stddevs(
                sctx, rctx, dctx, im_type, std_types)[0]
        # Exactly one UserWarning mentioning the unsupported site class,
        # and the returned means are all NaN.
        assert len(caught) == 1
        assert issubclass(caught[-1].category, UserWarning)
        assert 'not supported' in str(caught[-1].message).lower()
        assert np.all(np.isnan(mean))
class RaghukanthIyengar2007KoynaWarnaTestCase(RaghukanthIyengar2007TestCase):
    """
    Tests for the Koyna-Warna regional variant of the GMPE.

    Mean bedrock motions obtained by digitizing Figure 3 using
    http://arohatgi.info/WebPlotDigitizer/app/ .
    """
    GSIM_CLASS = RaghukanthIyengar2007KoynaWarna
    # Regional reference datasets and tolerance (per cent).
    MEAN_FILE = 'RAIY07/RAIY07_KW_MEAN.csv'
    SIGMA_FILE = 'RAIY07/RAIY07_KW_STD_TOTAL.csv'
    TOL_PERCENT = 1.5
class RaghukanthIyengar2007SouthernTestCase(RaghukanthIyengar2007TestCase):
    """
    Tests for the southern-India regional variant of the GMPE.

    Mean bedrock motions obtained by digitizing Figure 3 using
    http://arohatgi.info/WebPlotDigitizer/app/ .
    """
    GSIM_CLASS = RaghukanthIyengar2007Southern
    # Regional reference datasets and tolerance (per cent).
    MEAN_FILE = 'RAIY07/RAIY07_SI_MEAN.csv'
    SIGMA_FILE = 'RAIY07/RAIY07_SI_STD_TOTAL.csv'
    TOL_PERCENT = 10.
class RaghukanthIyengar2007WesternCentralTestCase(
        RaghukanthIyengar2007TestCase):
    """
    Tests for the western/central-India regional variant of the GMPE.

    Mean bedrock motions obtained by digitizing Figure 3 using
    http://arohatgi.info/WebPlotDigitizer/app/ .
    """
    GSIM_CLASS = RaghukanthIyengar2007WesternCentral
    # Regional reference datasets and tolerance (per cent).
    MEAN_FILE = 'RAIY07/RAIY07_WC_MEAN.csv'
    SIGMA_FILE = 'RAIY07/RAIY07_WC_STD_TOTAL.csv'
    TOL_PERCENT = 2.
| [
"rainzhop@gmail.com"
] | rainzhop@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.