blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
014373df5e1938c8b39fd5fcaacbbd0655dfd64d | 4a53ae5afb11850196ac08763afc637a81ce1dbd | /turbo-entabulator/turbo_entabulator/utilities.py | 2e1a8a52db3511f74cc1a59121cdb99fb9f55ff6 | [] | no_license | xinyli-cumulus/TE_update | 5983309ef66f5316f56298fc3d7b1ef4fb0719d7 | 4e5b9299d3159bbcdd715c33cc08820ad2c8e3fb | refs/heads/master | 2021-05-26T10:49:14.229467 | 2020-04-08T14:10:19 | 2020-04-08T14:10:19 | 254,102,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,936 | py | #!/usr/bin/env python3
"""
Turbo-Entabulator utilities.
This file contains utilities used by the Turbo-Entabulator suite that don't
fall under the 'detections' or 'discovery' categories.
"""
# Copyright(c) 2018, 2019, 2020 Cumulus Networks, Inc
# John Fraizer <jfraizer@cumulusnetworks.com>
import json
import os
import random
import re
import sys
from turbo_entabulator.m_logger import logger
def check_dependencies(funcname, required, satisfied):  # noqa
    """
    Validate that list 'requirements' is a subset of list 'satisfied'.

    :param funcname: name of the function whose requirements are checked
    :param required: list of dependency names the function needs
    :param satisfied: list of dependency names already satisfied
    :return bool: True when every requirement is satisfied
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    logger.debug("Checking dependencies: {} for function [{}]."
                 .format(required, funcname))
    needed = set(required)
    have = set(satisfied)
    if needed <= have:
        logger.debug("Dependencies satisfied.")
        return True
    missing = list(needed - have)
    logger.debug("Required dependencies {} for [{}] have not been "
                 "satisfied!".format(missing, funcname))
    return False
def expand_frr_ec(deprecated, satisfied, includes, problems,  # noqa
                  regex_matches):
    """
    Try to provide suggestions for ECs from FRR.

    Looks up every 'Uncategorized FRR Error' match in the bundled
    includes/frr/ec.json database and builds a list of suggestion lines
    (title / description / suggestion per error code).  Unknown codes are
    added to 'problems' so a bug can be filed with the FRR team.

    :param deprecated: list of deprecated function names to skip
    :param satisfied: list of satisfied dependency names (appended to)
    :param includes: path of the includes directory holding frr/ec.json
    :param problems: accumulating list of problem strings (appended to)
    :param regex_matches: dict of log-signature matches from detect_log_sigs
    :return: (satisfied, problems, suggestions) tuple
    """
    # Get function name (accesses private sys function, no better way)
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return satisfied, problems, {}
    reqs = ['detect_log_sigs']
    if not check_dependencies(name, reqs, satisfied):
        return satisfied, problems, {}
    if 'Uncategorized FRR Error' not in regex_matches:
        # Fixed: original called .format(name) on a string with no
        # placeholder, which silently dropped the argument.
        logger.debug("No matches to look up in [{}]! Skipping".format(name))
        return satisfied, problems, {}
    filename = includes + "/frr/ec.json"
    if not os.path.isfile(filename):
        logger.debug("Could not open {} .".format(filename))
        problems.append('* * * TE CONFIG ERROR * * * Could not find {}! '
                        'Please verify that Turbo-Entabulator '
                        'is installed properly.'.format(filename))
        return satisfied, problems, {}
    logger.debug('Reading in {}...'.format(filename))
    # 'with' closes the handle on exit; the explicit fh.close() that used
    # to follow was redundant.
    with open(filename) as fh:
        db = json.load(fh)
    # List of suggestion lines (one entry per rendered output line).
    suggestions = []
    count = 0
    for match in regex_matches['Uncategorized FRR Error']:
        _, ec = match.split(' ')
        count = count + 1
        if count > 1:
            # Divider between consecutive EC entries.
            suggestions.append('-' * 76)
        # Does FRR contain the expanded error description?
        if ec in db:
            suggestions.append(match + ':\t' + db[ec]['title'])
            suggestions.append('Description:\t' + db[ec]['description'])
            suggestions.append('Suggestion:\t' + db[ec]['suggestion'])
        else:
            suggestions.append(match + ':\t' + 'Unknown Error Code')
            suggestions.append('Description:\t' + 'Not found in FRR error DB')
            suggestions.append('Suggestion:\t' + 'Please File bug with FRR '
                                                 'team to add detail for ' +
                               match)
            msg = ('FILE-A-BUG: [' + match + '] not found in FRR Error '
                                             'Codes. Please file a bug with '
                                             'FRR team to have error detail '
                                             'added.')
            problems.append(msg)
    satisfied.append(name)
    # Then, return:
    return satisfied, problems, suggestions
def find_frr_path(deprecated, satisfied, support_path):  # noqa
    """Locate the ?.show_running file holding the FRR/Quagga config.

    Returns (satisfied, full_path) or (satisfied, None) when no candidate
    file exists under support_path.
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    if not check_dependencies(name, ['find_support_path'], satisfied):
        return (satisfied, None)
    candidates = ('frr.show_running', 'quagga.show_running',
                  'Quagga.show_running', 'zebra.config')
    for candidate in candidates:
        full_path = support_path + candidate
        if not os.path.isfile(full_path):
            continue
        logger.debug("Found {} .".format(full_path))
        satisfied.append(name)
        return (satisfied, full_path)
    logger.debug("Unable to find ?.show_running file to parse FRR data!")
    return (satisfied, None)
def find_ifquery_path(deprecated, satisfied, support_path):  # noqa
    """Locate the ifquery output file inside the support directory.

    Returns (satisfied, full_path) or (satisfied, None) when neither
    candidate filename exists under support_path.
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    if not check_dependencies(name, ['find_support_path'], satisfied):
        return (satisfied, None)
    for candidate in ('ifquery', 'ifquery-a'):
        full_path = support_path + candidate
        if not os.path.isfile(full_path):
            continue
        logger.debug("Found {} .".format(full_path))
        satisfied.append(name)
        return (satisfied, full_path)
    logger.debug("Unable to find ifquery file to parse data!")
    return (satisfied, None)
def find_support_path(deprecated, satisfied, CL):  # noqa
    """Locate "support/" (or "Support/") inside the cl_support directory.

    Verifies that CL is a directory, then returns (satisfied, path) for
    the first support subdirectory found, or (satisfied, None) otherwise.
    This sits at the top of the dependency chain, so reqs is empty.
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    reqs = []
    if not check_dependencies(name, reqs, satisfied):
        return (satisfied, None)
    # The cl_support argument itself must be a directory.
    if not os.path.isdir(CL):
        logger.debug("{} is not a directory!".format(CL))
        return (satisfied, None)
    satisfied.append('CL')
    for candidate in ('Support/', 'support/'):
        support_path = CL + "/" + candidate
        if os.path.isdir(support_path):
            logger.debug("Found {} .".format(support_path))
            satisfied.append(name)
            return (satisfied, support_path)
        logger.debug("{} is not a directory!".format(support_path))
    return (satisfied, None)
def generate_report(result, print_logs, print_suggestions):  # noqa
    """
    Generate human readable report.

    :param result: dict with optional keys 'discovered', 'problems',
        'warnings', 'info', 'logs' and 'suggestions'
    :param print_logs: include the "Logs of interest" sections when truthy
    :param print_suggestions: include expanded FRR error codes when truthy
    :return: the formatted report as a single string
    """
    if not result:
        logger.error("Results are empty! Shit's broke!")
        exit(1)
    # Common section dividers
    section_start_divider = '=' * 76 + '\n'
    section_end_divider = '=' * 76 + '\n\n'
    # Overview fields, rendered in this display order when discovered.
    interested = ['Script Version', 'hostname', 'eth0_ip', 'uptime',
                  'cl_support', 'Command line', 'Reason', 'license',
                  'lsb-release', 'image-release', 'upgraded with apt-get',
                  'sysinfo', 'platform.detect', 'switch-architecture',
                  'vendor', 'model', 'cpld_version', 'onie_version', 'bios',
                  'service_tag', 'chipset', 'ports', 'capabilities', 'caveats',
                  'datasheet'
                  ]
    msg = "[Overview]".center(76, '=') + '\n'
    for item in interested:
        if 'discovered' in result and item in result['discovered']:
            if 'sysinfo' in item:
                # sysinfo is a nested dict: one report line per entry.
                for item2 in result['discovered'][item]:
                    msg = msg + ('{:>21}: {}\n'
                                 .format(item2.upper(),
                                         result['discovered'][item][item2]))
            elif 'bios' in item:
                # bios is a nested dict rendered on a single bracketed line.
                msg = msg + ('{:>21}: ['.format('BIOS'))
                for item2 in result['discovered'][item]:
                    msg = msg + (' {}: {} '
                                 .format(item2,
                                         result['discovered'][item][item2]))
                msg = msg + ' ]\n'
            else:
                msg = msg + ('{:>21}: {}\n'.format(item.upper(),
                                                   result['discovered'][item]))
    msg = msg + section_end_divider
    # print problems (membership tests use 'in result', not result.keys())
    if 'problems' in result:
        msg = msg + "[Problems]".center(76, '=') + '\n'
        for item in result['problems']:
            msg = msg + item + '\n'
        msg = msg + section_end_divider
    # print warnings
    if 'warnings' in result:
        msg = msg + "[Warnings]".center(76, '=') + '\n'
        for item in result['warnings']:
            msg = msg + item + '\n'
        msg = msg + section_end_divider
    # print info
    if 'info' in result:
        msg = msg + "[Informational]".center(76, '=') + '\n'
        for item in result['info']:
            msg = msg + item + '\n'
        msg = msg + section_end_divider
    # print logs
    if print_logs and 'logs' in result:
        if 'problems' in result['logs']:
            msg = msg + ('Logs of interest [Problems]:\n')
            msg = msg + section_start_divider
            for item in result['logs']['problems']:
                msg = msg + item + '\n'
            msg = msg + section_end_divider
        if 'warnings' in result['logs']:
            msg = msg + ('Logs of interest [Warnings]:\n')
            msg = msg + section_start_divider
            for item in result['logs']['warnings']:
                msg = msg + item + '\n'
            msg = msg + section_end_divider
        if 'info' in result['logs']:
            msg = msg + ('Logs of interest [Informational]:\n')
            msg = msg + section_start_divider
            for item in result['logs']['info']:
                msg = msg + item + '\n'
            msg = msg + section_end_divider
    # print frr error codes
    if print_suggestions and 'suggestions' in result:
        msg = msg + ('Expanded FRR Error Codes:\n')
        msg = msg + section_start_divider
        for item in result['suggestions']:
            msg = msg + item + '\n'
        msg = msg + section_start_divider
    return msg
def glob_to_numbers(glob):  # noqa
    """
    Given a string containing single numbers and ranges, return a sorted
    list of deduplicated integers.

    glob - A string of digits and ranges

    >>> glob_to_numbers('3-4,7,10-12')
    [3, 4, 7, 10, 11, 12]
    """
    assert isinstance(glob, str), "glob={0}".format(glob)
    # A bare split(',') could yield empty strings, so normalize commas to
    # whitespace first and let split() drop the empties.
    tokens = glob.replace(',', ' ').split()
    range_pattern = re.compile(r"""^(\d+)-(\d+)$""")  # ex. 4-6
    numbers = set()
    for token in tokens:
        if token.isdigit():
            numbers.add(int(token))
            continue
        matched = range_pattern.match(token)
        if matched is None:
            # The substring is neither a digit nor a range.
            print("Globs must consist of numbers or ranges, but {0} is "
                  "neither. We were given glob '{1}'.".format(token, glob))
            return []
        low = int(matched.group(1))
        high = int(matched.group(2))
        if high < low:
            # Descending ranges (ex. 6-4) are invalid.
            return []
        numbers.update(range(low, high + 1))
    return sorted(numbers)  # A sorted list
def ifname_expand_glob(ifname):  # noqa
    """Expand an interface-name glob string into a list of names."""
    if isinstance(ifname, str):
        return ifname_expand_glob_helper(ifname, [])
    raise TypeError("This function takes a string and returns a list of "
                    "strings. type(ifname)={0}".format(type(ifname)))
def ifname_expand_glob_helper(ifname, result):  # noqa
    """ This function is recursive.

    Consumes one leading component of *ifname* per call (either a plain
    name like "bridge" or a base+range glob like "swp1-7,9"), appends the
    expanded name(s) to *result*, then recurses on the remaining string.
    Raises ValueError when the leading component cannot be parsed.
    """
    if ifname == '':
        # Base case 1
        return result
    if not ifname_is_glob(ifname):
        # Base case 2: non-globish input
        result.append(ifname)
        return result
    # Get the first glob component. This could be a single name, like "bridge"
    # or it could be a range with commas and hyphens. For example, given
    # "swp1-7,9", get the entire string.
    # Given "swp1-7,9,eth0", get "swp1-7,9,".
    glob = ''
    # Subinterface base and range?  (base ends with "<digits>." here)
    m = (re.match(
        r"""(?P<base>[a-zA-Z0-9-]+?\-?(?:\d+s)?\d+\.)(?P<glob>(?:0(?!\d)|[1-9]\d*)((,|-)\d+)+,?)""",  # noqa
        ifname))  # noqa
    if m is None:
        # Non-subinterface base and range?
        m = (re.match(
            r"""(?P<base>[a-zA-Z0-9-]+?\-?(?:\d+s)?)(?P<glob>(?:0(?!\d)|[1-9]\d*)((,|-)\d+)+,?)""",  # noqa
            ifname))  # noqa
    if m is None:
        m = re.match(r"""(?P<base>\S+?),""", ifname)
        if m is not None:
            # The input begins with a component that doesn't have a range.
            # Ex: lo, bridge, peer-group, Bond-T, server02, etc.
            glob = None
        else:
            raise ValueError("Couldn't parse '{0}'.".format(ifname))
    # Append the expanded substring of interfaces to the result.
    base = m.group('base')
    assert not ifname_is_glob(base), "base = {0}".format(base)
    if glob is None:
        # Append a single interface name to the result.
        result.append(base)
    else:
        # Append a multiple interface names to the result.
        glob = m.group('glob').rstrip(',')
        for number in glob_to_numbers(glob):
            result.append('{0}{1}'.format(base, number))
    # Recurse with the remaining input string.
    return ifname_expand_glob_helper(ifname[len(m.group()):], result)
def ifname_is_glob(ifname):  # noqa
    """Return True when *ifname* looks like an interface-name glob."""
    assert isinstance(ifname, str), "ifname={0}".format(ifname)
    # The empty string and strings with spaces are not globs.
    if not ifname or ' ' in ifname:
        return False
    # Comma-separated components always make a glob.
    if re.search(r"""\S,\S""", ifname):
        return True
    # A hyphenated numeric span might make a glob.
    span = re.search(r"""(?<!-)(\d+)-(\d+)(,|$)""", ifname)
    if span is None:
        return False
    lo = span.group(1)
    hi = span.group(2)
    # Valid ranges contain no leading zeros, '0' is not valid as the end
    # of a range, and the end must exceed the start.
    has_lead_zero = (len(lo) > 1 and lo.startswith('0')) or hi.startswith('0')
    return not has_lead_zero and int(hi) > int(lo)
def test_check_dependencies(deprecated, satisfied):  # noqa
    # This function tests the check_dependencies function.
    # It hard-exits the process when either self-check fails.
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return(satisfied)
    # This is a test list of satisfied modules.
    test = ['module1', 'module2']
    # This is a list of reqs that should be satisfied by test.
    should_pass = ['module1', 'module2']
    # This is a list of reqs that should not be satisfied by test.
    should_fail = ['module2', 'module3']
    if not check_dependencies('TEST: should_pass', should_pass, test):
        logger.error("ERROR! Function check_dependencies is broken! "
                     "False Negative")
        exit(1)
    if check_dependencies('TEST: should_fail', should_fail, test):
        logger.error("ERROR! Function check_dependencies is broken! "
                     "False Positive")
        exit(1)
    satisfied.append(name)
    return satisfied
def verify_path(path):
    """
    Verify the normalized directory or file path exists.

    :param path: filesystem path; "~" is expanded before normalization
    :return normalized path: absolute path (process exits when missing)
    """
    normalized = os.path.abspath(os.path.expanduser(path))
    if os.path.exists(normalized):
        return normalized
    # Bail out hard when the location does not exist.
    logger.error("Filesystem path {} invalid.".format(normalized))
    exit(1)
def wisdom(deprecated, satisfied, info):
    """TE-WISDOM is just a fun little function that adds a one-liner.

    Appends one randomly chosen quip (prefixed "TE-WISDOM:") to *info*.

    :param deprecated: list of deprecated function names to skip
    :param satisfied: list of satisfied dependency names
    :param info: accumulating list of informational strings
    :return: (satisfied, info) tuple
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return(satisfied, info)
    reqs = ['find_support_path']
    if not check_dependencies(name, reqs, satisfied):
        return(satisfied, info)
    # Pool of one-liners; one is selected uniformly at random below.
    wisdom = [
        'This CL-SUPPORT Analysis is brought to you by Coors Light... '
        'Taste the Rockies!',
        '# rm -rf / ; reboot - Because its never too late to start again!',
        'Nothing makes a person more productive than the LAST MINUTE!',
        'I had my patience tested. I\'m negative.',
        'Interviewer: "What do you make at your current job?" '
        'ME: "Mostly mistakes!"',
        'Dear Karma, I have a list of people you missed!!!',
        'Don\'t forget to shout "JENGA" when everything falls apart...',
        'Calories: Tiny creatures that live in your closet and sew your '
        'clothes a little tighter every night.',
        'A little bit goes a long way says the Big-Endian...',
        'My backup plan is just my original plan - with more ALCOHOL!',
        'Light travels faster than sound. This is why some people appear '
        'bright until you hear them speak.',
        'Silence is golden. Duct-tape is silver.',
        'If at first, you don\'t succeed, skydiving is not for you!',
        'My imaginary friend says that you need a therapist!',
        'My neighbor\'s diary says that I have boundary issues...',
        'I clapped because it\'s finished, not because I liked it.',
        'What do you mean I\'m not in shape? Round is a shape!',
        'I\'m smiling. That alone should scare you!',
        'Common sense is a flower that doesn\'t grow in everyone\'s garden...',
        'Your trial license for Turbo-Entabulator has expired. Generating '
        'random false-positives.',
    ]
    rand = random.randrange(0, len(wisdom))
    info.append('TE-WISDOM: {}'.format(wisdom[rand]))
    return(satisfied, info)
| [
"noreply@github.com"
] | xinyli-cumulus.noreply@github.com |
07e30b5ca44e0780d580e0e6e6bb3d6b3d5b027e | 031b1c5b0c404f23ccd61a08845695bd4c3827f2 | /python/pyfiles/算术运算符.py | 39efec4aa582072f142c44bd1bc23d687686d1e0 | [] | no_license | AndyFlower/zixin | c8d957fd8b1e6ca0e1ae63389bc8151ab93dbb55 | 647705e5f14fae96f82d334ba1eb8a534735bfd9 | refs/heads/master | 2022-12-23T21:10:44.872371 | 2021-02-10T07:15:21 | 2021-02-10T07:15:21 | 232,578,547 | 1 | 0 | null | 2022-12-16T15:41:14 | 2020-01-08T14:13:25 | Java | UTF-8 | Python | false | false | 795 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 22:57:02 2020
@author: sanglp
"""
# +运算符
print(3+5)
print(3.4+4.5)
print((3+4j)+(4+5j))
print('abc'+'def')
print([1,2]+[3,4])
print((1,2)+(3,))
# -运算符
print(7.9 -4.5) # 浮点数有误差
print(5-3)
num = 3
print(-num)
print(--num)
print({1,2,3}-{3,4,5}) #计算差集
# *运算符
print(3333*5555)
print((3+4j)*(5+6j))
print('重要的事情说3遍'*3)
print([0]*5)
print((0,)*3)
# /和//运算符
print(17 / 4)
print(17 // 4) #4
print((-17) / 4)
print((-17) // 4) #-5
# %运算符
print(365 %7)
print(365 %2)
print('%c,%c,%c' %(65,97,48)) # 数字格式化为字符 A,a,0
# **运算符
print(2 ** 4)
print(3 ** 3 ** 3)
print(3 ** (3**3))
print((3**3)**3)
print(9**0.5)
print((-1)**0.5) # 对负数计算平方根得到负数 | [
"1308445442@qq.com"
] | 1308445442@qq.com |
4b23d796e4e6e0eaf5c71897207ec14d8b6168c5 | 5dc20d163ac874bef45f8aeadbdc8bef1697ea64 | /python-asyncio/src/asyncio/basic.py | bdbc3208817df584096fd5999af8abb797074038 | [] | no_license | bartfrenk/sandbox | 67241860ea35437a0f032a1b656a63908289fe19 | 563fc0051e742cc735c5da4b58a66ccf926e2b16 | refs/heads/master | 2022-12-10T04:51:56.396228 | 2020-02-23T11:18:28 | 2020-02-23T11:18:28 | 60,007,554 | 0 | 0 | null | 2016-05-30T12:12:09 | 2016-05-30T12:12:09 | null | UTF-8 | Python | false | false | 427 | py | import asyncio
import sys
class Test:
    """Toy coroutine holder: run() reveals the stored number after 1s."""

    def __init__(self, number):
        # The "magic number" that run() prints.
        self.number = number

    async def run(self):
        """Print a teaser, sleep one second, then print the number."""
        print("The magic number is...", end=" ")
        # Flush so the partial line appears before the await suspends us.
        sys.stdout.flush()
        await asyncio.sleep(1)
        print(self.number)
async def main():
    """Print "Hello", pause one second, then print "... World!".

    NOTE(review): defined but never invoked in this file -- the __main__
    guard runs Test.run() instead; confirm whether that is intentional.
    """
    print("Hello")
    await asyncio.sleep(1)
    print("... World!")
if __name__ == "__main__":
    # Entry point: run the Test coroutine on a fresh event loop.
    test = Test(5)
    asyncio.run(test.run())
| [
"bart.frenk@gmail.com"
] | bart.frenk@gmail.com |
bfd774f91b26d227ba70c15082fed0194b86585b | c02e5e0730a04b0a16c68d8aad928daedd770948 | /App/forms.py | 6803dbdd618672636a38b52907a07a83f6c8f902 | [
"MIT"
] | permissive | ashtonfei/flask-mini-app | 11c005f05496d655078a259cc70347c9664e0738 | 5a825665caef257d1f0fe3a670fcd1cea650688e | refs/heads/main | 2023-03-30T18:12:44.430341 | 2021-04-09T15:46:06 | 2021-04-09T15:46:06 | 355,441,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, EqualTo, Email
class LoginForm(FlaskForm):
    """Sign-in form: email address plus a password of at least 6 chars."""
    email = StringField(label='Email', validators=[DataRequired(), Email()])
    password = PasswordField(label='Password', validators=[
        DataRequired(), Length(min=6)])
    submit = SubmitField(label='Log In')
class RegisterForm(FlaskForm):
    """Account registration form.

    Collects credentials (email plus a confirmed password), the user's
    name, phone and gender.  Validation is declared per field via
    wtforms validators.
    """
    username = StringField(label='User name', validators=[
        Length(min=3, max=12), DataRequired()])
    email = StringField(label='Email', validators=[DataRequired(), Email()])
    password = PasswordField(label='Password', validators=[
        Length(min=6), DataRequired()])
    # Must match the 'password' field exactly (EqualTo validator).
    password_confirm = PasswordField(
        label='Confirm password', validators=[EqualTo('password'), DataRequired()])
    first_name = StringField(label='First name', validators=[DataRequired()])
    middle_name = StringField(label='Middle name', validators=[])
    last_name = StringField(label='Last name', validators=[DataRequired()])
    phone = StringField(label='Phone', validators=[DataRequired()])
    gender = SelectField(label='Gender', choices=['Male', 'Female'], validators=[
        DataRequired()], default="Male")
    submit = SubmitField(label='Register')
| [
"yunjia.fei@gmail.com"
] | yunjia.fei@gmail.com |
0ef417ef2ea2ab51e1240c4fc86e2f26be2e0302 | 509d717f18caad77e00c3261dcf1934f7e5bd95d | /venv/css_selectors/sports_bet_page_locators.py | 65e85e02577a73a8730a604977beb681bc7cbdcc | [] | no_license | Swingyboy/pronet_design_testing | 8aee2f42e2452ca178fbe34e7a51ce7377156e08 | ad3dc5a58983ed6d6c9cef91a40ea8160f699dd0 | refs/heads/master | 2023-05-06T05:34:47.438023 | 2020-09-15T09:17:36 | 2020-09-15T09:17:36 | 281,055,876 | 1 | 1 | null | 2021-06-02T02:56:51 | 2020-07-20T08:12:21 | Python | UTF-8 | Python | false | false | 403 | py | from selenium.webdriver.common.by import By
class SportsBetPageLocators():
    """CSS selectors for the module-header bars of the sports-bet page."""
    UPCOMING_EVENTS_BAR =(By.CSS_SELECTOR, 'upcoming-events > div > div.modul-header')
    LIVE_BET_BAR = (By.CSS_SELECTOR, 'live-at-now > div > div.modul-header')
    ESPORTS_BAR = (By.CSS_SELECTOR, 'app-esports > div > div.modul-header')
    TODAY_EVENT_BAR = (By.CSS_SELECTOR, 'todays-sport-types > div > div.modul-header')
"kedonosec@gmail.com"
] | kedonosec@gmail.com |
bda1259acf1f9e58440de1958bf26bb65f5b568f | 144590772aaa89e5ead8936512b0bd035c215c7b | /resilient-circuits/tests/selftest_tests/mocked_success_script.py | 3ea0cb2eea6c0d23aebd1c0b1c2c4e1b72008c59 | [
"MIT"
] | permissive | ibmresilient/resilient-python-api | f65dad3f3c832581127026fa3e626eaf3d4749a7 | 84e8c6d9140ceac0bf47ce0b98e11c7953d95e61 | refs/heads/main | 2023-07-23T12:36:49.551506 | 2023-07-11T15:15:43 | 2023-07-11T15:15:43 | 101,414,862 | 37 | 31 | MIT | 2023-09-07T14:00:34 | 2017-08-25T14:59:45 | Python | UTF-8 | Python | false | false | 276 | py |
def selftest(opts):
    """
    Placeholder for selftest function. An example use would be to test
    package api connectivity.

    :param opts: application options mapping (unused by this placeholder)
    :return: dict with a 'state' of "success" and a null 'reason'
    """
    outcome = {"state": "success"}
    outcome["reason"] = None
    return outcome
"Ryan.Gordon1@ibm.com"
] | Ryan.Gordon1@ibm.com |
7b330a04a2dde22bdff089a6ed4a3ec386cbc41c | a78fa01825c57797d45d57f7e7143ef91024aa1e | /db_tools/import_category_data.py | 970b412ad79f732fb880182e770e6ca7a2c8a308 | [] | no_license | giwatest/MxShop | 8e68fb917d7ccc3f6cca24bc654cb1868f0c1409 | 4500eb6d4c85110ed3c97209c007be35ceb1cd6b | refs/heads/master | 2020-07-19T01:53:49.427411 | 2020-02-23T14:58:34 | 2020-02-23T14:58:34 | 206,355,008 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # encoding: utf-8
__author__ = 'GIWA'
#批量导入商品类目
#
#
import sys
import os
pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(pwd+'../')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MxShop.settings")
import django
django.setup()
from goods.models import GoodsCategory
from db_tools.data.category_data import row_data
# Walk the three-level category tree from category_data and persist every
# node, linking each child to its parent via parent_category.
for lev1_cat in row_data:
    # Level 1: top-level category (category_type 1, no parent).
    lev1_instance = GoodsCategory()
    lev1_instance.name = lev1_cat['name']
    lev1_instance.code = lev1_cat['code']
    lev1_instance.category_type = 1
    lev1_instance.save()
    for lev2_cat in lev1_cat['sub_categorys']:
        # Level 2: child of the level-1 category saved above.
        lev2_instance = GoodsCategory()
        lev2_instance.name = lev2_cat['name']
        lev2_instance.code = lev2_cat['code']
        lev2_instance.category_type = 2
        lev2_instance.parent_category = lev1_instance
        lev2_instance.save()
        for lev3_cat in lev2_cat['sub_categorys']:
            # Level 3: leaf category under the level-2 category.
            lev3_instance = GoodsCategory()
            lev3_instance.name = lev3_cat['name']
            lev3_instance.code = lev3_cat['code']
            lev3_instance.category_type = 3
            lev3_instance.parent_category = lev2_instance
            lev3_instance.save()
"bingna.liu@xinchan.com"
] | bingna.liu@xinchan.com |
b9ac3eaf94bdd09fd0832248e58d306bcfe3a66b | d8f44692c9f9f0a9a391a49db0f4f659a2ef6fe8 | /jsBuilds/jsBuilder.py | c6d54a0f8bcd6b1b48141c786117bd378dc21f5d | [
"MIT"
] | permissive | skylarkgit/sql2phpclass | 045e71963574b719313fc98882f5c710435f101f | a79e7f3cfda8cb41ba00e8cbba0de33e9be759d6 | refs/heads/master | 2020-03-19T02:34:34.229287 | 2018-07-04T18:58:28 | 2018-07-04T18:58:28 | 135,640,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,062 | py | import sys
sys.path.append('..')
from jsBuilds.jsTemplates import *
from jsBuilds.jsSupport import *
from lib.fileOps import *
from dtfSupport import *
import jsbeautifier
# Module-level registry of table surfaces, populated by setTables() or
# buildControllers() before the generator functions below run.
tables=None
# AngularJS services injected into every generated controller.
DEPENDENCIES="$scope,archonAPI,ToolBag,$http,$window,$filter,$mdDialog"
def getSelectServices(tableSurface):
    """Build JS that fetches select-list options for each one-to-many
    foreign key of *tableSurface*, recursing into one-to-one references.

    NOTE(review): output format comes from the jsTemplates DSL helpers
    (ARCHONCALL, POSTSUBMISSION, ...) defined elsewhere -- confirm there.
    """
    tableName=tableSurface.alias
    varList=tableSurface.getForiegnOTMKeys()
    code=""
    # One select-populating archon "select" call per one-to-many key.
    for v in varList.values():
        code+=(ARCHONCALL('"select"',"'"+tables[v.keyReference].alias+"'",'""',POSTSUBMISSION(SCOPE(v.alias+"Select")+'=response.data.data;',ONFAILURE('"COULDN\'t FETCH DATA FROM '+tableName+' : "+response.data.data'))))
    varList=tableSurface.getForiegnOTOKeys()
    # Recurse into the tables referenced by one-to-one keys as well.
    for v in varList.values():
        code+=getSelectServices(tables[v.keyReference])
    return code
def getShowService(tableSurface):
    """Build the JS archon "Get" call that loads <table>Data into $scope."""
    tableName=tableSurface.alias
    # NOTE(review): varList is assigned but never used here.
    varList=tableSurface.getForiegnOTMKeys()
    code=ARCHONCALL('"Get"',"'"+tableName+"'",'""',POSTSUBMISSION(SCOPE(tableName+"Data")+'=response.data.data;',ONFAILURE('"COULDN\'t FETCH DATA FROM '+tableName+' : "+response.data.data')))
    return code
def setTables(tableSurfaces):
    """Install *tableSurfaces* as the module-level table registry."""
    global tables
    tables=tableSurfaces
def getSubmission(tableSurface):
    """Build the JS "add" submission handler for *tableSurface*.

    Prints the collected settable field names as a debugging aid.
    """
    #code="var obj={"+createObjFromScope(tableSurface.getSettable())+"};\n"
    NV=getAllSettables(tables,tableSurface,{})
    print(",".join(NV))
    code=SUBMISSION('"add"',CALL('ToolBag.objToCallArgs',createObjFromScope(NV)),"'"+tableSurface.alias+"'",POSTSUBMISSION(ONSUCCESS('"Data Saved"'),ONFAILURE('response.data.data')))
    return code
def getUpdation(tableSurface):
    """Build the JS "update" submission handler for *tableSurface*.

    Prints the collected variable names as a debugging aid.
    """
    #code="var obj={"+createObjFromScope(tableSurface.getSettable())+"};\n"
    NV=getAllVars(tables,tableSurface,{})
    print(",".join(NV))
    code=SUBMISSION('"update"',CALL('ToolBag.objToCallArgs',createObjFromScope(NV)),"'"+tableSurface.alias+"'",POSTSUBMISSION(ONSUCCESS('"Data Saved"'),ONFAILURE('response.data.data')))
    return code
def getFetchById(tableSurface,obj,code):
    """Wrap *code* in an archon 'fetch' call keyed by the fields in *obj*.

    The incoming *code* snippet becomes the success handler of the
    generated POSTSUBMISSION before the local name is rebound.
    """
    #code="var obj={"+createObjFromScope(tableSurface.getSettable())+"};\n"
    code=ARCHONCALL("'fetch'","'"+tableSurface.alias+"'",CALL('ToolBag.objToCallArgs',createObjFromScope(obj)),POSTSUBMISSION(code,ONFAILURE('response.data.data')))
    return code
def createAddController(tableSurface):
    """Assemble the AngularJS 'add<Table>Controller' source string."""
    tableName=tableSurface.alias
    # NOTE(review): varList is assigned but never used here.
    varList=tableSurface.getSettable()
    code=SCOPE(VALIDITY(SCOPE('add'+tableName+'Controller')))
    code+=SCOPE('showAdvanced')+'=ToolBag.showAdvanced;\n'
    code+=getSelectServices(tableSurface)
    code+=getSubmission(tableSurface)
    return OBJ('app',CONTROLLER(CONTROLLERNAME('add',tableName),DEPENDENCIES,code))
def buildShowController(tableSurface):
    """Assemble the AngularJS 'show<Table>Controller' source string."""
    tableName=tableSurface.alias
    code=SCOPE('showAdvanced')+'=ToolBag.showAdvanced;\n'
    code+=getShowService(tableSurface)
    return OBJ('app',CONTROLLER(CONTROLLERNAME('show',tableName),DEPENDENCIES,code))
def buildUpdateController(tables,tableSurface):
    """Assemble the AngularJS 'update<Table>Controller' source string.

    NOTE(review): the 'tables' parameter shadows the module-level global
    of the same name; callers pass the same registry dict.
    """
    tableName=tableSurface.alias
    # NOTE(review): varList is assigned but never used here.
    varList=tableSurface.getSettable()
    keys=tableSurface.getKeys()
    # Seed $scope from the routing arguments (the record's key fields).
    code=argsToScope(keys)
    code+=getFetchById(tableSurface,keys,responseToScope(getAllVars(tables,tableSurface,{})))
    code+=SCOPE(VALIDITY(SCOPE('update'+tableName+'Controller')))
    code+=SCOPE('showAdvanced')+'=ToolBag.showAdvanced;\n'
    code+=getSelectServices(tableSurface)
    code+=getUpdation(tableSurface)
    # Key fields are appended to the injected dependency list.
    return OBJ('app',CONTROLLER(CONTROLLERNAME('update',tableName),DEPENDENCIES+","+",".join(keys),code))
def buildControllers(tableSurfaces):
    """Emit add/show/update AngularJS controllers for every table surface.

    Builds the controller source plus a controllerProvider dispatch switch
    and writes the beautified result to 'js\\controllers.js'.

    :param tableSurfaces: registry dict of table-surface objects
    """
    global tables
    tables = tableSurfaces
    code = ""
    pc = ""
    touchd('js')
    for t in tables.values():
        code += createAddController(t)
        pc += CASE("'" + CONTROLLERNAME('add', t.alias) + "'",
                   'return ' + CONTROLLERNAME('add', t.alias) + ';')
    for t in tables.values():
        code += buildShowController(t)
        pc += CASE("'" + CONTROLLERNAME('show', t.alias) + "'",
                   'return ' + CONTROLLERNAME('show', t.alias) + ';')
    for t in tables.values():
        code += buildUpdateController(tables, t)
        pc += CASE("'" + CONTROLLERNAME('update', t.alias) + "'",
                   'return ' + CONTROLLERNAME('update', t.alias) + ';')
    pc = SWITCH('ctrl', pc)
    pc = 'obj.controllerProvider=function(ctrl){{{code}}}'.format(code=pc)
    # Fixed: the output handle was opened but never closed; a context
    # manager guarantees the write is flushed and the file released.
    # NOTE(review): the literal backslash in 'js\\controllers.js' is kept
    # byte-identical to the original (Windows-style path) -- confirm.
    with open('js\\controllers.js', 'w') as f:
        f.write(jsbeautifier.beautify(pc + code))
| [
"abhay199658@gmail.com"
] | abhay199658@gmail.com |
c69507f367aefa1127fc150a6f8ecc701ddc571a | 6aed964b224292fb1d76f9b5dacb0883abe929fc | /ablog/theblog/migrations/0006_auto_20200927_2319.py | 2a457ec8fc67c8216b10e0e78141980f6a740de7 | [] | no_license | satish-313/OurBlog | e575585c4e0960a552628164f1cd8dee7d99a0c5 | ce497682ba1f6be725be7cdb1d1e02241059843f | refs/heads/master | 2023-02-09T22:33:47.816592 | 2020-12-22T16:22:18 | 2020-12-22T16:22:18 | 319,668,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # Generated by Django 3.1.1 on 2020-09-27 17:49
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Convert Post.body to a nullable CKEditor rich-text field."""

    dependencies = [
        ('theblog', '0005_auto_20200926_2239'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='body',
            field=ckeditor.fields.RichTextField(blank=True, null=True),
        ),
    ]
| [
"pradhansatish53@gmail.com"
] | pradhansatish53@gmail.com |
c27426914cd5012c8f9639773cee57f0c16aeee3 | 57972581decd1707834a58eefa3b77e9ed24bf28 | /service_2/__init__.py | 4e910ebf3bd4d91cd523d337209cb03068cfa80c | [] | no_license | abhyasgiri/milestone-serverless-project | 8e671288adf3abbf71217e42c3984152da967571 | 3e1a67859fd0289ae56772b4b5079fa43452a1a4 | refs/heads/main | 2023-02-23T01:30:23.132175 | 2021-02-02T16:44:01 | 2021-02-02T16:44:01 | 335,349,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | import logging
import random
from string import ascii_lowercase
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Function: respond with five random lowercase letters."""
    logging.info('Python HTTP trigger function processed a request.')
    letters = "".join(random.choice(ascii_lowercase) for _ in range(5))
    return func.HttpResponse(letters, status_code=200)
"abhyasgiri@outlook.com"
] | abhyasgiri@outlook.com |
def letter_counter(a_string):
    """Return a dict mapping each character of *a_string* to its count.

    Uses collections.Counter for a single O(n) pass instead of calling
    str.count() once per character, which was O(n**2).
    """
    from collections import Counter
    return dict(Counter(a_string))
def word_histogram(a_string):
    """Return a dict mapping each lower-cased word to its frequency.

    Bug fix: the original ended with ``return word_count and this_thing``,
    which returned the tokenized word list (the right operand of ``and``)
    instead of the histogram whenever the dict was non-empty.
    """
    word_count = {}
    words = a_string.lower().split()
    for word in words:
        word_count[word] = words.count(word)
    return word_count
def histogram_rank(a_string):
    """Print the three most frequent (lower-cased) words of *a_string*.

    Robustness fix: inputs with fewer than three distinct words used to
    raise IndexError; they now report however many words exist.
    """
    words = a_string.lower().split()
    word_count = {}
    for word in words:
        word_count[word] = words.count(word)
    # Ascending by count, so the most frequent entries sit at the end.
    ranked = sorted(word_count.items(), key=lambda x: x[1])
    if len(ranked) >= 3:
        # Same output format as before for the common case.
        print('The top three words are:\n{}\n{}\n{}'
              .format(ranked[-1], ranked[-2], ranked[-3]))
    else:
        print('The top words are:')
        for entry in reversed(ranked):
            print(entry)
# Ad-hoc smoke checks exercising the helpers above.
print(letter_counter("Bananas"))
print(word_histogram("To be or not to be"))
histogram_rank("to be or not to be or to be or maybe even to not to be lol")
| [
"thorthebore@gmail.com"
] | thorthebore@gmail.com |
79722b7ad6e4e2c4ed519da6d093a3f52c9824bf | f56d915f46d779b9ed07a8b6bb048b688865cd7b | /passette.py | 97f4c671482303943a387b2184856c6fc5a118fc | [] | no_license | deskofcraig/spotifyplayer | 42a252a92ab5d9cc3a1ed7bb68b886579bd1178a | 759deb180e345712c09a07e1d5e0fa55dd747e3a | refs/heads/master | 2020-04-11T05:42:33.882169 | 2019-01-19T02:47:32 | 2019-01-19T02:47:32 | 161,557,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | #import the GPIO and time package
import RPi.GPIO as GPIO
import time
import os
#from mopidy import core
GPIO.setmode(GPIO.BOARD)
#yellow/back button
GPIO.setup(37, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#red/pause button
GPIO.setup(36, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#green/play button
GPIO.setup(33, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#white/next button
GPIO.setup(32, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#encoder | A - red | C - black | B - yellow
#pinA
GPIO.setup(29, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#pinB
GPIO.setup(31, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
count = 0
counter = 10
pinALast = GPIO.input(29)
pinBLast = GPIO.input(31)
pinCLast = pinALast ^ pinBLast
encoderMin = 0
encoderMax = 100
inc = 1
last_state = pinALast * 4 + pinBLast * 2 + pinCLast * 1
#system on start
os.system("mpc volume 10")
os.system("mpc add spotify:track:6jXPZid0KLorvgIDP6TiSo")
os.system("mpc add spotify:track:5GjPQ0eI7AgmOnADn1EO6Q")
os.system("mpc add spotify:track:6r20M5DWYdIoCDmDViBxuz")
os.system("mpc add spotify:track:17S4XrLvF5jlGvGCJHgF51")
while True:
#back/yellow button
if GPIO.input(37) == GPIO.HIGH:
os.system("mpc prev")
print("'back' was pushed!")
time.sleep(.3)
#pause/red button
if GPIO.input(36) == GPIO.HIGH:
os.system("mpc pause")
print("'pause' was pushed!")
time.sleep(.3)
#play/green button
if GPIO.input(33) == GPIO.HIGH:
os.system("mpc toggle")
print("'play' was pushed!")
time.sleep(.3)
#next/white button
if GPIO.input(32) == GPIO.HIGH:
os.system("mpc next")
print("'next' was pushed!")
time.sleep(.3)
#encoder
pinA = GPIO.input(29)
pinB = GPIO.input(31)
pinC = pinA ^ pinB
new_state = pinA * 4 + pinB * 2 + pinC * 1
delta = (new_state - last_state) % 4
# delta | pinA | pinB | pinC | new_state
# ======================================
# 0 | 0 | 0 | 0 | 0
# 1 | 1 | 0 | 1 | 5
# 2 | 1 | 1 | 0 | 6
# 3 | 0 | 1 | 1 | 3
# https://bobrathbone.com/raspberrypi/documents/Raspberry%20Rotary%20Encoders.pdf
if (new_state != last_state):
count += 1
if (count % 4 == 1):
if (delta == 3):
counter += inc
if (counter > encoderMax):
counter = 100
else:
counter -= inc
if (counter < encoderMin):
counter = 0
volume = "mpc volume " + str(int(counter))
os.system(volume)
last_state = new_state
| [
"noreply@github.com"
] | deskofcraig.noreply@github.com |
b0af71064e926490ac415e9930d72e7cccec1d8c | 7464f15c33c74454f2a98dceb7f603919abba4d1 | /happy.py | 01383a2a50c7506bb341600a3deaf9076a692953 | [] | no_license | willingc/my-bit | 374bece797c59956e500504cd62940a2c1718013 | 535768dcb09297f1028e0e111fd062b91e8032c6 | refs/heads/master | 2016-08-08T21:26:22.119643 | 2015-11-30T03:23:59 | 2015-11-30T03:23:59 | 47,053,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | """
happy.py
by Carol Willing
November 28, 2015
Public Domain
Use this to display a 'Happy Face' image on micro:bit's 5x5 pixel grid of LEDs.
Remember... Writing a program is similar to planning a birthday party.
Program Birthday party
------- --------------
'Prepare' Prepare the room with balloons; order food; pick up a cake.
'Do' Do things during the party -- sing, dance, play videogames.
'Clean' Clean the table. Tidy up after the party. Take out the rubbish.
"""
from microbit import *
# Prepare. Put the preinstalled images into user friendly variables
my_happy_face = Image.HAPPY
my_sad_face = Image.SAD
# Do things! ----> Show the images on the display.
display.show(my_happy_face)
sleep(8000)
display.show(my_sad_face)
sleep(8000)
display.show(my_happy_face)
sleep(4000)
# Clean up stuff. Display 'BYE' and clear display. (Clean your room too.)
display.scroll("BYE")
display.clear()
| [
"carolcode@willingconsulting.com"
] | carolcode@willingconsulting.com |
e9ad5aee994bfbdd74e6f30e6aa132036122e60b | 437b5b668d6d2c6f089bbeabb2676db46d8cdd07 | /temp_ach_multiprocess.py | 0037b9c2e1f8db810f86f2e2c4c54c41b7643c81 | [
"MIT"
] | permissive | marcelosalles/idf-creator | 4e53d185ab42b7f14dbd2d83e9ffe7e015ef07c3 | 399a68dbee9d275e79df75c55acdec5f246ff07f | refs/heads/master | 2020-03-18T13:03:40.804518 | 2018-12-21T19:33:49 | 2018-12-21T19:33:49 | 134,758,816 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | # Return EHF from multiple simulation results of Operative Temperature
import argparse
import csv
import datetime
import glob
from multiprocessing import Pool
import os
import pandas as pd
FOLDER_STDRD = 'cluster'
LEN_FOLDER_NAME = len(FOLDER_STDRD) + 2
BASE_DIR = '/media/marcelo/OS/LabEEE_1-2/idf-creator/single_12_20/'
# BASE_DIR = 'D:/LabEEE_1-2/idf-creator/sobol_single'
MONTH_MEANS = pd.read_csv('month_means_8760.csv')
MAX_THREADS = 18
# SIMULATIONS = 108000
# N_CLUSTERS = 18
# batch = SIMULATIONS/N_CLUSTERS
def process_folder(folder):
line = 0
folder_name = folder[len(folder)-LEN_FOLDER_NAME:]
# folder_name = folder[len(folder)-LEN_FOLDER_NAME:]
os.chdir(folder) # BASE_DIR+'/'+
# pre_epjson_files = glob.glob('*.epJSON')
# i_cluster = int(folder[-1])
# ok_list = ['sobol_single_'+'{:05.0f}'.format(i)+'.epJSON' for i in range(int(i_cluster*batch),int(i_cluster*batch+batch))]
epjson_files = glob.glob('*.err') # epJSON') # []
print(len(epjson_files))
# for f in pre_epjson_files:
# if f in ok_list:
# epjson_files.append(f)
df_temp = {
'folder': [],
'file': [],
'temp': [],
'ach': [],
'ehf': []
}
for file in epjson_files:
print(line,' ',file, end='\r')
line += 1
csv_file = file[:-7]+'out.csv'
df = pd.read_csv(csv_file)
df_temp['file'].append(file[:-7])
df_temp['folder'].append(folder_name)
df_temp['temp'].append((df['OFFICE:Zone Operative Temperature [C](Hourly)'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0]).mean())
df_temp['ach'].append((df['OFFICE:AFN Zone Infiltration Air Change Rate [ach](Hourly)'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0]).mean())
df['E_hot'] = -1
df['sup_lim'] = MONTH_MEANS['mean_temp'] + 3.5
df.loc[df['OFFICE:Zone Operative Temperature [C](Hourly)'] > df['sup_lim'], 'E_hot'] = 1
df.loc[df['OFFICE:Zone Operative Temperature [C](Hourly)'] <= df['sup_lim'], 'E_hot'] = 0
df_temp['ehf'].append(df['E_hot'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0].mean())
df_output = pd.DataFrame(df_temp)
df_output.to_csv('means_{}.csv'.format(folder_name), index=False)
print('\tDone processing folder \'{}\''.format(folder_name))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process output data from Energyplus.')
parser.add_argument('-t',
action='store',
type=int,
help='runs T threads')
args = parser.parse_args()
folders = glob.glob(BASE_DIR+FOLDER_STDRD+'*')
print('Processing {} folders in \'{}\':'.format(len(folders), BASE_DIR))
for folder in folders:
print('\t{}'.format(folder))
start_time = datetime.datetime.now()
if args.t:
p = Pool(args.t)
p.map(process_folder, folders)
else:
num_folders = len(folders)
p = Pool(min(num_folders, MAX_THREADS))
p.map(process_folder, folders)
end_time = datetime.datetime.now()
total_time = (end_time - start_time)
print("Total processing time: " + str(total_time))
| [
"marcelosalles@github.com"
] | marcelosalles@github.com |
dc2a482aa68540bad4c20f140f3fc7b3df59ceef | 0712a5355fb7b2110df802f0630db3f421ffa08e | /MyoGrapher/__init__.py | 5637e8c629fbc81d87154322b678fdf91bbc7f9a | [
"MIT"
] | permissive | nullp0tr/MyoGrapher | 6f404bc29034ce585742b0eac7798981d82867ee | 153dc26a77585181e8a25f427c7c5493767e20ab | refs/heads/master | 2021-01-01T18:13:54.648687 | 2017-12-14T17:32:28 | 2017-12-14T17:32:28 | 98,281,130 | 1 | 1 | MIT | 2017-12-14T17:32:29 | 2017-07-25T08:06:36 | Python | UTF-8 | Python | false | false | 3,156 | py | import pygame
class MyoGrapher(object):
def __init__(self, width=1200, height=400):
self.width, self.height = width, height
self.screen = pygame.display.set_mode((self.width, self.height))
self.last_values = []
self.dlast_values = []
def dplot(self, vals, shifts, colors):
division_lines = 4
drift = 5
self.screen.scroll(-drift)
self.screen.fill((0, 0, 0),
(self.width - drift, 0, self.width, self.height))
for n, values in enumerate(vals):
values = [val / float(shifts[n]) for val in values]
if len(self.dlast_values) < len(vals):
self.dlast_values.append(values)
return
for i, (u, v) in enumerate(zip(self.dlast_values[n], values)):
pygame.draw.line(self.screen, colors[n],
(self.width - drift, int(
self.height / division_lines * (
i + 1 - u))),
(self.width, int(
self.height / division_lines * (
i + 1 - v))))
pygame.draw.line(self.screen, (255, 255, 255),
(self.width - drift, int(
self.height / division_lines * (
i + 1))),
(self.width, int(
self.height / division_lines * (
i + 1))))
self.dlast_values[n] = values
def plot(self, values, drawlines=False, curve=True):
if self.last_values is None:
self.last_values = values
return
division_lines = len(values)
drift = 5
self.screen.scroll(-drift)
self.screen.fill((0, 0, 0), (self.width - drift, 0, self.width, self.height))
for i, (u, v) in enumerate(zip(self.last_values, values)):
if drawlines:
pygame.draw.line(self.screen, (0, 255, 0),
(self.width - drift, int(self.height / division_lines * (i + 1 - u))),
(self.width, int(self.height / division_lines * (i + 1 - v))))
pygame.draw.line(self.screen, (255, 255, 255),
(self.width - drift, int(self.height / division_lines * (i + 1))),
(self.width, int(self.height / division_lines * (i + 1))))
else:
c = int(255 * max(0, min(1, v)))
self.screen.fill((c, c, c), (self.width - drift, i * self.height / division_lines, drift,
(i + 1) * self.height / division_lines - i * self.height / division_lines))
if curve:
self.last_values = values
pygame.display.flip()
def emg_plot(self, emg, shift=512, drawlines=False, curve=True):
self.plot([e / float(shift) for e in emg], drawlines=drawlines, curve=curve)
| [
"ahmeds2000x@gmail.com"
] | ahmeds2000x@gmail.com |
88ed4535cc1d89f37f97af16d48dceabab6add6f | 1e39bbec23e4200d84237cb2446e4285736cbf98 | /options.py | b17459e6a9edea456a043196dec7c461421c41c3 | [] | no_license | JRiyaz/password-manager | 00617c4f16f7438c392baf972d66d77eca11e519 | 215947d5ce5934bd04d11f3cf1d035cf457a5fa9 | refs/heads/main | 2023-05-31T12:05:06.772659 | 2021-07-05T09:54:44 | 2021-07-05T09:54:44 | 379,824,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,787 | py | import sys
from colors import Colors
from security import PasswordManager
class Options:
# username of the user
__username = ''
# Show welcome message for first time
__welcome = True
@staticmethod
def __ask_username() -> None:
""" This method prompt the user to enter username through command line """
print(f"{Colors.CYAN}Please register here{Colors.END}")
Options.__username = input("Please enter username: ")
@staticmethod
def __ask_password() -> None:
""" This method prompt the user to enter password through command line """
pm = PasswordManager()
secure = False
while True:
msg = secure is True and 'Enter new strong password' or 'Enter your new password'
password = input(f"{msg}: ")
set_password = pm.set_password(password)
if set_password == -1:
print(f'{Colors.WARNING}Password must contain at-lease 6 characters{Colors.END}')
elif set_password == 1:
print(f'{Colors.WARNING}New password must be secured than your old password{Colors.END}')
elif set_password == 2:
print(f'{Colors.WARNING}Password already exists, it cannot be set.{Colors.END}')
else:
message = Options.__welcome and 'Your password is set' or 'Password has changed'
print(f'{Colors.GREEN}{message}{Colors.END}')
break
@staticmethod
def __show_options() -> int:
"""
This method continuously prompt the user to select from given option
until the user selects the correct option through command line
"""
wrong = False
selection = 0
while True:
if not wrong:
print(f'{Colors.BLUE}NOTE: please select from following options{Colors.END}')
else:
print(f'{Colors.WARNING}please select correct option{Colors.END}')
options = (
'1. Show all my passwords\n'
'2. Get current password\n'
'3. Set new password\n'
'4. Security level of my current password\n'
'5. Logout\n')
try:
selection = int(input(options))
except ValueError as e:
wrong = True
continue
else:
if selection in [1, 2, 3, 4, 5]:
break
else:
wrong = True
continue
return selection
@classmethod
def check_password(cls) -> bool:
"""
This method continuously prompt the user to enter current password to perform the selected
action for 3. If you enter wrong password for 3rd time program will terminate
"""
pm = PasswordManager()
pwd = input(f'{cls.__username.title()} Enter your current password to perform the action: ')
chances = 0
while True:
if pm.is_correct(pwd):
return True
else:
if chances > 2:
sys.exit('\nYour account is blocked')
print(f'{Colors.WARNING}You have entered wrong password{Colors.END}')
pwd = input(f'{Colors.FAIL}You have {3 - chances} attempts left. Please try again: {Colors.END}')
chances += 1
@classmethod
def main_menu(cls) -> None:
""" This method prompt the user to select correct options through command line """
pm = PasswordManager()
if not cls.__username:
cls.__ask_username()
if not pm.get_password():
print(f'{Colors.WARNING}You have not set password for you account{Colors.END}')
cls.__ask_password()
if cls.__welcome:
print(f'\n{Colors.GREEN}', 10 * '*', 'Welcome to Password Manager', 10 * '*', f'{Colors.END}\n')
cls.__welcome = False
while True:
selection = cls.__show_options()
if selection == 1 and cls.check_password():
print(f'{Colors.CYAN}{pm.get_all_passwords()}{Colors.END}')
elif selection == 2 and cls.check_password():
print(f'{Colors.CYAN}Your current password: {pm.get_password()}{Colors.END}')
elif selection == 3 and cls.check_password():
cls.__ask_password()
elif selection == 4 and cls.check_password():
level = pm.get_level()
strength = level == 0 and 'WEAK' or level == 1 and 'STRONG' or 'VERY STRONG'
print(f'{Colors.CYAN}Your password is: {strength}{Colors.END}')
elif selection == 5:
sys.exit('\nYou are logged out')
| [
"j.riyazu@gmail.com"
] | j.riyazu@gmail.com |
5527d366f62eeca1618526aeba69c022f62e9b48 | fb887b712b05c5e2a3ab1a02d9349c246fc06922 | /app/migrations/0003_auto_20210316_2151.py | 5cba198802eba0fe273af13aee79f4ead5b43c65 | [] | no_license | kousik-prabu-git/SafeNest | 3347b139488053d42abd4997cf6f9e3a11542e1d | b9368a497e37b2076683eb00e7d8f8644647b903 | refs/heads/master | 2023-04-14T12:15:47.598980 | 2021-05-02T08:28:27 | 2021-05-02T08:28:27 | 347,817,753 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | # Generated by Django 3.1.3 on 2021-03-16 16:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0002_auto_20210316_1056'),
]
operations = [
migrations.AddField(
model_name='activity',
name='reporter',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Reporter', to='auth.user'),
preserve_default=False,
),
migrations.AlterField(
model_name='activity',
name='volunteer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Volunteer', to=settings.AUTH_USER_MODEL),
),
]
| [
"uniqfocuz@gmail.com"
] | uniqfocuz@gmail.com |
96d06f52d129d45476dbb90da29283a0859e2776 | f6ff601089f678fecbfa22a4d95c1de225bc34b5 | /code12.py | eea027367cb9c55e95dee5542ce2b7d981997d76 | [] | no_license | Kumar1998/github-upload | 94c1fb50dc1bce2c4b76d83c41be2e0ce57b7fa6 | ab264537200791c87ef6d505d90be0c0a952ceff | refs/heads/master | 2021-07-05T10:14:13.591139 | 2020-07-26T15:47:30 | 2020-07-26T15:47:30 | 143,553,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | x=int(input("Enter 1st number:"))
y=int(input("Enter 2nd number:"))
sum=x+y
average=sum/2
print("Sum of the given two numbers is:",sum)
print("Average of the given two numbers is:",average) | [
"noreply@github.com"
] | Kumar1998.noreply@github.com |
be23dca58eab757909e1b01ac74a7f2f65028785 | ee9ddec6307ab76a567b4001cee47278d503b3da | /01. Naive Bayes/01. Spam filtering/classifiers/NaiveBayesClassifier.py | ba39bc3088615798e97486a6ab994a7f35f41607 | [] | no_license | Phil9l/probabilistic-graphical-models | ebf6f6366169f6e4cec72a0199a330a1e350818d | 9471b79ad7d8f0a511ae94a3719132592c5f79a7 | refs/heads/master | 2021-01-18T17:23:56.035485 | 2017-03-31T08:26:30 | 2017-03-31T08:26:30 | 86,793,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from collections import defaultdict
__all__ = ['NaiveBayesClassifier']
class NaiveBayesClassifier:
def __init__(self):
self._class_data = defaultdict(dict)
def train(self, data, cls):
raise NotImplementedError
def predict(self, data):
raise NotImplementedError
| [
"phil9lne@gmail.com"
] | phil9lne@gmail.com |
e5a9e28f6005491c144002425c212dd0d5803423 | a2e11ec88ef3c83b9f07129e76a3681a676d164f | /sessionproject3/sessionproject3/wsgi.py | a7a02fa35437ef303c13922290ff105dce0051b2 | [] | no_license | qwertypool/lofo | dadd7cd5b149a3a200b7111d803b1d0195d76642 | 3bc7bd125e7ea5a67f51dd6dd654e38a5f218055 | refs/heads/master | 2022-05-18T09:31:11.456634 | 2020-04-18T14:47:44 | 2020-04-18T14:47:44 | 256,773,858 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for sessionproject3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sessionproject3.settings')
application = get_wsgi_application()
| [
"deepapandey364@gmail.com"
] | deepapandey364@gmail.com |
2bfaceaec7ad594a098bc8fdcd309b8ee2a0c70d | 6e42b85d0deb68eeddf18bb1849daf0ee6fc0df1 | /main/tests/test_views.py | 978e99dee0b28789949a209162916459f411d351 | [] | no_license | NotSecretEmmet/TipsCalculator | 8980be28412cf4c7353d5a3a4260c19c436a9a85 | 5251d1ddeaf56e9f968e8d45f265c97ddd328698 | refs/heads/main | 2023-03-13T06:51:31.422859 | 2021-03-03T16:32:19 | 2021-03-03T16:32:19 | 316,184,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth.models import User
class TestViews(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create_user('johnlennon', 'lennon@thebeatles.com', 'johnpassword')
def test_home_view_GET(self):
self.client.force_login(self.user)
response = self.client.get(reverse('main-home'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'main/home.html')
def test_faq_view_GET(self):
self.client.force_login(self.user)
response = self.client.get(reverse('main-faq'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'main/faq.html')
| [
"emmet@emkit.nl"
] | emmet@emkit.nl |
d3542bde55fffcec1c5d1a1f2685e6561647f06c | 616133580e0f01adaa6ac4117329e93e6f7ad931 | /Main.py | edb84ae88794d8a239d611b73066c6e7c6e636bc | [] | no_license | ckarnell/kings_cup_app | df09d0c94e9f288eee3df9da72f2b0f32dd52a98 | 9f1eb5ecf7ea6f1f8403887218f37c3d135150ec | refs/heads/master | 2021-01-19T11:26:25.571172 | 2017-02-17T01:33:35 | 2017-02-17T01:33:35 | 82,243,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from Tkinter import *
from time import sleep
from controller.deck import Deck
from static.static import Static
class App:
def __init__(self):
self.deck = Deck()
self.player = 0
self.root = Tk()
self.root.title('King\'s Cup')
# Pack the initial card image.
logo = PhotoImage(file="./static/gifs/K_S.gif")
self.card_image = Label(self.root, image=logo)
self.card_image.image = logo
self.card_image.pack(side='left')
explanation = "King's Cup is a drinking game!\nJust follow the instructions."
self.rule = Label(self.root,
width = 25,
justify=LEFT,
padx = 20,
text=explanation)
self.rule.pack(side="left")
self.button = Button(self.root,
text="Draw!",
command=lambda: self.change())
self.root.bind('<Return>', self.change)
self.button.pack(side='left')
self.root.mainloop()
def change(self, event=None):
card = self.deck.getCard()
# Set the card image.
card_image = PhotoImage(file='./static/gifs/%s_%s.gif' % (card['val'], card['suit']))
self.card_image.config(image=card_image)
self.card_image.image = card_image
self.rule.config(text = Static.cardRules[card['val']])
self.rule.text = Static.cardRules[card['val']]
if __name__ == '__main__':
App()
| [
"cohen.karnell@gmail.com"
] | cohen.karnell@gmail.com |
105ba1f6775de7f1b066de7bcf5b3977007dfca9 | 851763767750eea0565b46a339cee37c1273b457 | /Interview Questions/LinkedList/IntersectionPointTwoLinkedList.py | ccd068dd9751c34704630b385017c8ae544406f6 | [] | no_license | sunamya/Data-Structures-in-Python | f076e4b2febe24fee31b05b83574e4e1f344014e | 6c3eec7a4184b93bb18f54071bc0232cb0a76a08 | refs/heads/main | 2023-06-21T02:54:23.652347 | 2021-07-18T16:35:12 | 2021-07-18T16:35:12 | 379,672,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | #User function Template for python3
'''
Function to return the value at point of intersection
in two linked list, connected in y shaped form.
Function Arguments: head_a, head_b (heads of both the lists)
Return Type: value in NODE present at the point of intersection
or -1 if no common point.
Contributed By: Nagendra Jha
{
# Node Class
class Node:
def __init__(self, data): # data -> value stored in node
self.data = data
self.next = None
}
'''
def size(node):
cnt=0
while node:
cnt+=1
node=node.next
return cnt
def npo(head1,head2,d):
for i in range(d):
head1=head1.next
while head1 and head2:
if head1==head2:
return head1.data
head1=head1.next
head2=head2.next
return None
#Function to find intersection point in Y shaped Linked Lists.
def intersetPoint(head1,head2):
#code here
diff=size(head1)-size(head2)
if diff<0: #Second list is bigger
return npo(head2,head1,diff)
else:
return npo(head1,head2,diff)
#Another approach using hasking
#Function to find intersection point in Y shaped Linked Lists.
def intersetPoint(head1,head2):
nodes=set()
#code here
while head1:
nodes.add(head1)
head1 = head1.next
# now traverse the second list and find the first node that is
# already present in the set
while head2:
# return the current node if it is found in the set
if head2 in nodes:
return head2.data
head2=head2.next
# we reach here if lists do not intersect
return None
#{
# Driver Code Starts
#Initial Template for Python 3
#Contributed by : Nagendra Jha
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
# Node Class
class Node:
def __init__(self, data): # data -> value stored in node
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
temp=None
# creates a new node with given value and appends it at the end of the linked list
def append(self, new_node):
if self.head is None:
self.head = new_node
self.temp = self.head
return
else:
self.temp.next = new_node
self.temp = self.temp.next
if __name__ == '__main__':
t=int(input())
for cases in range(t):
x,y,z = map(int,input().strip().split())
a = LinkedList() # create a new linked list 'a'.
b = LinkedList() # create a new linked list 'b'.
nodes_a = list(map(int, input().strip().split()))
nodes_b = list(map(int, input().strip().split()))
nodes_common = list(map(int, input().strip().split()))
for x in nodes_a:
node=Node(x)
a.append(node) # add to the end of the list
for x in nodes_b:
node=Node(x)
b.append(node) # add to the end of the list
for i in range(len(nodes_common)):
node=Node(nodes_common[i])
a.append(node) # add to the end of the list a
if i== 0:
b.append(node) # add to the end of the list b, only the intersection
print(intersetPoint(a.head,b.head))
# } Driver Code Ends | [
"sunamyagupta@gmail.com"
] | sunamyagupta@gmail.com |
c70b445d6d1bb1da816fcacacadb68decd13d563 | b424a13f032d5a607e6df4dd78bc47ad1d06a147 | /astroquery/simbad/tests/test_simbad.py | fe66d82dc76fea148ff9163e36a89ec61940870a | [] | no_license | EnjoyLifeFund/macSierra-py36-pkgs | 1e7eeb9b55415da6eb12465d67730d76e9cc619a | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | refs/heads/master | 2021-01-20T10:23:50.044019 | 2017-09-05T02:53:26 | 2017-09-05T02:53:26 | 90,333,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,899 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
from astropy.extern import six
import pytest
import astropy.units as u
from astropy.table import Table
import numpy as np
from ... import simbad
from ...utils.testing_tools import MockResponse
from ...utils import commons
from ...exceptions import TableParseError
from .test_simbad_remote import multicoords
# Representative coordinates in several frames, used to exercise the
# frame-detection and formatting helpers below.
GALACTIC_COORDS = commons.GalacticCoordGenerator(l=-67.02084, b=-29.75447,
                                                 unit=(u.deg, u.deg))
ICRS_COORDS = commons.ICRSCoordGenerator("05h35m17.3s -05h23m28s")
FK4_COORDS = commons.FK4CoordGenerator(ra=84.90759, dec=-80.89403,
                                       unit=(u.deg, u.deg))
FK5_COORDS = commons.FK5CoordGenerator(ra=83.82207, dec=-80.86667,
                                       unit=(u.deg, u.deg))

# Maps the SIMBAD script query type (the word following ``query`` in the
# submitted script) to the canned response file under tests/data.
DATA_FILES = {
    'id': 'query_id.data',
    'coo': 'query_coo.data',
    'cat': 'query_cat.data',
    'bibobj': 'query_bibobj.data',
    'bibcode': 'query_bibcode.data',
    'objectids': 'query_objectids.data',
    'error': 'query_error.data',
    'sample': 'query_sample.data',
    'region': 'query_sample_region.data',
}
class MockResponseSimbad(MockResponse):
    """Mock HTTP response serving canned SIMBAD results.

    The SIMBAD script submitted in the request is inspected for its query
    type (the word following ``query``), and the matching file from
    ``tests/data`` is used as the response body.
    """
    # Captures the query type, e.g. 'id' from "query id wildcard ...".
    query_regex = re.compile(r'query\s+([a-z]+)\s+')

    def __init__(self, script, cache=True, **kwargs):
        # preserve, e.g., headers
        super(MockResponseSimbad, self).__init__(**kwargs)
        self.content = self.get_content(script)

    def get_content(self, script):
        """Return the canned response bytes for *script*, or ``None`` when
        the script contains no recognizable query statement."""
        match = self.query_regex.search(script)
        if match:
            filename = DATA_FILES[match.group(1)]
            # Use a context manager so the file handle is closed promptly
            # instead of leaking until garbage collection.
            with open(data_path(filename), "rb") as f:
                return f.read()
        return None
def data_path(filename):
    """Return the path of *filename* inside this module's ``data`` directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.fixture
def patch_post(request):
    """Monkeypatch ``SimbadClass._request`` so queries are served from
    the canned files in tests/data instead of hitting the network."""
    try:
        # pytest >= 3 spelling
        mp = request.getfixturevalue("monkeypatch")
    except AttributeError:  # pytest < 3
        mp = request.getfuncargvalue("monkeypatch")
    mp.setattr(simbad.SimbadClass, '_request', post_mockreturn)
    return mp
def post_mockreturn(self, method, url, data, timeout, **kwargs):
    """Replacement for ``SimbadClass._request`` that never touches the network."""
    mocked_response = MockResponseSimbad(data['script'], **kwargs)

    # Record the submitted payload on the instance, mirroring what the real
    # request machinery stores as its last query.
    class last_query(object):
        pass

    record = last_query()
    record.data = data
    self._last_query = record
    return mocked_response
@pytest.mark.parametrize(('radius', 'expected_radius'),
                         [('5d0m0s', '5.0d'),
                          ('5d', '5.0d'),
                          ('5.0d', '5.0d'),
                          (5 * u.deg, '5.0d'),
                          (5.0 * u.deg, '5.0d'),
                          (1.2 * u.deg, '1.2d'),
                          (0.5 * u.deg, '30.0m'),
                          ('0d1m12s', '1.2m'),
                          (0.003 * u.deg, '10.8s'),
                          ('0d0m15s', '15.0s')
                          ])
def test_parse_radius(radius, expected_radius):
    """Any accepted radius spelling is rendered in SIMBAD's shorthand form."""
    assert simbad.core._parse_radius(radius) == expected_radius
@pytest.mark.parametrize(('ra', 'dec', 'expected_ra', 'expected_dec'),
                         [(ICRS_COORDS.ra, ICRS_COORDS.dec, u'5:35:17.3',
                           u'-80:52:00')
                          ])
def test_to_simbad_format(ra, dec, expected_ra, expected_dec):
    """RA/Dec pairs are converted to SIMBAD's sexagesimal text format."""
    formatted = simbad.core._to_simbad_format(ra, dec)
    assert formatted == (expected_ra, expected_dec)
@pytest.mark.parametrize(('coordinates', 'expected_frame'),
                         [(GALACTIC_COORDS, 'GAL'),
                          (ICRS_COORDS, 'ICRS'),
                          (FK4_COORDS, 'FK4'),
                          (FK5_COORDS, 'FK5')
                          ])
def test_get_frame_coordinates(coordinates, expected_frame):
    """The frame name is detected correctly; for galactic input the
    longitude/latitude must also round-trip to the original values."""
    parsed = simbad.core._get_frame_coords(coordinates)
    assert parsed[2] == expected_frame
    if parsed[2] == 'GAL':
        lon, lat = parsed[:2]
        # longitude is compared modulo 360 since either wrap is acceptable
        np.testing.assert_almost_equal(float(lon) % 360, -67.02084 % 360)
        np.testing.assert_almost_equal(float(lat), -29.75447)
def test_parse_result():
    """A well-formed VOTable parses into a Table; a malformed one raises
    TableParseError with a helpful diagnostic, while keeping the raw
    response accessible for post-mortem inspection."""
    result1 = simbad.core.Simbad._parse_result(
        MockResponseSimbad('query id '), simbad.core.SimbadVOTableResult)
    assert isinstance(result1, Table)
    # The 'error' canned response is deliberately truncated XML, so parsing
    # must fail with the canned diagnostic message below.
    with pytest.raises(TableParseError) as ex:
        simbad.core.Simbad._parse_result(MockResponseSimbad('query error '),
                                         simbad.core.SimbadVOTableResult)
    assert str(ex.value) == ('Failed to parse SIMBAD result! The raw response '
                             'can be found in self.last_response, and the '
                             'error in self.last_table_parse_error. '
                             'The attempted parsed result is in '
                             'self.last_parsed_result.\n Exception: 7:115: '
                             'no element found')
    # The raw response remains reachable afterwards: ``.text`` is unicode
    # and ``.content`` is bytes (six covers the Python 2/3 split).
    assert isinstance(simbad.Simbad.last_response.text, six.string_types)
    assert isinstance(simbad.Simbad.last_response.content, six.binary_type)
votable_fields = ",".join(simbad.core.Simbad.get_votable_fields())
@pytest.mark.parametrize(('args', 'kwargs', 'expected_script'),
                         [(["m [0-9]"], dict(wildcard=True,
                                             caller='query_object_async'),
                           ("\nvotable {" + votable_fields + "}\n"
                            "votable open\n"
                            "query id wildcard m [0-9] \n"
                            "votable close"
                            )),
                          (["2006ApJ"], dict(caller='query_bibcode_async',
                                             get_raw=True),
                           ("\n\nquery bibcode 2006ApJ \n"))
                          ])
def test_args_to_payload(args, kwargs, expected_script):
    """Query arguments are rendered into the expected SIMBAD script text."""
    payload = simbad.Simbad._args_to_payload(*args, **kwargs)
    assert payload['script'] == expected_script
@pytest.mark.parametrize(('epoch', 'equinox'),
                         [(2000, 'thousand'),
                          ('J-2000', None),
                          (None, '10e3b')
                          ])
def test_validation(epoch, equinox):
    """Each parameter set contains at least one invalid epoch/equinox value,
    so a ValueError must always be raised."""
    with pytest.raises(ValueError):
        # only one of these has to raise an exception
        if equinox is not None:
            simbad.core.validate_equinox(equinox)
        if epoch is not None:
            simbad.core.validate_epoch(epoch)
@pytest.mark.parametrize(('bibcode', 'wildcard'),
                         [('2006ApJ*', True),
                          ('2005A&A.430.165F', None)
                          ])
def test_query_bibcode_async(patch_post, bibcode, wildcard):
    """Class-level and instance-level async bibcode queries agree."""
    via_class = simbad.core.Simbad.query_bibcode_async(bibcode,
                                                       wildcard=wildcard)
    via_instance = simbad.core.Simbad().query_bibcode_async(bibcode,
                                                            wildcard=wildcard)
    assert via_class is not None
    assert via_instance is not None
    assert via_class.content == via_instance.content
def test_query_bibcode_class(patch_post):
    """A class-level wildcard bibcode query yields an astropy Table."""
    table = simbad.core.Simbad.query_bibcode("2006ApJ*", wildcard=True)
    assert isinstance(table, Table)
def test_query_bibcode_instance(patch_post):
    """An instance-level wildcard bibcode query yields an astropy Table."""
    querier = simbad.core.Simbad()
    table = querier.query_bibcode("2006ApJ*", wildcard=True)
    assert isinstance(table, Table)
def test_query_objectids_async(patch_post):
    """Class-level and instance-level async object-ID queries agree."""
    via_class = simbad.core.Simbad.query_objectids_async('Polaris')
    via_instance = simbad.core.Simbad().query_objectids_async('Polaris')
    assert via_class is not None
    assert via_instance is not None
    assert via_class.content == via_instance.content
def test_query_objectids(patch_post):
    """Object-ID queries return a Table via both class and instance."""
    for querier in (simbad.core.Simbad, simbad.core.Simbad()):
        assert isinstance(querier.query_objectids('Polaris'), Table)
def test_query_bibobj_async(patch_post):
    """Class-level and instance-level async bibobj queries agree."""
    via_class = simbad.core.Simbad.query_bibobj_async('2005A&A.430.165F')
    via_instance = simbad.core.Simbad().query_bibobj_async('2005A&A.430.165F')
    assert via_class is not None
    assert via_instance is not None
    assert via_class.content == via_instance.content
def test_query_bibobj(patch_post):
    """Bibobj queries return a Table via both class and instance."""
    for querier in (simbad.core.Simbad, simbad.core.Simbad()):
        assert isinstance(querier.query_bibobj('2005A&A.430.165F'), Table)
def test_query_catalog_async(patch_post):
    """Class-level and instance-level async catalog queries agree."""
    via_class = simbad.core.Simbad.query_catalog_async('m')
    via_instance = simbad.core.Simbad().query_catalog_async('m')
    assert via_class is not None
    assert via_instance is not None
    assert via_class.content == via_instance.content
def test_query_catalog(patch_post):
    """Catalog queries return a Table via both class and instance."""
    for querier in (simbad.core.Simbad, simbad.core.Simbad()):
        assert isinstance(querier.query_catalog('m'), Table)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, None, 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
                          (FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
                          (FK5_COORDS, None, 2000.0, 'J2000'),
                          (multicoords, 0.5*u.arcsec, 2000.0, 'J2000'),
                          ])
def test_query_region_async(patch_post, coordinates, radius, equinox, epoch):
    """Class-level and instance-level async region queries agree."""
    via_class = simbad.core.Simbad.query_region_async(
        coordinates, radius=radius, equinox=equinox, epoch=epoch)
    via_instance = simbad.core.Simbad().query_region_async(
        coordinates, radius=radius, equinox=equinox, epoch=epoch)
    assert via_class is not None
    assert via_instance is not None
    assert via_class.content == via_instance.content
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, None, 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
                          (FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
                          (FK5_COORDS, None, 2000.0, 'J2000')
                          ])
def test_query_region(patch_post, coordinates, radius, equinox, epoch):
    """Cone searches in several frames parse to a Table for class- and instance-level calls."""
    result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
                                              equinox=equinox, epoch=epoch)
    result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
                                                equinox=equinox, epoch=epoch)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, 0, 2000.0, 'J2000')])
def test_query_region_radius_error(patch_post, coordinates, radius,
                                   equinox, epoch):
    """A bare (unitless) radius of 0 must raise UnitsError instead of querying."""
    with pytest.raises(u.UnitsError):
        simbad.core.Simbad.query_region(
            coordinates, radius=radius, equinox=equinox, epoch=epoch)
    with pytest.raises(u.UnitsError):
        simbad.core.Simbad().query_region(
            coordinates, radius=radius, equinox=equinox, epoch=epoch)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, "0d", 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 1.0 * u.marcsec, 2000.0, 'J2000')
                          ])
def test_query_region_small_radius(patch_post, coordinates, radius,
                                   equinox, epoch):
    """Zero-degree and milliarcsecond radii are accepted and still return a Table."""
    result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
                                              equinox=equinox, epoch=epoch)
    result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
                                                equinox=equinox, epoch=epoch)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
@pytest.mark.parametrize(('object_name', 'wildcard'),
                         [("m1", None),
                          ("m [0-9]", True)
                          ])
def test_query_object_async(patch_post, object_name, wildcard):
    """Async object queries (plain and wildcard) give identical payloads for class- and instance-level calls."""
    response1 = simbad.core.Simbad.query_object_async(object_name,
                                                      wildcard=wildcard)
    response2 = simbad.core.Simbad().query_object_async(object_name,
                                                        wildcard=wildcard)
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content
@pytest.mark.parametrize(('object_name', 'wildcard'),
                         [("m1", None),
                          ("m [0-9]", True),
                          ])
def test_query_object(patch_post, object_name, wildcard):
    """Object queries (plain and wildcard) parse to a Table for class- and instance-level calls."""
    result1 = simbad.core.Simbad.query_object(object_name,
                                              wildcard=wildcard)
    result2 = simbad.core.Simbad().query_object(object_name,
                                                wildcard=wildcard)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
def test_list_votable_fields():
    """Listing the available VOTable fields works on the class and on an instance."""
    for querier in (simbad.core.Simbad, simbad.core.Simbad()):
        querier.list_votable_fields()
def test_get_field_description():
    """Known field names resolve to a description; an unknown name raises."""
    simbad.core.Simbad.get_field_description('bibcodelist(y1-y2)')
    simbad.core.Simbad().get_field_description('bibcodelist(y1-y2)')
    with pytest.raises(Exception):
        simbad.core.Simbad.get_field_description('xyz')
def test_votable_fields():
    """Exercise add/remove/reset of VOTable fields on the shared class-level list.

    NOTE: the assertions are order-dependent because they mutate class-level
    state; the try/except tolerates both versions where re-adding an existing
    field raises KeyError and versions where it is allowed.
    """
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    try:
        # re-adding an already-selected field may raise in some versions
        simbad.core.Simbad.add_votable_fields('z')
    except KeyError:
        pass  # this is the expected response
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    # removal is idempotent: repeating the same removal changes nothing
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    # removing every remaining field falls back to the defaults
    simbad.core.Simbad.remove_votable_fields('ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))
    # reset_votable_fields restores the default selection
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    simbad.core.Simbad.reset_votable_fields()
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))
def test_query_criteria1(patch_post):
    """query_criteria forwards a raw region criterion verbatim into the generated script."""
    Simbad = simbad.core.Simbad()
    result = Simbad.query_criteria(
        "region(box, GAL, 49.89 -0.3, 0.5d 0.5d)", otype='HII')
    assert isinstance(result, Table)
    # the raw criterion string must appear unchanged in the submitted script
    assert "region(box, GAL, 49.89 -0.3, 0.5d 0.5d)" in Simbad._last_query.data['script']
def test_query_criteria2(patch_post):
    """Keyword criteria (otype=...) appear in the script, honoring custom field selections."""
    S = simbad.core.Simbad()
    S.add_votable_fields('ra(d)', 'dec(d)')
    S.remove_votable_fields('coordinates')
    assert S.get_votable_fields() == ['main_id', 'ra(d)', 'dec(d)']
    result = S.query_criteria(otype='SNR')
    assert isinstance(result, Table)
    assert 'otype=SNR' in S._last_query.data['script']
def test_simbad_settings1():
    """Removing a bare field name does not remove its parametrized variant."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec')
    # 'dec(5)' survives: only the exact names 'ra' and 'dec' were removed
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates', 'dec(5)'])
    simbad.core.Simbad.reset_votable_fields()
def test_simbad_settings2():
    """strip_params=True also removes parametrized variants such as 'dec(5)'."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_votablesettings():
    """Regression: adding parametrized variants of already-present fields is allowed."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    # this is now allowed:
    simbad.core.Simbad.add_votable_fields('ra(d)', 'dec(d)')
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates',
                                                  'ra', 'dec(5)', 'ra(d)',
                                                  'dec(d)']
    # cleanup
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_votablesettings2():
    """Regression: several fluxdata(...) selections coexist and strip_params removes them all."""
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('fluxdata(J)')
    simbad.core.Simbad.add_votable_fields('fluxdata(H)')
    simbad.core.Simbad.add_votable_fields('fluxdata(K)')
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates',
             'fluxdata(J)', 'fluxdata(H)', 'fluxdata(K)'])
    simbad.core.Simbad.remove_votable_fields('fluxdata', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_issue388():
    """Regression (#388): byte payloads must parse into a single-row table."""
    # This is a python-3 issue: content needs to be decoded?
    response = MockResponseSimbad('\nvotable {main_id,coordinates}\nvotable '
                                  'open\nquery id m1 \nvotable close')
    with open(data_path('m1.data'), "rb") as f:
        response.content = f.read()
    parsed_table = simbad.Simbad._parse_result(response,
                                               simbad.core.SimbadVOTableResult)
    # MAIN_ID comes back as bytes, hence the b'M 1' comparison
    assert parsed_table['MAIN_ID'][0] == b'M 1'
    assert len(parsed_table) == 1
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
fdece734fd20e95f571e250da5d3d0bb56e27b8a | 5b029c81490df1cd2988108ed23e71aca40a9816 | /MusicPredictiveAnalysis_EE660_USCFall2015-master/Code/Machine_Learning_Algos/10k_Tests/ml_classification_simple_pca.py | a9f5f7a9d11effe0c958f66898e4d78676120bad | [
"MIT"
] | permissive | lianghaol/Machine-Learning | e9b9ea009629ee1a0e24a0e55d90a3ed3c92ec05 | e3353252ca54b62ff8f5ada87566bab4f373c260 | refs/heads/master | 2021-12-10T19:19:34.914637 | 2016-09-16T02:48:30 | 2016-09-16T02:48:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | __author__ = 'NishantNath'
# !/usr/bin/env python
'''
Using : Python 2.7+ (backward compatibility exists for Python 3.x if separate environment created)
Required files : hdf5_getters.py
Required packages : numpy, pandas, matplotlib, sklearn
Steps:
1.
# Uses Simple PCA to find the most important features
# Uses Simple PCA Iteratively to find performance based on number of components
'''
import pandas
import matplotlib.pyplot as mpyplot
import pylab
import numpy
from itertools import cycle
def plot_2D(data, target, target_names):
    """Scatter-plot 2-D `data`, one color/legend entry per integer class label.

    `target` holds integer labels (0..len(target_names)-1); rows whose label
    equals a class index are drawn in that class's color.
    """
    palette = cycle('rgbcmykw')
    mpyplot.figure()
    for class_id, color, label in zip(range(len(target_names)), palette, target_names):
        mask = target == class_id
        mpyplot.scatter(data[mask, 0], data[mask, 1], c=color, label=label)
    mpyplot.legend()
# Genre label encoding used by this project:
# [0: 'CLASSICAL', 1: 'METAL', 2: 'HIPHOP', 3: 'DANCE', 4: 'JAZZ']
# [5:'FOLK', 6: 'SOUL', 7: 'ROCK', 8: 'POP', 9: 'BLUES']
# NOTE(review): this script uses Python 2 print statements and so only runs
# under Python 2 despite any repo-level language hints.
col_input=['genre', 'year', 'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9', 'col10', 'col11', 'col12', 'col13', 'col14', 'col15', 'col16', 'col17', 'col18', 'col19', 'col20', 'col21', 'col22', 'col23', 'col24', 'col25', 'col26', 'col27', 'col28', 'col29', 'col30', 'col31', 'col32', 'col33', 'col34', 'col35', 'col36', 'col37', 'col38', 'col39', 'col40', 'col41', 'col42', 'col43', 'col44', 'col45', 'col46', 'col47', 'col48', 'col49', 'col50', 'col51', 'col52', 'col53', 'col54', 'col55', 'col56', 'col57', 'col58', 'col59', 'col60', 'col61', 'col62', 'col63', 'col64', 'col65', 'col66', 'col67', 'col68', 'col69', 'col70', 'col71', 'col72']
df_input = pandas.read_csv('pandas_output_missing_data_fixed.csv', header=None, delimiter = ",", names=col_input)
# range(2,74) means its goes from col 2 to col 73
# NOTE(review): the columns were renamed via names=col_input, so selecting by
# the integer labels list(range(2, 74)) relies on positional fallback behavior
# of older pandas -- confirm this still selects 'col1'..'col72' on the pandas
# version pinned for this project.
df_input_data = df_input[list(range(2, 74))]
df_input_target = df_input[list(range(0, 1))]
# One random scalar per sample, used only to color the scatter plot below.
colors = numpy.random.rand(len(df_input_target))
# Simple PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=6) #from optimal pca components chart n_components=6
pca.fit(df_input_data)
# Relative weights on features
print pca.explained_variance_ratio_
print pca.components_
# performance of number of components vs variance
pca2 = PCA().fit(df_input_data)
# Plotting Simple PCA: cumulative explained variance vs component count.
mpyplot.figure(1)
p1 = mpyplot.plot(numpy.cumsum(pca2.explained_variance_ratio_))
mpyplot.xlabel('number of components')
mpyplot.ylabel('cumulative explained variance')
mpyplot.show(p1)
# Reduced Feature Set: project the data onto the 6 principal components.
df_input_data_reduced = pca.transform(df_input_data)
# Plotting Reduced Feature Set
mpyplot.figure(2)
p2 = mpyplot.scatter(df_input_data_reduced[:, 0], df_input_data_reduced[:, 1], c=colors)
mpyplot.colorbar(p2)
mpyplot.show(p2)
# Plotting in 2D - fix this
# NOTE(review): df_input_target is a DataFrame; pandas.unique expects a 1-D
# array, so this call likely fails as written -- confirm and pass the column
# (e.g. df_input_target['genre']) instead.
mpyplot.figure(3)
plot_2D(df_input_data_reduced, df_input_target, pandas.unique(df_input_target))
"ozbekahmetcan@gmail.com"
] | ozbekahmetcan@gmail.com |
f8f8a93e2b53a4b74d0c41930fd04e417f2189c8 | 2f418a0f2fcca40f84ec0863b31ff974b574350c | /scripts/addons_extern/cut_mesh-master/op_slice/slice_datastructure.py | 6c86f20d47db1178d36c9ecde0f011a0e1296f6c | [] | no_license | JT-a/blenderpython279 | 57a81b55564218f3b1417c2ffa97f5161897ec79 | 04846c82f794c22f87d677d9eb8cec1d05c48cda | refs/heads/master | 2021-06-25T06:58:07.670613 | 2017-09-11T11:14:36 | 2017-09-11T11:14:36 | 103,723,697 | 4 | 2 | null | 2017-09-16T04:09:31 | 2017-09-16T04:09:31 | null | UTF-8 | Python | false | false | 7,750 | py | '''
Created on Oct 8, 2015
@author: Patrick
'''
import time
import bpy
import bmesh
from mathutils import Vector, Matrix, kdtree
from mathutils.bvhtree import BVHTree
from mathutils.geometry import intersect_point_line, intersect_line_plane
from bpy_extras import view3d_utils
from ..bmesh_fns import grow_selection_to_find_face, flood_selection_faces, edge_loops_from_bmedges
from ..cut_algorithms import cross_section_2seeds_ver1, path_between_2_points
from ..geodesic import geodesic_walk, continue_geodesic_walk, gradient_descent
from .. import common_drawing
class Slice(object):
    '''
    A class which manages user placed points on an object to create a
    piecewise path of geodesics, adapted to the objects surface.
    '''
    def __init__(self,context, cut_object):
        """Build a private BMesh + BVH copy of `cut_object` for picking/pathing.

        :param context: Blender context (unused here but kept for API symmetry).
        :param cut_object: the mesh Object whose surface is being sliced.
        """
        self.cut_ob = cut_object
        # Private BMesh copy so queries never touch the scene mesh directly.
        self.bme = bmesh.new()
        self.bme.from_mesh(cut_object.data)
        self.bme.verts.ensure_lookup_table()
        self.bme.edges.ensure_lookup_table()
        self.bme.faces.ensure_lookup_table()
        #non_tris = [f for f in self.bme.faces if len(f.verts) > 3]
        #bmesh.ops.triangulate(self.bme, faces = non_tris, quad_method = 0, ngon_method = 0)
        #non_tris = [f for f in self.bme.faces if len(f.verts) > 3]
        #if len(non_tris):
        #geom = bmesh.ops.connect_verts_concave(self.bme, non_tris)
        self.bme.verts.ensure_lookup_table()
        self.bme.edges.ensure_lookup_table()
        self.bme.faces.ensure_lookup_table()
        # BVH over the BMesh for fast local-space ray casts.
        self.bvh = BVHTree.FromBMesh(self.bme)
        # seed/target: BMFaces under the user's clicks; *_loc are the hit
        # locations in the object's local space.
        self.seed = None
        self.seed_loc = None
        self.target = None
        self.target_loc = None
        # Path vertices (local space) between seed and target.
        self.path = []
        # NOTE(review): self.geo_data is only initialized in reset_vars() and
        # click_add_seed(), not here -- confirm no code path reads it earlier.
    def reset_vars(self):
        '''
        Clear the seed/target selection, cached geodesic data and the path.
        '''
        self.seed = None
        self.seed_loc = None
        self.target = None
        self.target_loc = None
        self.geo_data = [dict(), set(), set(), set()] #geos, fixed, close, far
        self.path = []
    def grab_initiate(self):
        """Snapshot target/path for undo; return True only if a target exists."""
        if self.target != None :
            self.grab_undo_loc = self.target_loc
            self.target_undo = self.target
            self.path_undo = self.path
            return True
        else:
            return False
    def grab_mouse_move(self,context,x,y):
        """Move the grabbed target to the surface point under the mouse and re-path.

        Cancels the grab (restoring the undo snapshot) when the mouse ray
        misses the object.
        """
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        # Blender changed Object.ray_cast's signature/return in 2.77.
        # NOTE(review): bversion() is not among this module's imports --
        # presumably a version helper provided elsewhere; confirm it is in
        # scope at runtime.
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        loc2, no2, face_ind2, d = self.bvh.ray_cast(imx * ray_origin, view_vector)
        # Debug output comparing object vs BVH ray-cast hit locations.
        if loc != None and loc2 != None:
            print((loc - loc2).length)
        if face_ind == -1:
            self.grab_cancel()
            return
        self.target = self.bme.faces[face_ind]
        self.target_loc = loc
        vrts, eds, ed_cross, f_cross, error = path_between_2_points(self.bme, self.bvh, mx,mx* self.seed_loc,mx*self.target_loc,
                                            max_tests = 10000, debug = True,
                                            prev_face = None, use_limit = True)
        # Keep the previous path when pathing fails mid-drag.
        if not error:
            self.path = vrts
        #else:
            #self.path = []
    def grab_cancel(self):
        """Restore target/path from the undo snapshot taken at grab_initiate()."""
        self.target_loc = self.grab_undo_loc
        self.target = self.target_undo
        self.path = self.path_undo
        return
    def grab_confirm(self):
        """Commit the grab by discarding the undo snapshot."""
        self.grab_undo_loc = None
        self.target_undo = None
        self.path_undo = []
        return
    def click_add_seed(self,context,x,y):
        '''
        x,y = event.mouse_region_x, event.mouse_region_y

        Ray-cast the click onto the object and record the hit face/location
        as the seed (path start); also resets the cached geodesic data.
        '''
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        # See note in grab_mouse_move about bversion() and the 2.77 API change.
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        if face_ind == -1:
            # NOTE(review): self.selected is not set anywhere else in this
            # class -- confirm which consumer reads this sentinel.
            self.selected = -1
            return
        self.seed = self.bme.faces[face_ind]
        self.seed_loc = loc
        self.geo_data = [dict(), set(), set(), set()]
    def click_add_target(self, context, x, y):
        """Ray-cast the click to set the target face/location and compute the path.

        On pathing failure the current path is cleared (unlike grab_mouse_move,
        which keeps the previous path).
        """
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        # See note in grab_mouse_move about bversion() and the 2.77 API change.
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        if face_ind == -1: return
        self.target = self.bme.faces[face_ind]
        self.target_loc = loc
        vrts, eds, ed_cross, f_cross, error = path_between_2_points(self.bme, self.bvh, mx,mx* self.seed_loc,mx*self.target_loc,
                                            max_tests = 10000, debug = True,
                                            prev_face = None, use_limit = True)
        if not error:
            self.path = vrts
        else:
            self.path = []
        return
    def draw(self,context):
        """Draw the current path (blue line), seed (red dot) and target (green dot)."""
        if len(self.path):
            mx = self.cut_ob.matrix_world
            pts = [mx * v for v in self.path]
            common_drawing.draw_polyline_from_3dpoints(context, pts, (.2,.1,.8,1), 3, 'GL_LINE')
        if self.seed_loc != None:
            mx = self.cut_ob.matrix_world
            common_drawing.draw_3d_points(context, [mx * self.seed_loc], 8, color = (1,0,0,1))
        if self.target_loc != None:
            mx = self.cut_ob.matrix_world
            common_drawing.draw_3d_points(context, [mx * self.target_loc], 8, color = (0,1,0,1))
class PolyCutPoint(object):
    """A user-placed cut point together with its surface context."""
    def __init__(self,co):
        # Coordinate of the point (Vector).
        self.co = co
        # Surface normal at the point; None until set by other code --
        # presumably a later snapping step; confirm.
        self.no = None
        # BMFace the point lies on, plus the faces of its local region.
        self.face = None
        self.face_region = set()
    def find_closest_non_manifold(self):
        # NOTE(review): unimplemented stub -- always returns None; confirm
        # whether callers expect a NonManifoldEndpoint here.
        return None
class NonManifoldEndpoint(object):
    """An endpoint anchored on a non-manifold (border) edge.

    Only valid for edges with exactly one linked face; construction fails
    loudly for any other edge.
    """
    def __init__(self,co, ed):
        """
        :param co: endpoint coordinate.
        :param ed: edge expected to have exactly one linked face.
        :raises ValueError: if `ed` is not a border edge.
        """
        # BUG FIX: the original did `return None` here, but returning from
        # __init__ cannot abort construction -- callers received a
        # half-initialized object with no .co/.ed/.face attributes.  Raise
        # instead so invalid edges are caught at the call site.
        if len(ed.link_faces) != 1:
            raise ValueError('NonManifoldEndpoint requires a border edge '
                             '(exactly one linked face)')
        self.co = co
        self.ed = ed
        self.face = ed.link_faces[0]
| [
"meta.androcto1@gmail.com"
] | meta.androcto1@gmail.com |
82a251d9f29c640dcd3ba0e0881292074c885c57 | caeedf133282db88bb11d5a4ae6fb20fee609103 | /AdvpythonDay3/demomutliprocessing/pshttpclient.py | d97fe5bdfba00f99929584fd95bea398c6a04b01 | [] | no_license | Shital-andhalkar/Advance_python_course | 81190bfdaaf6b4da1f43b592ebe496f65461a05c | d49e25efe52ae13713108572493f15cb4d96ea9d | refs/heads/master | 2020-04-03T20:36:25.822808 | 2018-11-01T07:07:33 | 2018-11-01T07:07:33 | 155,551,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | import requests
import multiprocessing
from requests.exceptions import ConnectionError
def web_crawler(q):
    """Worker: pull one URL from `q`, fetch it, print a 128-byte payload preview."""
    try:
        worker = multiprocessing.current_process().name
        target = q.get()
        body = requests.get(target).content
        print("{}:{}:{}".format(worker, target, body[:128]))
    except ConnectionError as err:
        print(err)
def main():
    """Parent process: fan one crawler process out per URL and wait for all.

    Fixes two issues in the original: the work queue is filled *before* the
    workers start (so no worker can block on an empty queue if the parent is
    delayed), and every child is joined so the parent cannot exit while
    workers are still printing.
    """
    queue_obj = multiprocessing.Queue()
    urls = ['http://python.org', 'http://linux.org', 'http://kernel.org/', 'http://google.com']
    # Enqueue the work first so each child finds a URL waiting for it.
    for url in urls:
        queue_obj.put(url)
    children = []
    for _ in urls:
        child = multiprocessing.Process(target=web_crawler, args=(queue_obj,))
        child.start()
        children.append(child)
    # Wait for every worker to finish before returning.
    for child in children:
        child.join()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | Shital-andhalkar.noreply@github.com |
42cb4acff470deea4ca9e3b4cc937a546a964c39 | dc75ed733ecd023aebc2989c1f956ca575bd9e14 | /load_testing/mixed_tomcat.py | 2828e6f44380488c2e1649f73646d64b6a838316 | [] | no_license | deven810/Web-Projects | fd5e9f1e5fd9c348f4731219052e6f680638e311 | 4e8cc3f889d95bf3a37513291da50bae7a704918 | refs/heads/master | 2020-04-17T04:55:20.054883 | 2018-12-05T22:38:29 | 2018-12-05T22:38:29 | 166,253,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # mixed_tomcat.py
import sys, random
from locust import HttpLocust, TaskSet
def readRequest(locust):
    """Open a random existing post (simulated read traffic)."""
    post_id = random.randint(1, 500)
    url = '/editor/post?action=open&username=cs144&postid=' + str(post_id)
    locust.client.get(url, name='/editor/post?action=open')
def writeRequest(locust):
    """Save a random post with a fixed title and body (simulated write traffic)."""
    post_id = random.randint(1, 500)
    url = ('/editor/post?action=open&username=cs144&postid=' + str(post_id) +
           '&title=Loading%20Test&body=***Hello%20World!***')
    locust.client.post(url, name='/editor/post?action=save')
class MyTaskSet(TaskSet):
    """ the class MyTaskSet inherits from the class TaskSet, defining the behavior of the user """
    # Weighted task mix: 1 write for every 9 reads (10% / 90%).
    tasks = {writeRequest:1, readRequest:9}
class MyLocust(HttpLocust):
    """ the class MyLocust inherits from the class HttpLocust, representing an HTTP user """
    task_set = MyTaskSet  # behavior profile each simulated user executes
    min_wait = 1000  # minimum wait between tasks, in milliseconds
    max_wait = 2000  # maximum wait between tasks, in milliseconds
"devenagrawal.810@gmail.com"
] | devenagrawal.810@gmail.com |
ce51ad1ecc38aea688ceb967a158f3a5b6e99f01 | ed12b8d91b207d4bb5cd5bf114184e08c4a9237c | /pe033.py | bab1aa10b62766d72e4407e8447b1d4db87c8972 | [] | no_license | Rynant/project-euler | 544b4b48dda63913abf7d61201fe3ea0961b118f | c19090a6e0e8db3422c47dcce0fb886840493428 | refs/heads/master | 2021-01-10T20:43:16.782124 | 2014-06-11T18:11:28 | 2014-06-11T18:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | from primes import gcd
def answer():
    """Project Euler 33: find the four non-trivial 'digit-cancelling'
    fractions i/j (both two-digit, i < j) where deleting a digit shared
    between numerator and denominator leaves an equal fraction, and return
    the denominator of their product in lowest terms.

    Improvements over the original: fraction equality is tested with exact
    integer cross-multiplication (i*l == j*k) instead of a fragile float
    comparison, and the stdlib math.gcd replaces the project-local helper.

    :return: the reduced denominator (known result: 100).
    """
    from math import gcd  # stdlib gcd; no need for the project-local primes.gcd
    numerator = denominator = 1
    for i in range(10, 100):
        if not i % 10:          # skip trivial fractions like 30/50
            continue
        for j in range(i + 1, 100):
            if not j % 10:
                continue
            k, l = str(i), str(j)
            # Cancel the digit of j that also appears in i; keep the
            # leftover digit of each number.
            if l[0] in k:
                k, l = int(k[(k.index(l[0]) + 1) % 2]), int(l[1])
            elif l[1] in k:
                k, l = int(k[(k.index(l[1]) + 1) % 2]), int(l[0])
            else:
                continue
            # Exact test for i/j == k/l using cross-multiplication.
            if i * l == j * k:
                numerator *= k
                denominator *= l
    return denominator // gcd(numerator, denominator)
if __name__=='__main__':
print(answer()) | [
"rgrant@garnethill.com"
] | rgrant@garnethill.com |
90e46fcc82a3f160f8cd2bcfbc49f9442619ab7d | 10b43efca8647c86ac0ea9df1dd8368db5dff931 | /gen_data.py | 0d5b03686f6b71f86f251a59605ce223d2bdd756 | [
"MIT"
] | permissive | ruiyangio/latency-graph | ed0244c87d9b6d3d3bff7fa4aaaca1a07f9e40e2 | ba0414b11c31f565a5ca41b29e1d0aad9e545aa2 | refs/heads/master | 2020-03-25T03:36:41.982072 | 2018-08-03T04:20:27 | 2018-08-03T04:20:27 | 143,350,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import csv
import random
import numpy as np
import string
nodes = []
for i in range(120):
nodes.append(''.join((random.choice(string.ascii_uppercase), random.choice(string.ascii_uppercase), random.choice(string.digits))))
edges = []
for i in range(15000):
edges.append( random.choice(nodes) + "\t" + random.choice(nodes) + "\t" + str(np.random.uniform(400)) + "\n" )
with open('data.tsv', 'w') as file:
for edge in edges:
file.write(edge) | [
"ruiyangwind@gmail.com"
] | ruiyangwind@gmail.com |
33a16862ec2f40db072c68c1e4c243096bce805a | abb614790bdf41c7db9d09dfdea4385f78c2be52 | /rtk-RQA/rtk/hardware/component/connection/Socket.py | c1454c5a9c43e324ac69b5e3c374fd2decff5864 | [
"BSD-3-Clause"
] | permissive | codacy-badger/rtk | f981bb75aadef6aaeb5a6fa427d0a3a158626a2a | bdb9392164b0b32b0da53f8632cbe6e3be808b12 | refs/heads/master | 2020-03-19T02:46:10.320241 | 2017-10-26T20:08:12 | 2017-10-26T20:08:12 | 135,659,105 | 0 | 0 | null | 2018-06-01T02:43:23 | 2018-06-01T02:43:23 | null | UTF-8 | Python | false | false | 5,321 | py | #!/usr/bin/env python
"""
######################################################
Hardware.Component.Connection Package IC Socket Module
######################################################
"""
# -*- coding: utf-8 -*-
#
# rtk.hardware.component.connection.Socket.py is part of the RTK
# Project
#
# All rights reserved.
import gettext
import locale
try:
import Configuration
import Utilities
from hardware.component.connection.Connection import Model as Connection
except ImportError: # pragma: no cover
import rtk.Configuration as Configuration
import rtk.Utilities as Utilities
from rtk.hardware.component.connection.Connection import Model as \
Connection
__author__ = 'Andrew Rowland'
__email__ = 'andrew.rowland@reliaqual.com'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
# Add localization support.
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error: # pragma: no cover
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
class Socket(Connection):
    """
    The Socket connection data model contains the attributes and methods of an
    IC socket connection component. The attributes of an IC socket connection
    are:
    :cvar int subcategory: the Connection subcategory.
    :ivar float base_hr: the MIL-HDBK-217FN2 base/generic hazard rate.
    :ivar str reason: the reason(s) the Connection is overstressed.
    :ivar float piE: the MIL-HDBK-217FN2 operating environment factor.
    Hazard Rate Models:
        # MIL-HDBK-217FN2, section 15.3.
    """
    # MIL-HDBK-217FN2 hazard rate calculation variables.
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
    # Quality factors, indexed by (quality - 1).
    _piQ = [1.0, 2.0]
    # Environment factors, indexed by (environment_active - 1).
    _piE = [1.0, 3.0, 14.0, 6.0, 18.0, 8.0, 12.0, 11.0, 13.0, 25.0, 0.5, 14.0,
            36.0, 650.0]
    # Parts-count base hazard rates, indexed by (environment_active - 1).
    _lambdab_count = [0.0019, 0.0058, 0.027, 0.012, 0.035, 0.015, 0.023, 0.021,
                      0.025, 0.048, 0.00097, 0.027, 0.070, 1.3]
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
    subcategory = 74                        # Subcategory ID in the common DB.
    def __init__(self):
        """
        Method to initialize a IC Socket connection data model instance.
        """
        super(Socket, self).__init__()
        # Define private dictionary attributes.
        # Define private list attributes.
        # Define private scalar attributes.
        # Define public dictionary attributes.
        # Define public list attributes.
        # Define public scalar attributes.
        self.n_active_contacts = 0      # Number of active contacts (pins).
        self.piP = 0.0                  # Active-pins correction factor.
        self.base_hr = 0.00042          # Part-stress base hazard rate.
    def set_attributes(self, values):
        """
        Method to set the Multi-Pin Connection data model attributes.
        :param tuple values: tuple of values to assign to the instance
                             attributes.  Indices 0-132 are consumed by the
                             Connection base class; index 133 is piP and
                             index 134 is the active contact count.
        :return: (_code, _msg); the error code and error message.
        :rtype: tuple
        """
        _code = 0
        _msg = ''
        (_code, _msg) = Connection.set_attributes(self, values[:133])
        try:
            # base_hr is a fixed constant for sockets, re-asserted here in
            # case the base class overwrote it.
            self.base_hr = 0.00042
            self.piP = float(values[133])
            self.n_active_contacts = int(values[134])
        except IndexError as _err:
            _code = Utilities.error_handler(_err.args)
            _msg = "ERROR: Insufficient input values."
        except(TypeError, ValueError) as _err:
            _code = Utilities.error_handler(_err.args)
            _msg = "ERROR: Converting one or more inputs to correct data type."
        return(_code, _msg)
    def get_attributes(self):
        """
        Method to retrieve the current values of the Multi-Pin Connection data
        model attributes.
        :return: the base Connection attributes extended with
                 (piP, n_active_contacts)
        :rtype: tuple
        """
        _values = Connection.get_attributes(self)
        _values = _values + (self.piP, self.n_active_contacts)
        return _values
    def calculate_part(self):
        """
        Method to calculate the hazard rate for the Multi-Pin Connection data
        model.
        Builds the hazard_rate_model dict for either the parts-count
        (hazard_rate_type == 1) or part-stress (== 2) calculation and
        delegates the final evaluation to Connection.calculate_part.
        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """
        from math import exp
        self.hazard_rate_model = {}
        if self.hazard_rate_type == 1:
            self.hazard_rate_model['equation'] = 'lambdab * piQ'
            # Quality factor.
            self.piQ = self._piQ[self.quality - 1]
        elif self.hazard_rate_type == 2:
            self.hazard_rate_model['equation'] = 'lambdab * piE * piP'
            # Active pins correction factor; undefined (0.0) below 2 pins.
            if self.n_active_contacts >= 2:
                self.piP = exp(((self.n_active_contacts - 1) / 10.0)**0.51064)
            else:
                self.piP = 0.0
            self.hazard_rate_model['piP'] = self.piP
            # Environmental correction factor.
            self.piE = self._piE[self.environment_active - 1]
        return Connection.calculate_part(self)
| [
"arowland@localhost.localdomain"
] | arowland@localhost.localdomain |
4f2d80280f1710eb34ef81a47bbccef522f62c15 | af5f6d99a23711725ccf0431a62ca37b96acccf3 | /manage.py | 0b1d71a459fa7ee0fc39cd93cffd8801fdc5a6cf | [
"MIT"
] | permissive | nimowairimu/Django-IP1 | daa58f53c1d94e7d1d8dcbbead081be506c75343 | 0def901a0a2f34f644ed42bd4d1c8f883743cffe | refs/heads/main | 2023-03-23T17:21:50.203551 | 2021-03-23T17:23:30 | 2021-03-23T17:23:30 | 348,981,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django command-line entry point: point Django at this
    # project's settings module, then dispatch argv to the management CLI.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vetdaily.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while chaining the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"nimowairimu25@gmail.com"
] | nimowairimu25@gmail.com |
a0e34d34734d4acd75e8ed1f3ea57119148e7c08 | 2b5cb00bda71b5e76843baa84a9ce1ca6be9e13b | /clustering/acquire_zillow.py | 930e20b94a5ce08c8b410da6e0c4e28d9207feef | [] | no_license | CodyBrettWatson/ds-methodologies-exercises | 07e851b2d08c6c889db4bd849d2d12cc4cc97ecc | 902e880b31d2b76eedca8d80ff0de9e0aa0dcd0f | refs/heads/master | 2020-05-02T15:29:39.784446 | 2019-05-20T12:58:06 | 2019-05-20T12:58:06 | 178,042,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,613 | py | ##########################################
## WILL NEED TO PIPE ALL THESE FUNCTIONS##
##########################################
# Getting data from SQL databases
from env import host, user, password
import pandas as pd
from sqlalchemy import create_engine
def get_db_url(host: str, user: str, password: str, db_name: str) -> str:
    """Assemble the SQLAlchemy connection URL for a MySQL database
    (mysql+pymysql driver)."""
    return "mysql+pymysql://{}:{}@{}/{}".format(user, password, host, db_name)
def get_sql_conn(host: str, user: str, password: str, db_name: str):
    """
    return a mysql connection object (a SQLAlchemy engine)
    """
    url = get_db_url(host, user, password, db_name)
    return create_engine(url)
def df_from_sql(query: str, url: str) -> pd.DataFrame:
    """
    Run `query` against the database at `url` and return the result set
    as a pandas DataFrame.
    """
    return pd.read_sql(sql=query, con=url)
def get_zillow_data() -> pd.DataFrame:
    """Fetch the 2016 properties joined to the 2017 properties on parcelid
    from the zillow database, using the module-level credentials."""
    query = ("SELECT * "
             "FROM properties_2016 "
             "JOIN properties_2017 USING(parcelid);")
    return df_from_sql(query, get_db_url(host, user, password, "zillow"))
def get_2016_zillow():
    """Pull 2016 properties joined to their 2016 predictions (logerror) and
    left-joined to every code-lookup table so the human-readable *desc
    columns come along; returns a DataFrame."""
    idb = "zillow"
    query = ('\
    SELECT p16.*, pred16.logerror, act.airconditioningdesc, ast.architecturalstyledesc, \
    bct.buildingclassdesc, hst.heatingorsystemdesc, plut.propertylandusedesc, \
    st.storydesc, tct.typeconstructiondesc FROM properties_2016 p16 \
    JOIN predictions_2016 pred16 \
    ON pred16.parcelid = p16.parcelid \
    LEFT JOIN airconditioningtype act \
    ON p16.airconditioningtypeid = act.airconditioningtypeid\
    LEFT JOIN architecturalstyletype ast \
    ON p16.architecturalstyletypeid = ast.architecturalstyletypeid\
    LEFT JOIN buildingclasstype bct \
    ON p16.buildingclasstypeid = bct.buildingclasstypeid\
    LEFT JOIN heatingorsystemtype hst \
    ON p16.heatingorsystemtypeid = hst.heatingorsystemtypeid\
    LEFT JOIN propertylandusetype plut \
    ON p16.propertylandusetypeid = plut.propertylandusetypeid\
    LEFT JOIN storytype st \
    ON p16.storytypeid = st.storytypeid\
    LEFT JOIN typeconstructiontype tct \
    ON p16.typeconstructiontypeid = tct.typeconstructiontypeid;')
    url = get_db_url(host, user, password, idb)
    return df_from_sql(query, url)
def get_2017_zillow():
    """Pull 2017 properties joined to their 2017 predictions (logerror) and
    left-joined to every code-lookup table so the human-readable *desc
    columns come along; returns a DataFrame."""
    idb = "zillow"
    query = ('\
    SELECT p17.*, pred17.logerror, act.airconditioningdesc, ast.architecturalstyledesc, \
    bct.buildingclassdesc, hst.heatingorsystemdesc, plut.propertylandusedesc, \
    st.storydesc, tct.typeconstructiondesc FROM properties_2017 p17 \
    JOIN predictions_2017 pred17 \
    ON pred17.parcelid = p17.parcelid \
    LEFT JOIN airconditioningtype act \
    ON p17.airconditioningtypeid = act.airconditioningtypeid\
    LEFT JOIN architecturalstyletype ast \
    ON p17.architecturalstyletypeid = ast.architecturalstyletypeid\
    LEFT JOIN buildingclasstype bct \
    ON p17.buildingclasstypeid = bct.buildingclasstypeid\
    LEFT JOIN heatingorsystemtype hst \
    ON p17.heatingorsystemtypeid = hst.heatingorsystemtypeid\
    LEFT JOIN propertylandusetype plut \
    ON p17.propertylandusetypeid = plut.propertylandusetypeid\
    LEFT JOIN storytype st \
    ON p17.storytypeid = st.storytypeid\
    LEFT JOIN typeconstructiontype tct \
    ON p17.typeconstructiontypeid = tct.typeconstructiontypeid;')
    url = get_db_url(host, user, password, idb)
    return df_from_sql(query, url)
def merge_dfs():
    """Concatenate the 2016 and 2017 zillow pulls into a single DataFrame
    (rows stacked, 2016 first)."""
    return pd.concat([get_2016_zillow(), get_2017_zillow()])
def turn_to_csv():
    """Persist the merged 2016/2017 pull to zillow_16_17.csv.

    NOTE(review): the file is written tab-separated (sep='\\t') despite the
    .csv extension -- confirm downstream readers expect TSV.
    """
    df = merge_dfs()
    df.to_csv('zillow_16_17.csv', sep='\t', index=False)
def drop_columns(df):
    """Drop the raw id/typeid columns whose information is carried by the
    joined *desc lookup columns; returns the trimmed DataFrame."""
    unwanted = ['id',
                'airconditioningtypeid',
                'architecturalstyletypeid',
                'buildingclasstypeid',
                'buildingqualitytypeid',
                'decktypeid',
                'heatingorsystemtypeid',
                'propertylandusetypeid',
                'storytypeid',
                'typeconstructiontypeid']
    return df.drop(columns=unwanted)
def reindex_df (df):
df = df.reindex(columns=[
'parcelid','logerror',
'bathroomcnt','bedroomcnt','calculatedbathnbr','fullbathcnt','roomcnt',
'calculatedfinishedsquarefeet','lotsizesquarefeet',
'unitcnt','propertylandusedesc','propertycountylandusecode','propertyzoningdesc',
'latitude','longitude','regionidcity','regionidcounty','fips','regionidneighborhood','regionidzip',
'yearbuilt',
'structuretaxvaluedollarcnt','taxvaluedollarcnt','landtaxvaluedollarcnt','taxamount','assessmentyear',
'rawcensustractandblock','censustractandblock',
'airconditioningdesc','heatingorsystemdesc',
'garagecarcnt','garagetotalsqft',
'basementsqft',
'finishedfloor1squarefeet','finishedsquarefeet12','finishedsquarefeet13',
'finishedsquarefeet15','finishedsquarefeet50','finishedsquarefeet6',
'fireplacecnt','hashottuborspa',
'poolcnt','poolsizesum','pooltypeid10','pooltypeid2','pooltypeid7',
'threequarterbathnbr',
'yardbuildingsqft17','yardbuildingsqft26',
'fireplaceflag',
'taxdelinquencyflag','taxdelinquencyyear',
'architecturalstyledesc',
'buildingclassdesc',
'numberofstories',
'storydesc',
'typeconstructiondesc',
])
return df
| [
"codywatson@codys-MacBook-Pro.local"
] | codywatson@codys-MacBook-Pro.local |
4c54b23822c77598fc8746f24f4c1bf18cdad087 | d9fb6c246965cbf290186268298859ddb913ee6e | /190813/03_mod.py | 3a21a5da1950eb762f029d3aa591e49c9be98f49 | [] | no_license | 91hongppie/algorithm | 1ca6d54de6eab252c708bf83835ace8a109d73fc | 4c2fa8178e0ef7afbf0b736387f05cbada72f95d | refs/heads/master | 2020-07-20T22:17:40.700366 | 2020-06-29T00:06:11 | 2020-06-29T00:06:11 | 206,717,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import sys
sys.stdin = open('sample_input_03.txt', 'r')
N = int(input())
for i in range(1, N+1):
play = list(map(int, input().split()))
test_words = [[] for i in range(play[0])]
for j in range(play[0]):
test_words[j] = list(map(str, input()))
for m in range(play[0]):
for n in range(play[0]):
mo_list = test_words[m][n:play[0]:] | [
"91hongppie@gmail.com"
] | 91hongppie@gmail.com |
f407b06b2595c41745867f79d0c2bf69dedc166e | 638fa52ac8fc9439f3ad06682c98c21646baf317 | /LatinGlyphs.py | 30d438949c7076016bea617653261187307fa34f | [
"Apache-2.0"
] | permissive | DunwichType/mixer | 68c1965de0a30b34ce9e852fe9e7323e9cb2f6eb | 7b82ba851fdb5b3aa26092d5d54d5e5e47f9b8a1 | refs/heads/master | 2021-01-10T17:10:20.760847 | 2015-10-19T22:30:52 | 2015-10-19T22:30:52 | 44,567,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,631 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Lists of Latin Glyphs for use with Mixer
# Basic Latin Alphabet
majbasic = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
minbasic = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
allbasic = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# DTF Latin Character Set
majuscules = [u'A', u'À', u'Á', u'Â', u'Ã', u'Ä', u'Å', u'Æ', u'Ā', u'Ă', u'Ą', u'Æ', u'Ǽ', u'Z', u'B', u'C', u'Ç', u'Ć', u'Ĉ', u'Ċ', u'Č', u'D', u'Ď', u'Đ', u'E', u'È', u'É', u'Ê', u'Ë', u'Ē', u'Ĕ', u'Ė', u'Ę', u'Ě', u'F', u'G', u'Ĝ', u'Ğ', u'Ġ', u'Ģ', u'H', u'Ĥ', u'Ħ', u'I', u'Ì', u'Í', u'Î', u'Ï', u'Ĩ', u'Ī', u'Ĭ', u'Į', u'J', u'Ĵ', u'K', u'Ķ', u'L', u'Ĺ', u'Ļ', u'Ľ', u'Ł', u'Ŀ', u'M', u'N', u'Ń', u'Ņ', u'Ň', u'Ŋ', u'Ñ', u'Ò', u'Ó', u'Ô', u'Õ', u'Ö', u'Ō', u'Ŏ', u'Ő', u'Ø', u'Ǿ', u'Œ', u'P', u'Q', u'Þ', u'R', u'Ŕ', u'Ř', u'Ŗ', u'S', u'Ś', u'Ŝ', u'Ş', u'Š', u'Ș', u'T', u'Ţ', u'Ť', u'Ŧ', u'Ț', u'U', u'Ù', u'Ú', u'Û', u'Ü', u'Ũ', u'Ū', u'Ŭ', u'Ů', u'Ű', u'Ų', u'V', u'W', u'Ŵ', u'Ẁ', u'Ẃ', u'Ẅ', u'X', u'Y', u'Ý', u'Ŷ', u'Ÿ', u'Z', u'Ź', u'Ż', u'Ž']
minuscules = [u'a', u'à', u'á', u'â', u'ã', u'ä', u'å', u'æ', u'ā', u'ă', u'ą', u'æ', u'ǽ', u'b', u'v', u'ç', u'ć', u'ĉ', u'ċ', u'č', u'd', u'ď', u'đ', u'e', u'è', u'é', u'ê', u'ë', u'ē', u'ĕ', u'ė', u'ę', u'ě', u'f', u'g', u'ĝ', u'ğ', u'ġ', u'ģ', u'h', u'ĥ', u'ħ', u'i', u'ì', u'í', u'î', u'ï', u'ĩ', u'ī', u'ĭ', u'į', u'j', u'ĵ', u'k', u'ķ', u'l', u'ĺ', u'ļ', u'ľ', u'ł', u'ŀ', u'm', u'n', u'ń', u'ņ', u'ň', u'ŋ', u'ñ', u'o', u'ò', u'ó', u'ô', u'õ', u'ö', u'ō', u'ŏ', u'ő', u'ø', u'ǿ', u'œ', u'p', u'þ', u'q', u'r', u'ŕ', u'ř', u'ŗ', u's', u'ś', u'ŝ', u'ş', u'š', u'ș', u'ß', u't', u'ţ', u'ť', u'ŧ', u'ț', u'u', u'ù', u'ú', u'û', u'ü', u'ũ', u'ū', u'ŭ', u'ů', u'ű', u'ų', u'v', u'w', u'ŵ', u'ẁ', u'ẃ', u'ẅ', u'x', u'y', u'ý', u'ŷ', u'ÿ', u'z', u'ź', u'ż', u'ž']
# Punctuation
basicpunct = [u'.', u',', u'\"', u'!', u'?', u'&']
punct = [u'.', u'…', u',', u':', u';', u'?', u'¿', u'!', u'¡', u'\'', u'\"', u'‘', u'’', u'‚', u'“', u'”', u'„', u'‹', u'›', u'«', u'»', u'-', u'–', u'—', u'_', u'†', u'‡', u'•', u'*', u'©', u'®', u'™', u'@', u'¶', u'(', u')', u'[', u']', u'{', u'}', u'/', u'\\', u'|']
# Numbers
currency = [u'#', u'%', u'&', u'¢', u'$', u'£', u'¥', u'ƒ', u'€']
numerals = [u'0', u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9']
prebuilt = [u'½', u'¼', u'¾', u'⅓', u'⅔', u'⅛', u'⅜', u'⅝']
math = [u'<', u'+', u'−', u'=', u'÷', u'×', u'>', u'±', u'^', u'~', u'|', u'¦', u'§', u'°', u'ª', u'º', u'%']
fractions = [u'½', u'¼', u'¾', u'⅓', u'⅔', u'⅛', u'⅜', u'⅝']
# Prototyping
adhesion = [u'a', u'd', u'h', u'e', u's', u'i', u'o', u'n']
ADHESION = [u'A', u'D', u'H', u'E', u'S', u'I', u'O', u'N']
handgloves = [u'h', u'a', u'n', u'd', u'g', u'l', u'o', u'v', u'e', u's']
HANDGLOVES = [u'H', u'A', u'N', u'D', u'G', u'L', u'O', u'V', u'E', u'S']
hamburgefontivs = [u'h', u'a', u'm', u'b', u'u', u'r', u'g', u'e', u'f', u'o', u'n', u't', u'i', u'v', u's']
HAMBURGEFONTIVS = [u'H', u'A', u'M', u'B', u'U', u'R', u'G', u'E', u'F', u'O', u'N', u'T', u'I', u'V', u'S']
#Latin Extended B
majLatinXB = [u'Ɓ', u'Ƃ', u'Ƅ', u'Ɔ', u'Ƈ', u'Ɖ', u'Ɗ', u'Ƌ', u'Ǝ', u'Ə', u'Ɛ', u'Ƒ', u'Ɠ', u'Ɣ', u'Ɩ', u'Ɨ', u'Ƙ', u'Ɯ', u'Ɲ', u'Ɵ', u'Ơ', u'Ƣ', u'Ƥ', u'Ʀ', u'Ƨ', u'Ʃ', u'ƪ', u'Ƭ', u'Ʈ', u'Ư', u'Ʊ', u'Ʋ', u'Ƴ', u'Ƶ', u'Ʒ', u'Ƹ', u'ƻ', u'Ƽ', u'ǀ', u'ǁ', u'ǂ', u'ǃ', u'DŽ', u'Dž', u'LJ', u'Lj', u'NJ', u'Nj', u'Ǎ', u'Ǐ', u'Ǒ', u'Ǔ', u'Ǖ', u'Ǘ', u'Ǚ', u'Ǜ', u'Ǟ', u'Ǡ', u'Ǣ', u'Ǥ', u'Ǧ', u'Ǩ', u'Ǫ', u'Ǭ', u'Ǯ', u'DZ', u'Dz', u'Ǵ', u'Ƕ', u'Ƿ', u'Ǹ', u'Ǻ', u'Ǽ', u'Ȁ', u'Ȃ', u'Ȅ', u'Ȇ', u'Ȉ', u'Ȋ', u'Ȍ', u'Ȏ', u'Ȑ', u'Ȓ', u'Ȕ', u'Ȗ', u'Ș', u'Ț', u'Ȝ', u'Ȟ', u'Ƞ', u'Ȣ', u'Ȥ', u'Ȧ', u'Ȩ', u'Ȫ', u'Ȭ', u'Ȯ', u'Ȱ', u'Ȳ', u'Ⱥ', u'Ȼ', u'Ƚ', u'Ⱦ', u'Ɂ', u'Ƀ', u'Ʉ', u'Ʌ', u'Ɇ', u'Ɉ', u'Ɋ', u'Ɍ', u'Ɏ']
minusLatinXB = [u'ƀ', u'ƃ', u'ƅ', u'ƈ', u'ƌ', u'ƍ', u'ƕ', u'ƙ', u'ƚ', u'ƛ', u'ơ', u'ƣ', u'ƥ', u'ƨ', u'ƫ', u'ƭ', u'ư', u'ƴ', u'ƶ', u'ƹ', u'ƺ', u'ƽ', u'ƾ', u'ƿ', u'dž', u'lj', u'nj', u'ǎ', u'ǐ', u'ǒ', u'ǔ', u'ǖ', u'ǘ', u'ǚ', u'ǜ', u'ǝ', u'ǟ', u'ǡ', u'ǣ', u'ǥ', u'ǧ', u'ǩ', u'ǫ', u'ǭ', u'ǯ', u'dz', u'ǵ', u'ǹ', u'ǻ', u'ǽ', u'ȁ', u'ȃ', u'ȅ', u'ȇ', u'ȉ', u'ȋ', u'ȍ', u'ȏ', u'ȑ', u'ȓ', u'ȕ', u'ȗ', u'ș', u'ț', u'ȝ', u'ȟ', u'ȡ', u'ȣ', u'ȥ', u'ȧ', u'ȩ', u'ȫ', u'ȭ', u'ȯ', u'ȱ', u'ȳ', u'ȴ', u'ȵ', u'ȶ', u'ȷ', u'ȸ', u'ȹ', u'ȼ', u'ȿ', u'ɀ', u'ɂ', u'ɇ', u'ɉ', u'ɋ', u'ɍ', u'ɏ']
#Control Characters
lc_control = [u'anon ', u'bnon ', u'cnon ', u'dnon ', u'enon ', u'fnon ', u'gnon ', u'hnon ', u'inon ', u'jnon ', u'knon ', u'lnon ', u'mnon ', u'nnon ', u'onon ', u'pnon ', u'qnon ', u'rnon ', u'snon ', u'tnon ', u'unon ', u'vnon ', u'wnon ', u'xnon ', u'ynon ', u'znon ']
controls = [u'H', u'O', u'h', u'n', u'o']
majcontrols = [u'H', u'O']
mincontrols = [u'h', u'o', u'n']
figcontrols = [u'0', u'1'] | [
"junker@dunwichtype.com"
] | junker@dunwichtype.com |
a90e5a8ba7a2476925946904b9c73c06d09cfda9 | e7c8bcd6156956123c0ffcd1e3603e8b0ba0fcf8 | /tickTacToe.py | 0da1e458ef3a0aadf53bbbd2573513c697c80919 | [] | no_license | flow0787/python | 421efa9c3541d618778e2f7cd3f3aceed946ee6e | 38a995568c10ff31c516d85877cb9b0ad9596a3a | refs/heads/master | 2020-04-06T06:55:46.278080 | 2020-02-12T08:32:32 | 2020-02-12T08:32:32 | 63,419,040 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | theBoard = {"topL": " ", "topM": " ", "topR": " ",
"midL": " ", "midM": " ", "midR": " ",
"lowL": " ", "lowM": " ", "lowR": " "}
def printBoard(board):
print(board['topL'] + '|' + board['topM'] + '|' + board['topR'])
print('-+-+-')
print(board['midL'] + '|' + board['midM'] + '|' + board['midR'])
print('-+-+-')
print(board['lowL'] + '|' + board['lowM'] + '|' + board['lowR'])
turn = "X"
for i in range(9):
printBoard(theBoard)
move = input("Turn for " + turn + ". Move on which space?")
theBoard[move] = turn
if turn == 'X':
turn = "O"
else:
turn = "X"
printBoard(theBoard) | [
"badeaflorien@gmail.com"
] | badeaflorien@gmail.com |
cd4f12206ec91523ba27cb33a771f3673c839cd1 | cc129db64fc64d1cb9a99526583771c10e245deb | /tests/test_det_next_michigan_development_corporation.py | da9a98ab1e31ab67be68a83440ae713aa016e955 | [
"MIT"
] | permissive | avelosa/city-scrapers-det | a42df36b7d2e98f7be68ae17e22c03af7a20280c | 964b941b67fb5113cda5e2bebd2ba288ac1422d7 | refs/heads/main | 2023-02-02T01:19:07.396737 | 2020-09-29T16:52:11 | 2020-09-29T16:52:11 | 300,441,174 | 1 | 0 | MIT | 2020-10-01T22:30:23 | 2020-10-01T22:30:22 | null | UTF-8 | Python | false | false | 4,826 | py | from datetime import datetime
from os.path import dirname, join
import pytest
import scrapy
from city_scrapers_core.constants import BOARD, PASSED, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from scrapy.settings import Settings
from city_scrapers.spiders.det_next_michigan_development_corporation import (
DetNextMichiganDevelopmentCorporationSpider,
)
LOCATION = {
"name": "DEGC, Guardian Building",
"address": "500 Griswold St, Suite 2200, Detroit, MI 48226",
}
TITLE = "Board of Directors"
test_response = file_response(
join(dirname(__file__), "files", "det_next_michigan_development_corporation.html"),
url="http://www.degc.org/public-authorities/d-nmdc/",
)
freezer = freeze_time("2018-07-26")
spider = DetNextMichiganDevelopmentCorporationSpider()
spider.settings = Settings(values={"CITY_SCRAPERS_ARCHIVE": False})
freezer.start()
parsed_items = [item for item in spider._next_meetings(test_response)]
freezer.stop()
def test_initial_request_count():
freezer.start()
items = list(spider.parse(test_response))
freezer.stop()
assert len(items) == 3
urls = {r.url for r in items if isinstance(r, scrapy.Request)}
assert urls == {
"http://www.degc.org/public-authorities/d-nmdc/fy-2017-2018-meetings/",
"http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings/",
}
# current meeting http://www.degc.org/public-authorities/ldfa/
def test_title():
assert parsed_items[0]["title"] == TITLE
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2018, 9, 11, 9)
def test_end():
assert parsed_items[0]["end"] is None
def test_id():
assert (
parsed_items[0]["id"]
== "det_next_michigan_development_corporation/201809110900/x/board_of_directors"
)
def test_status():
assert parsed_items[0]["status"] == TENTATIVE
def test_location():
assert parsed_items[0]["location"] == LOCATION
def test_sources():
assert parsed_items[0]["source"] == "http://www.degc.org/public-authorities/d-nmdc/"
def test_links():
assert parsed_items[0]["links"] == []
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == BOARD
# previous meetings e.g.
# http://www.degc.org/public-authorities/ldfa/fy-2017-2018-meetings/
test_prev_response = file_response(
join(
dirname(__file__),
"files",
"det_next_michigan_development_corporation_prev.html",
),
url="http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings",
)
freezer.start()
parsed_prev_items = [item for item in spider._parse_prev_meetings(test_prev_response)]
parsed_prev_items = sorted(parsed_prev_items, key=lambda x: x["start"], reverse=True)
freezer.stop()
def test_prev_request_count():
freezer.start()
items = list(spider._prev_meetings(test_response))
freezer.stop()
urls = {r.url for r in items if isinstance(r, scrapy.Request)}
assert len(urls) == 2
assert urls == {
"http://www.degc.org/public-authorities/d-nmdc/fy-2017-2018-meetings/",
"http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings/",
}
def test_prev_meeting_count():
assert len(parsed_prev_items) == 1
def test_prev_title():
assert parsed_prev_items[0]["title"] == TITLE
def test_prev_description():
assert parsed_prev_items[0]["description"] == ""
def test_prev_start():
assert parsed_prev_items[0]["start"] == datetime(2017, 8, 8, 9)
def test_prev_end():
assert parsed_prev_items[0]["end"] is None
def test_prev_id():
assert (
parsed_prev_items[0]["id"]
== "det_next_michigan_development_corporation/201708080900/x/board_of_directors"
)
def test_prev_status():
assert parsed_prev_items[0]["status"] == PASSED
def test_prev_location():
assert parsed_prev_items[0]["location"] == LOCATION
def test_prev_source():
assert (
parsed_prev_items[0]["source"]
== "http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings"
)
def test_prev_links():
assert parsed_prev_items[0]["links"] == [
{
"href": "http://www.degc.org/wp-content/uploads/2016-08-09-DNMDC-Special-Board-Meeting-Agenda-4-1.pdf", # noqa
"title": "D-NMDC Agenda",
},
]
@pytest.mark.parametrize("item", parsed_prev_items)
def test_prev_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_prev_items)
def test_prev_classification(item):
assert item["classification"] == BOARD
| [
"pjsier@gmail.com"
] | pjsier@gmail.com |
258e2deac675e627b1e12054d8f0b720e887f41f | aa0fc44d694f2b971bbda827c755296faa44d86f | /test/py2neo/index_test.py | f253623c2f494e062c1e5909427d941f1080a0bb | [
"Apache-2.0"
] | permissive | fugu13/py2neo | 8f6a5065883c7eb96bb0d32c45bce2a9533d19a5 | d3fa87199b51b554d1d04c7334d1bc7b887f7273 | refs/heads/master | 2021-01-17T04:52:58.719996 | 2012-08-01T20:44:43 | 2012-08-01T20:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,211 | py | #/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
PY3K = sys.version_info[0] >= 3
__author__ = "Nigel Small <py2neo@nigelsmall.org>"
__copyright__ = "Copyright 2011 Nigel Small"
__license__ = "Apache License, Version 2.0"
from py2neo import neo4j
import unittest
def default_graph_db():
return neo4j.GraphDatabaseService("http://localhost:7474/db/data/")
class NodeIndexTestCase(unittest.TestCase):
def setUp(self):
self.graph_db = default_graph_db()
def test_get_node_index(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
self.assertIsNotNone(index1)
self.assertEqual("index1", index1.name)
self.assertEqual(neo4j.Node, index1.content_type)
def test_add_node_to_index(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice, = self.graph_db.create({"name": "Alice Smith"})
index1.add("surname", "Smith", alice)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_add_node_to_index_with_spaces(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("family name", "von Schmidt")
alice, = self.graph_db.create({"name": "Alice von Schmidt"})
index1.add("family name", "von Schmidt", alice)
entities = index1.get("family name", "von Schmidt")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_add_node_to_index_with_odd_chars(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("@!%#", "!\"£$%^&*()")
alice = self.graph_db.create_node({"name": "Alice Smith"})
index1.add("@!%#", "!\"£$%^&*()", alice)
entities = index1.get("@!%#", "!\"£$%^&*()")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_add_multiple_nodes_to_index(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice, bob, carol = self.graph_db.create(
{"name": "Alice Smith"},
{"name": "Bob Smith"},
{"name": "Carol Smith"}
)
index1.add("surname", "Smith", alice, bob, carol)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(3, len(entities))
for entity in entities:
self.assertTrue(entity in (alice, bob, carol))
def test_get_or_create_node(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice = index1.get_or_create("surname", "Smith", {"name": "Alice Smith"})
self.assertIsNotNone(alice)
self.assertTrue(isinstance(alice, neo4j.Node))
self.assertEqual("Alice Smith", alice["name"])
alice_id = alice.id
for i in range(10):
alice = index1.get_or_create("surname", "Smith", {"name": "Alice Smith"})
self.assertIsNotNone(alice)
self.assertTrue(isinstance(alice, neo4j.Node))
self.assertEqual("Alice Smith", alice["name"])
self.assertEqual(alice_id, alice.id)
def test_add_node_if_none(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice, bob = self.graph_db.create(
{"name": "Alice Smith"}, {"name": "Bob Smith"}
)
index1.add_if_none("surname", "Smith", alice)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
index1.add_if_none("surname", "Smith", bob)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_node_index_query(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("colour", "red")
index1.remove("colour", "green")
index1.remove("colour", "blue")
red, green, blue = self.graph_db.create({}, {}, {})
index1.add("colour", "red", red)
index1.add("colour", "green", green)
index1.add("colour", "blue", blue)
colours_containing_R = index1.query("colour:*r*")
self.assertTrue(red in colours_containing_R)
self.assertTrue(green in colours_containing_R)
self.assertFalse(blue in colours_containing_R)
if __name__ == '__main__':
unittest.main()
| [
"nigel@nigelsmall.name"
] | nigel@nigelsmall.name |
4212426d83cef5a31b6993b1859aa096f5a86957 | c7bb490ef96fda51a946478a4f584814e1665a6a | /backend/urls.py | 06c33f1ea3c2e43ed3c886400d353b67ec87d687 | [] | no_license | pawanpaudel93/motion-planning-dashboard | e70acc9737cdedf0fd0beac0a0700cc88f9c2559 | 642f5955d518747dfc14f1f22a93ef20784329d8 | refs/heads/master | 2023-03-11T14:33:31.643898 | 2021-02-28T11:26:16 | 2021-02-28T11:26:16 | 340,398,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | """MPD URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
"""
from django.contrib import admin
from django.urls import path, include, re_path
from rest_framework import routers
from .api import urls as api_urls
from .api.views import index_view
router = routers.DefaultRouter()
urlpatterns = [
path('api/v1/', include(api_urls)),
path('admin/', admin.site.urls),
re_path(r'^.*$', index_view, name='index')
]
| [
"pawanpaudel93@gmail.com"
] | pawanpaudel93@gmail.com |
966163ac218a00e186e1835d56634756e84143fb | cf72eced416ae5fee75e194b4da7728a00520c54 | /Chapter12SpreadSheetCellInverter.py | 1c1fd68ab2760d6d47d1bc1f4deba4c7b1928ea9 | [] | no_license | spencercorwin/automate-the-boring-stuff-answers | ed08080ec3c38a5cc84510e13995cb3cb95d5809 | e564658ae702672670e17c2989a58d75b9110d32 | refs/heads/master | 2021-09-19T02:50:39.541981 | 2018-07-22T22:09:13 | 2018-07-22T22:09:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #! usr/bin/env python3
#Chapter 12 Challenge - Spreadsheet Cell Inverter
#This program inverts the row and column of cells in a spreadsheet
import os, openpyxl, pprint
os.chdir('/Users/spencercorwin/Desktop')
wb = openpyxl.load_workbook('testFile.xlsx')
sheet = wb.active
resultSheet = wb.create_sheet(index=2, title='resultSheet')
sheetData = []
for r in range(0, sheet.max_row):
sheetData.append([])
for c in range(0, sheet.max_column):
sheetData[r].append(sheet.cell(row = r+1, column = c+1).value)
for r in range(0, sheet.max_row):
for c in range(0, sheet.max_column):
resultSheet.cell(row = c+1, column = r+1).value = sheetData[r][c]
wb.save('myTestResult.xlsx')
| [
"noreply@github.com"
] | spencercorwin.noreply@github.com |
97cb339e8b6bfc2cd89e1ed0be47bb4f41d910d8 | 1feffdfcdc376ce007d64c911ebbe31826bf217e | /Core/CoreEngine.py | 9100c47f796a722de061abd7fb2c2871a97ad97e | [] | no_license | Zaladar/rpgmotor | 1c3bfae5c332351b0d69559575ebe5a8cd2fdc55 | dccc84f73008132be18a2a9c389d02d68b14b28d | refs/heads/master | 2020-12-30T11:38:56.071865 | 2017-10-20T12:32:47 | 2017-10-20T12:32:47 | 91,575,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,185 | py | #1 standard livraries
from random import *
#2 Third party
#3 Local
dices = {}
class Dice:
def __init__(self, name, sides, mod):
self.name = name
self.sides = int(sides)
self.mod = int (mod)
# här borde finnas decorator som kan applicera mod
def rolling(self, t):
#printout här kanske ej bör finnas?
print("rolling your dice!")
res = 0
i = 0
while i < t:
res += randint(1, self.sides)
return res
# what die exists
def storeddie(self):
if self.name in dices:
print("name: " + str(self.name))
print("sides: " + str(self.sides))
# to create new dies if there is a need for it
def DiceCreator(self):
print(' =[Dice Creator]=')
print('=[used to create dice]=')
print(' ')
sides = input('how many sides does your dice have?:')
rn = input("want to name them? default name is d" + sides)
if rn.lower() == "yes":
name = input("name:")
elif rn.lower() == "no":
print("ok")
name = rn
else:
print("invalid input")
#dices[name] = Dice(name, sides) dettak anske inte ska vara kvar som så, eftersom funktionen borde returnera en variand av Dice
print("dices:" + ",".join([x for x in dices.keys()]))
def sides(self, name):
sidesqq = int(input("what do you wish to set your sides to?"))
if isinstance(sidesqq, int):
dices[name] = Dice(name, sidesqq)
else:
print("incvalid input")
bootupseq()
def rename(self, name):
qq = input("This will change the name of the dice proceed?")
if qq == "yes":
nameqq = input(" what do you want to call these dice?")
dices[name] = Dice(nameqq, self.sides)
elif qq == "no":
print("okay, rebootinng")
bootupseq()
else:
print("invalid input")
bootupseq()
def DiceBase():
pd = [2, 3, 4, 6, 8, 10, 12, 20, 100]
for i in pd:
dice = {
"name": 'd' + str(i),
"sides": i,
}
dices[dice["name"]] = Dice(**dice)
print("db done")
# en ny funktion spel kontrol ska skapas och där ska man kunna initiera spel boot up ska bara kunna kalla på spelinitiering karaktärs och tärnings förändringar och information.
def Gameinit():
type = input("what kind of game do you wish to play, pathfinder or dark heresy?")
if type.lower() == "pathfinder":
print("pathfinder is being setup!")
elif type.lower() == "dark heresy":
print("dark heresy is being setup!")
else:
print("invalid input,returning to boot up sequence!")
bootupseq()
def bootupseq():
while True:
ans = input('what function do you want to start? type help for... help...:').lower()
if ans == 'dice creator':
Dice.DiceCreator()
elif ans == 'rolling':
name = input("what dice do you wish to use?")
if name in dices.keys():
Dice.rolling(dices[name])
else:
print("invalid input, dices doesn't exist! use dice creator")
elif ans == "existing die":
print("dices: " + ",".join([x for x in dices.keys()]))
req = input("do you want to look at any of the dice? yes/no:")
req = req.lower()
if req == "yes":
name = input("what dice?")
if name.lower() in dices.keys():
Dice.storedie(dices[name])
else:
print("not in dice")
elif ans == "game init":
qq = input("what game? Pathfinder or Dark heresy").lower()
if qq == ("pathfinder"|"dark heresy"):
Gameinit(qq)
elif ans == 'help':
print('lol noob')
print('functions available: Dice creator, Existing die, Game init and Rolling')
elif ans == 'break':
break
else:
print('invalid input')
print("input:" + ans)
DiceBase()
bootupseq() | [
"oscarjwhaglund@gmail.com"
] | oscarjwhaglund@gmail.com |
e8c4c60a57463e9f15f1b88dd4eda1629eea2dfc | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /2JHYavYqynX8ZCmMG_5.py | f3bd9ad800ee0f88625397c941672c01b7288b50 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py |
def ascii_sort(lst):
if sum([ord(x) for x in lst[0]]) <= sum([ord(x) for x in lst[1]]):
return lst[0]
return lst[1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0bc44e39ed3c0411a6484900df8dc4ccda28fa3a | 67b0379a12a60e9f26232b81047de3470c4a9ff9 | /profile/migrations/0042_auto_20170225_1639.py | 6f002bfd9f51f8ca97ff8153953db520d0afe6e9 | [] | no_license | vintkor/whitemandarin | 8ea9022b889fac718e0858873a07c586cf8da729 | 5afcfc5eef1bb1cc2febf519b04a4819a7b9648f | refs/heads/master | 2021-05-06T03:35:09.367375 | 2017-12-20T15:43:08 | 2017-12-20T15:43:08 | 114,904,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-25 14:39
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('profile', '0041_auto_20170217_1405'),
]
operations = [
migrations.AlterField(
model_name='user',
name='date_of_birth',
field=models.DateField(default=datetime.datetime(2017, 2, 25, 14, 39, 18, 342403, tzinfo=utc)),
),
]
| [
"alkv84@yandex.ru"
] | alkv84@yandex.ru |
272977aa9883b7e270a1e4aa51d6f4540f0c7ef8 | ada39040fa1e56fb7de6147ff62e6c8dee1f69bb | /Backend.py | 9d49833595073ea3e4fab7d29b580bc74cec35ea | [] | no_license | hendpraz/chatbot-pattern-matching | c18880fde6df9663768ee482c233e5804823b756 | 478c873f1e53398c6f37919cda9e0f77a0194a88 | refs/heads/master | 2020-05-09T21:09:52.091634 | 2019-04-25T01:13:50 | 2019-04-25T01:13:50 | 181,433,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,956 | py | #!/usr/bin/python
import sys
import re
from utils import stopwords, listSynonym, FAQs
#from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
#from ntlk.corpus import stopwords
#from ntlk.tokenize import word_tokenize
numOfQuestion = 0
questionDB = []
answerDB = []
#factory = StopWordRemoverFactory()
#stopwords = factory.get_stop_words()
# KNUTH MORRIS PRAT #
def bigThree(value,idxes):
#Mengembalikan indeks indeks dengan nilai terbesar
newIdxes = [0]*3
#SelectionSort
for i in range(3):
max = 0
maxIdx = -1
for j in range(i,len(idxes)):
if(value[j] > max):
max = value[j]
maxIdx = j
#Swap
temp = idxes[i]
idxes[i] = idxes[maxIdx]
idxes[maxIdx] = temp
temp = value[i]
value[i] = value[maxIdx]
value[maxIdx] = temp
for i in range(3):
newIdxes[i] = idxes[i]
return newIdxes
def borderFunctionKMP(str, m):
suffLen = 0
border = [0]*m
i = 1
while (i < m):
if (str[i] == str[suffLen]):
suffLen = suffLen + 1
border[i] = suffLen
i = i + 1
else:
if (suffLen != 0):
suffLen = border[suffLen - 1]
else:
border[i] = 0
i = i + 1
return border
def knuthMorrisPrat(string1, txt):
n = len(txt) #Dikurangi tanda tanya
m = len(string1)
match = False
wholeScore = m * 100 / n
if(wholeScore >= 90) and (wholeScore <= 110):
# Periksa seluruh string secara eksak
border = borderFunctionKMP(string1,m)
i = 0
j = 0
while (i < n):
if (string1[j] == txt[i]):
i = i + 1
j = j + 1
if (j == m):
#Pattern ditemukan
match = True
j = border[j-1]
elif (i < n) and (string1[j] != txt[i]):
#Tidak cocok, geser
if(j != 0):
j = border[j-1]
else:
i = i + 1
if(match):
return wholeScore
countMatch = 0
if(not match):
tokenizedString = string1.split()
totalLength = len(txt)
n = len(txt) - 1 # Dikurangi tanda tanya
for substring in tokenizedString:
#Cari setiap sinonimnya
listOfPattern = findSynonym(substring)
for pattern in listOfPattern:
m = len(pattern)
border = borderFunctionKMP(pattern,m)
patternMatch = False
i = 0
j = 0
while (i < n):
if (pattern[j] == txt[i]):
i = i + 1
j = j + 1
if (j == m):
#Pattern ditemukan
countMatch = countMatch + m + 1 #Ditambah sebuah spasi
j = border[j-1]
patternMatch = True
break #BreakWhile
elif (i < n) and (pattern[j] != txt[i]):
#Tidak cocok, geser
if(j != 0):
j = border[j-1]
else:
i = i + 1
if(patternMatch):
break #BreakFor
if(wholeScore <= 110):
return (countMatch * 100.0 / totalLength)
elif(countMatch > 0):
return (totalLength * 100.0 / countMatch)
else:
return 0
#Kemungkinan lain
return 0
def resultKMP(string):
#knuth-morris-Prat
max = 0
maxIdx = -1
countOfResult = 0
idxes = []
maxValues =[]
for i in range(numOfQuestion):
# Kode
x = knuthMorrisPrat(string,questionDB[i])
if(x >= 90):
#Ketemu
countOfResult = countOfResult + 1
maxValues.append(x)
idxes.append(i)
if(x > max):
max = x
maxIdx = i
if(countOfResult == 0):
if(maxIdx != -1):
idxes.append(maxIdx)
elif(countOfResult > 3):
idxes = bigThree(maxValues,idxes)
return ((countOfResult > 0), idxes)
# BOYER MOORE #
def badCharBM(string):
#Banyak jenis karakter = 256
#Diinisialisasi dengan -1
badChar = [-1]*256
m = len(string)
for i in range(m):
#Mengubah ke nilai char (tabel ASCII)
badChar[ord(string[i])] = i
return badChar
def boyerMoore(string1,txt):
n = len(txt) #Dikurangi tanda tanya
m = len(string1)
wholeScore = m * 100 / n
match = False
if(wholeScore >= 90) and (wholeScore <= 110):
# Seluruh string dicocokan
badChar = badCharBM(string1)
shift = 0
while(shift <= n-m):
j = m - 1
while(j >= 0) and (string1[j] == txt[shift+j]):
j = j - 1
if(j < 0):
# Pattern ditemukan
match = True
break #BreakWhile
else:
shift = shift + max(1, j-badChar[ord(txt[shift+j])])
if(match):
return wholeScore
if(not match):
#Per substring
tokenizedString = string1.split()
countMatch = 0
totalLength = len(txt)
n = len(txt) - 1
for substring in tokenizedString:
#Cari setiap sinonimnya
listOfPattern = findSynonym(substring)
patternMatch = False
for pattern in listOfPattern:
m = len(pattern)
badChar = badCharBM(pattern)
shift = 0
while(shift <= n-m):
j = m - 1
while(j >= 0) and (pattern[j] == txt[shift+j]):
j = j - 1
if(j < 0):
# Pattern ditemukan
countMatch = countMatch + m + 1 #Ditambah sebuah spasi
patternMatch = True
break #BreakWhile
else:
shift = shift + max(1, j-badChar[ord(txt[shift+j])])
if(patternMatch):
break #BreakFor
if(wholeScore <= 110):
return (countMatch * 100.0 / totalLength)
elif(countMatch > 0):
return (totalLength * 100.0 / countMatch)
else:
return 0
#kemungkinan lain
return 0
def resultBM(str):
#boyer moore
max = 0
maxIdx = -1
countOfResult = 0
idxes = []
maxValues = []
for i in range(numOfQuestion):
# Kode
x = boyerMoore(str,questionDB[i])
if(x >= 90):
#Ketemu
countOfResult = countOfResult + 1
idxes.append(i)
if(x > max):
max = x
maxIdx = i
if(countOfResult == 0):
if(maxIdx != -1):
idxes.append(maxIdx)
elif(countOfResult > 3):
idxes = bigThree(maxValues,idxes)
return ((countOfResult > 0), idxes)
# REGULAR EXPRESSION #
def buildString(tokenizedString, line, j):
stringBuilt = "(.*)"
for i in range(len(tokenizedString)):
if(i == j):
stringBuilt = stringBuilt + line + "(.*)"
else:
stringBuilt = stringBuilt + tokenizedString[i] + "(.*)"
def resultRegex(string):
    """Match query *string* against every known question using regular
    expressions.

    :param string: pre-processed user query
    :return: tuple (found, idxes) with the same contract as resultBM:
        found is True when at least one question matched within the length
        tolerance; idxes lists the matching indices (closest question when
        nothing matched, top three when more than three matched).
    """
    maxIdx = -1
    best = 0
    countOfResult = 0
    idxes = []
    maxValues = []
    tokenizedString = string.split()  # hoisted: does not depend on i
    for i in range(numOfQuestion):
        j = 0
        for substring in tokenizedString:
            substringSynonyms = findSynonym(substring)
            for line in substringSynonyms:
                # NOTE(review): `pattern` is built but the search below still
                # uses the raw query; switching to `pattern` must be done
                # together with making buildString() return its result.
                pattern = buildString(tokenizedString, line, j)
                x = re.search(string, questionDB[i], re.M | re.I)
                if(x):
                    score = len(string) * 100.0 / len(questionDB[i])
                    if(score <= 110):
                        countOfResult += 1
                        maxValues.append(score)
                        idxes.append(i)
                    if(score > best):
                        best = score
                        # bug fix: the best-scoring index was never recorded,
                        # so the "closest question" fallback below never fired.
                        maxIdx = i
                    break  # break for: this word matched
            if(x):
                break  # break for: stop at the first matching word
            else:
                j += 1
    if(countOfResult == 0):
        if(maxIdx != -1):
            idxes.append(maxIdx)
    elif(countOfResult > 3):
        idxes = bigThree(maxValues, idxes)
    return ((countOfResult > 0), idxes)
# OTHER FUNCTION
def otherFunc(string):
    """Placeholder for an additional pattern-matching algorithm.

    Not implemented yet: always reports a zero score at position 0,
    regardless of the input query.
    """
    return (0, 0)
def initDB():
    """Populate the global question/answer lists from disk and the FAQ table.

    Reads 'pertanyaan.txt' (questions) and 'jawaban.txt' (answers), strips the
    question mark and stopwords from each question (a trailing space is kept
    so word-boundary matching works), then appends the module-level FAQs
    entries. Updates the global numOfQuestion counter.
    """
    global numOfQuestion
    numOfQuestion = 1
    # Seed entry so the bot can always introduce itself.
    questionDB.append("Siapa nama Anda")
    answerDB.append("Aku Fluffball")
    # Context managers guarantee the files are closed even on error
    # (the original handles were only closed on the success path).
    with open("pertanyaan.txt", "r") as quest:
        for line in quest:
            numOfQuestion = numOfQuestion + 1
            questString = line.replace("?", "")
            questString = removeStopWords(questString.strip()) + " "
            questionDB.append(questString)
    with open("jawaban.txt", "r") as ans:
        for line in ans:
            answerDB.append(line.strip())
    # Append the hard-coded FAQ entries (unpack directly instead of
    # shadowing the builtin name `tuple`).
    for que, ans in FAQs:
        numOfQuestion = numOfQuestion + 1
        questionDB.append(removeStopWords(que) + " ")
        answerDB.append(ans)
def removeStopWords(string):
    """Return *string* with every word listed in the global ``stopwords``
    collection removed; remaining words are joined by single spaces."""
    kept = [word for word in string.split() if word not in stopwords]
    return " ".join(kept)
def findSynonym(string):
    """Look up the synonym group of *string* in the global ``listSynonym``.

    Returns the first group that contains the exact word, so callers can try
    every variant; when the word has no known synonyms, returns a one-element
    list holding the word itself.
    """
    for synonymGroup in listSynonym:
        if string in synonymGroup:
            return synonymGroup
    return [string]
def talk(string):
    """Print *string* prefixed with the chatbot's name."""
    print(f"Fluffball : {string}")
# Main program #
def useKMP(string):
    """Answer query *string* using the Knuth-Morris-Pratt matcher."""
    tampikanHasil(*resultKMP(string))
def useBM(string):
    """Answer query *string* using the Boyer-Moore matcher."""
    tampikanHasil(*resultBM(string))
def useRegex(string):
    """Answer query *string* using the regular-expression matcher."""
    tampikanHasil(*resultRegex(string))
def tampikanHasil(found, listHasil):
    """Print the chatbot's reply for a list of candidate question indices.

    :param found: True when at least one question matched confidently
    :param listHasil: indices into the global questionDB/answerDB lists
    """
    if(found):
        if(len(listHasil) == 1):
            # Single confident match: answer it directly.
            print(answerDB[listHasil[0]])
        else:
            # Several candidates: ask the user to pick one.
            otp = ", ".join(questionDB[i].strip() + "?" for i in listHasil)
            print("Pilih pertanyaan ini : " + otp)
    else:
        if(len(listHasil) == 0):
            # No candidate at all.
            print("Saya tidak mengerti maksud Anda")
        else:
            # bug fix: strip the trailing space stored with each question
            # before appending '?', consistent with the branches above.
            print("Mungkin maksud Anda : " + questionDB[listHasil[0]].strip() + "?")
def DebugAll():
    """Interactive debug loop: ask the user to pick a matching algorithm,
    then answer typed questions until the user enters 'end'."""
    initDB()
    talk("Halo, ada yang bisa dibantu?")
    talk("Pilih metode pencarian")
    print("1. Knuth-Morris-Prat")
    print("2. Boyer-Moore")
    print("3. Regular expression")
    choice = int(input("Anda : "))
    while(True):
        if(choice >= 1) and (choice <= 3):
            string = str(input("Anda : "))
            if(string == "end"):
                break
            # Normalise the question the same way the database entries were.
            string = string.replace("?","")
            string = removeStopWords(string)
            if(choice == 1):
                useKMP(string)
            elif(choice == 2):
                useBM(string)
            elif(choice == 3):
                useRegex(string)
        else:
            # Invalid menu choice: re-prompt.
            talk("Invalid input!! Masukkan kembali pilihan Anda")
            choice = int(input("Anda : "))
def Execute():
    """Batch mode: answer every question found in chatLog.txt using the
    matcher selected by sys.argv[1] ('1' = KMP, '2' = Boyer-Moore,
    '3' = regular expressions)."""
    initDB()
    # Context manager guarantees the log file is closed
    # (the original handle was never closed -- resource leak).
    with open("chatLog.txt", "r") as chatLog:
        for line in chatLog:
            getQuestion = line.strip().replace("?", "")
            getQuestion = removeStopWords(getQuestion)
            if(sys.argv[1] == '1'):
                useKMP(getQuestion)
            elif(sys.argv[1] == '2'):
                useBM(getQuestion)
            elif(sys.argv[1] == '3'):
                useRegex(getQuestion)
#DebugAll()
#DebugKMP()
#DebugBM()
#DebugRegex
# Entry point: run in batch mode; the matcher is chosen via sys.argv[1].
Execute()
| [
"45161697+hendpraz@users.noreply.github.com"
] | 45161697+hendpraz@users.noreply.github.com |
c13f2224b2a218046e61fc2c3ce17a06270f3028 | d6a030dfacb63fcb7ede3b1ed97dd723e5aab478 | /plotfio.py | fad2a7454bd7ff152db230d88500db0c26b49071 | [] | no_license | ahlfors/FIO-scripts | 0adabf2ffc164de67b7757aeb7ed6a4eb999b711 | b8fcb4998d3516606da965f421a6f6f3c8957dc2 | refs/heads/master | 2023-03-16T14:53:40.037181 | 2019-03-18T19:32:32 | 2019-03-18T19:32:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,417 | py | #!/usr/bin/env python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import csv, argparse
# NOTE: matplotlib.use('Agg') must be called before pyplot is imported (it
# already is, above); the duplicate call that used to live here, after the
# pyplot import, had no effect and has been removed.

# --- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-f', dest="files", type=str, nargs='+', required=True, help='The out.txt files')
parser.add_argument('-l', dest="labels", type=str, nargs='+', required=True, help='Label for each curve')
parser.add_argument('-m', dest="markers", type=str, nargs='+', required=False, help='Marker for each curve')
parser.add_argument('-s', dest="scale", type=str, required=False, help='Scale of y-axis')
parser.add_argument('-x', dest="xlabel", type=str, required=True, help='Label of x-axis')
parser.add_argument('-y', dest="ylabel", type=str, required=True, help='Label of y-axis')
parser.add_argument('-o', dest="outputfolder", required=True, help="Ouput folder")
parser.add_argument('-n', dest="name", required=True, help="Name of output plot")
parser.add_argument('-t', dest="title", required=True, help="Title of output plot")
args = parser.parse_args()

# Optional target unit for the y values ("MB" or "GB"); None keeps raw units.
scale = None
if args.scale is not None:
    scale = args.scale

# Plot one curve per input CSV. Each file has a header row followed by
# (x, value) rows; the header tells us whether values are in KB or MB.
for index, f in enumerate(args.files):
    with open(f, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        header = next(reader, None)
        x, y = [], []
        for row in reader:
            x.append(int(row[0]))
            # Convert the value into the requested unit.
            if scale == "MB":
                if "MB" in header[1]:
                    y.append(float(row[1]))
                elif "KB" in header[1]:
                    y.append(float(row[1]) / float(1024))
            elif scale == "GB":
                if "MB" in header[1]:
                    y.append(float(row[1]) / float(1024))
                elif "KB" in header[1]:
                    y.append(float(row[1]) / float(1024 * 1024))
            else:
                y.append(float(row[1]))
        N = len(x)
        x2 = np.arange(N)
        if args.markers is not None:
            plt.plot(x2, y, marker=args.markers[index], markersize=7, fillstyle='none', label=args.labels[index])
        else:
            plt.plot(x2, y, label=args.labels[index])
        plt.xticks(x2, x, rotation=90)

plt.xlabel(args.xlabel)
plt.ylabel(args.ylabel)
plt.xlim(left=0)
plt.ylim(bottom=0)
plt.title(args.title)
plt.legend()
plt.tight_layout()
#plt.show()
plt.savefig(args.outputfolder + "/" + args.name + ".eps", format="eps")
| [
"batsarasnikos@gmail.com"
] | batsarasnikos@gmail.com |
99b49fca33ce2929cfd1a125527e1ee432ccfad4 | 42d58ba3005263744a04e6eb6e5a7e550b4eef29 | /Day2_Tip_Calculator.py | b63dca6d2e892a1ae8ef34186a247a15cec8473e | [] | no_license | ShaneNelsonCodes/100Days_Python | 8828ccfba0873084316f46bc98a53c603de452ae | 4cea2ba5fe0459a4676c7d164b5402dd244ff11b | refs/heads/main | 2023-02-12T14:01:10.726683 | 2021-01-12T13:59:04 | 2021-01-12T13:59:04 | 317,283,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | #If the bill was $150.00, split between 5 people, with 12% tip.
#Each person should pay (150.00 / 5) * 1.12 = 33.6
#Format the result to 2 decimal places = 33.60
#Tip: You might need to do some research in Google to figure out how to do this.
def split_bill(bill, tip_percent, people):
    """Return each person's rounded share of *bill* plus *tip_percent*% tip.

    :param bill: total bill amount in dollars
    :param tip_percent: tip as a whole-number percentage (e.g. 12 for 12%)
    :param people: number of people splitting the bill
    :return: per-person amount rounded to 2 decimal places
    """
    return round(bill * (1 + tip_percent / 100) / people, 2)


if __name__ == "__main__":
    print("Welcome to the tip calculator\n")
    bill = float(input("What was the total bill?\n$"))
    tip = int(input("What percentage would you like to give? 10, 12, or 15\n%"))
    split = int(input("How many people to split the bill?\n"))
    # bug fix: format to exactly 2 decimal places as the spec requires
    # (round() alone printed 33.6 instead of 33.60).
    print(f"Each person should pay: ${split_bill(bill, tip, split):.2f}")
| [
"Shane.Nelson@kp.org"
] | Shane.Nelson@kp.org |
6eb0d30982c51c95fe8b185a70ce7a5e912cdd20 | 2da72c9f9bbb0b5db33710cddbdee28503e5a606 | /UCI/pyQT-matplot-example 2.py | 0228e2bce7c9d982c2ca7970f732c4860c0e6cc5 | [] | no_license | gddickinson/python_code | 2e71fb22b929cb26c2a1456b11dc515af048c441 | dbb20e171fb556e122350fb40e12cc76adbb9a66 | refs/heads/master | 2022-10-26T15:20:40.709820 | 2022-10-11T16:06:27 | 2022-10-11T16:06:27 | 44,060,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 16:50:19 2015
@author: George
"""
import sys
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# bug fix: NavigationToolbar2QTAgg lives in the Qt4Agg backend module, not in
# the top-level matplotlib package; the previous import raised ImportError.
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
import matplotlib.pyplot as plt
import random
class Window(QtGui.QDialog):
    """Qt dialog embedding a matplotlib figure, its navigation toolbar and a
    button that redraws a random line plot on every click."""
    def __init__(self, parent=None):
        super(Window, self).__init__(parent)
        # a figure instance to plot on
        self.figure = plt.figure()
        # this is the Canvas Widget that displays the `figure`
        # it takes the `figure` instance as a parameter to __init__
        self.canvas = FigureCanvas(self.figure)
        # this is the Navigation widget
        # it takes the Canvas widget and a parent
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Just some button connected to `plot` method
        self.button = QtGui.QPushButton('Plot')
        self.button.clicked.connect(self.plot)
        # set the layout: toolbar on top, canvas in the middle, button below
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        layout.addWidget(self.button)
        self.setLayout(layout)
    def plot(self):
        ''' Plot 10 random values on the embedded figure (button handler). '''
        # random data
        data = [random.random() for i in range(10)]
        # create an axis
        ax = self.figure.add_subplot(111)
        # discards the old graph
        # NOTE(review): Axes.hold() was removed in matplotlib 3.0 -- this call
        # only works with the old matplotlib versions this 2015 file targets.
        ax.hold(False)
        # plot data
        ax.plot(data, '*-')
        # refresh canvas
        self.canvas.draw()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the dialog, then
    # hand control to the event loop until the window is closed.
    # (Stray dataset residue that was fused onto the last line was removed.)
    app = QtGui.QApplication(sys.argv)
    main = Window()
    main.show()
    sys.exit(app.exec_())
"george.dickinson@gmail.com"
] | george.dickinson@gmail.com |
bf5bf6f46c12b9cda5cfa83050001a0e72113069 | 79e45fa0e495be5aa967b21467771a27970df99b | /178.py | c1be11f9ea4f1241ffac13ed1324ae98de88788d | [] | no_license | kisa77/Crawl | 7d3b6d7077e60a47f5336b2976b226646cfe2cdb | ec25b563007481b169165f06ca04560d64a1ea74 | refs/heads/master | 2016-09-02T11:19:14.152200 | 2013-07-16T10:12:09 | 2013-07-16T10:12:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,896 | py | #/usr/bin/python
#coding=utf8
import urllib2,urlparse
import os,sys,bs4,chardet,MySQLdb
import re
from datetime import datetime
from HTMLParser import HTMLParser
class Crawl:
    """ Class Crawl crawl data from db.178.com """
    # NOTE(review): this is Python 2 code (print statement, urllib2,
    # HTMLParser module); it will not run under Python 3 without porting.
    __data = ''       # per-item scratch value; reset to a dict by crawl_item()
    __connect = ''    # lazily created MySQL connection, shared at class level
    __retryMax = 3    # range(1, 3) in crawl_item means only 2 attempts happen
    def __init__(self):
        # WARNING: database credentials are hard-coded here.
        self.connect(c_host='localhost', c_user='root', c_passwd='root12')
    def request_url(self,url):
        # Fetch *url* with a browser-like User-Agent; returns the response
        # object, or None when the request fails.
        try:
            request = urllib2.Request(url)
            request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) \ AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 ')
            return urllib2.urlopen(request)
        # NOTE(review): CrawlError is never defined anywhere in this file, so
        # if urlopen raises, this except clause itself fails with NameError.
        except CrawlError as e:
            return
    def write_file(self,file_name, content_list):
        # Helper: dump the prettified HTML of each element to *file_name*.
        # Not used by the main crawl flow below.
        file = open(file_name, 'w')
        for item in content_list:
            file.write(item.prettify())
        file.close()
    def parse_web_page(self, cont, from_encoding='utf-8'):
        # Parse raw HTML into a BeautifulSoup tree.
        # NOTE(review): the from_encoding parameter is ignored -- the call
        # below hard-codes 'utf-8'.
        return bs4.BeautifulSoup(cont, from_encoding='utf-8')
    def connect(self, c_host, c_user, c_passwd):
        # Create the shared MySQL connection on first use and return it.
        if not self.__connect:
            self.__connect = MySQLdb.connect(host=c_host, user=c_user, passwd=c_passwd)
            return self.__connect
        else:
            return self.__connect
    def output_log(self, msg):
        # Print *msg* prefixed with a timestamp (Python 2 print statement).
        print "[" + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "]\t" + msg
    def save_to_db(self, data):
        # Insert one crawled item into weixin.wow_items, skipping ids that
        # already exist; commits on success.
        self.output_log("---\tsave to db...")
        self.__connect.select_db('weixin')
        cursor = self.__connect.cursor()
        # Parameterised queries: safe against SQL injection.
        cursor.execute("select id from wow_items where id = %s", data['id'])
        tmpResult = cursor.fetchall()
        if tmpResult:
            self.output_log("item " + data['id'] + " already exists! skip...")
            return
        # Value order must match the wow_items column order.
        insertData = [data['id'], data['name'], 0, 0, data['position'], data['attribute'], '']
        insertData += [data['quality'], data['qnumber'], data['img'], data['html']]
        cursor.execute("insert into wow_items values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", insertData)
        self.__connect.commit()
        del(cursor)
        self.output_log("---\tsave to db success!")
        return
    def crawl_item(self, url):
        # Fetch one item page, scrape its fields and persist them.
        # Retries up to __retryMax - 1 times; returns early when the server
        # reports 'no data' for this item id.
        self.__data = {}
        for i in range(1, self.__retryMax):
            self.output_log("crawling " + url + " ... retry:" + str(i))
            tmpCont = self.request_url(url)
            if not tmpCont :
                continue
            if tmpCont.readline() == 'no data':
                self.output_log("---\t no data")
                return
            tmpSoup = self.parse_web_page(tmpCont.read())
            bbCode = tmpSoup.find(id='bbcode_content')
            # Each field is scraped independently; a failure falls back to ''
            # instead of aborting the whole item.
            try :
                self.__data['img'] = re.compile(r'\[img\](.*)\[\/img\]').findall(bbCode.prettify())[0]
            except:
                self.__data['img'] = ''
            try :
                self.__data['quality'] = re.compile(r'(\d)').findall(tmpSoup.find(id='item_detail').find('h2')['class'][0])[0]
            except:
                self.__data['quality'] = ''
            try :
                self.__data['name'] = tmpSoup.find(id='item_detail').find('strong').text
            except:
                self.__data['name'] = ''
            try :
                self.__data['id'] = re.compile(r'ID:([0-9]*)').findall(tmpSoup.find(id='item_detail').find('span').text)[0]
            except:
                self.__data['id'] = ''
            try :
                self.__data['qnumber'] = tmpSoup.find(id='item_detail').find(id='ilv').text
            except:
                self.__data['qnumber'] = ''
            try :
                self.__data['position'] = tmpSoup.find(id='item_detail').find('table').find('table').find('th').text
            except:
                self.__data['position'] = ''
            try :
                self.__data['html'] = tmpSoup.find(id='main').find_all('div')[1].prettify()
            except:
                self.__data['html'] = ''
            try :
                """ strip html tag """
                parser = HTMLParser()
                tmpList = []
                parser.handle_data = tmpList.append
                parser.feed(tmpSoup.find(id='item_detail').find(id='_dps').prettify().strip("\n"))
                parser.close()
                self.__data['attribute'] = ''.join(tmpList)
            except:
                self.__data['attribute'] = ''
            """ del temporary variables"""
            # NOTE(review): if the HTMLParser block above raised before
            # `parser`/`tmpList` were bound, this del raises NameError.
            del(parser,tmpList,tmpSoup,bbCode,tmpCont)
            if not self.__data:
                continue
            return self.save_to_db(self.__data)
# Driver: crawl every item id in the range, skipping ids that fail.
crawl = Crawl()
for num in range(1495, 100000):
    try :
        request_url = 'http://db.178.com/wow/cn/item/' + str(num) + '.html'
        crawl.crawl_item(url=request_url)
    # bug fix: narrowed the bare except, and dropped the Python-2 `print` of
    # output_log()'s return value -- output_log() always returns None, so it
    # printed the log line and then a spurious "None".
    except Exception:
        crawl.output_log('Exception! skip..')
| [
"lixiao@comsenz.com"
] | lixiao@comsenz.com |
def run_queries(lines):
    """Process the range-sum/point-update query stream from *lines*.

    Input format: line 1 is the array size (unused), line 2 the array values,
    line 3 the query count Q, then Q lines of "op i j" where op '1' means
    "sum A[i..j] inclusive" and anything else means "set A[i] = j".

    :param lines: iterable of text lines (e.g. an open file)
    :return: list of the sums produced by the type-1 queries, in order
    """
    it = iter(lines)
    next(it)  # first line (array size) is not needed
    A = [int(x) for x in next(it).split()]
    results = []
    for _ in range(int(next(it))):
        op, i, j = next(it).split()
        i, j = int(i), int(j)
        if op == '1':
            # Inclusive range sum via the C-level builtin instead of a loop.
            results.append(sum(A[i:j + 1]))
        else:
            A[i] = j
    return results


if __name__ == '__main__':
    # bug fix: the input file handle was never closed; `with` guarantees it.
    with open('input.txt', 'r') as f:
        for value in run_queries(f):
            print(value)
| [
"noreply@github.com"
] | xdatageek.noreply@github.com |
66c605b31758d9dc44858b8868ef503970bdaba6 | 4ea7855ef54e1a62df5d25aa30acfc564c676ab9 | /catalog/tests/test_models.py | dae7890e4f8af49e7b195ea6714413e628481aa5 | [
"CC0-1.0"
] | permissive | Novel-Public-Health/Novel-Public-Health | 00282b9d3801494f7fc25f5f44c33070fa119bef | fbb7dd0da64ae4fc9641097ca8056152129bd83b | refs/heads/main | 2023-04-29T19:27:51.440301 | 2021-05-07T03:42:10 | 2021-05-07T03:42:10 | 337,804,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,617 | py | from django.test import TestCase
# Create your tests here.
from catalog.models import Director, Genre, Language, Movie, Profile
class DirectorModelTest(TestCase):
    """Tests for the Director model's field labels and URL routing."""
    @classmethod
    def setUpTestData(cls):
        # Set up non-modified objects used by all test methods.
        Director.objects.create(name='Big Bob')
    def test_name_label(self):
        director = Director.objects.get(id=1)
        field_label = director._meta.get_field('name').verbose_name
        # assertEquals is a deprecated unittest alias; assertEqual is the
        # supported spelling (same behaviour).
        self.assertEqual(field_label, 'name')
    def test_date_of_birth_label(self):
        director = Director.objects.get(id=1)
        field_label = director._meta.get_field('date_of_birth').verbose_name
        self.assertEqual(field_label, 'date of birth')
    def test_date_of_death_label(self):
        director = Director.objects.get(id=1)
        field_label = director._meta.get_field('date_of_death').verbose_name
        self.assertEqual(field_label, 'died')
    def test_get_absolute_url(self):
        director = Director.objects.get(id=1)
        # This will also fail if the urlconf is not defined.
        self.assertEqual(director.get_absolute_url(), '/directors/1')
class GenreModelTest(TestCase):
    """Checks that the most popular genres round-trip through the model."""
    @classmethod
    def setUpTestData(cls):
        # Set up most popular genres.
        cls.genres = ['Action', 'Fantasy', 'Comedy', 'Romance', 'Documentary']
        for name in cls.genres:
            Genre.objects.create(name=name)
    def test_name_label(self):
        for num, name in enumerate(self.genres, start=1):
            genre = Genre.objects.get(id=num)
            field_label = genre._meta.get_field('name').verbose_name
            # assertEquals is a deprecated unittest alias; use assertEqual.
            self.assertEqual(field_label, 'name')
            # test the name of the genre
            self.assertEqual(genre.name, name)
class LanguageModelTest(TestCase):
    """Checks that the most popular languages round-trip through the model."""
    @classmethod
    def setUpTestData(cls):
        # Set up most popular languages.
        cls.languages = ['Mandarin Chinese', 'Spanish', 'English', 'Hindi']
        for name in cls.languages:
            Language.objects.create(name=name)
    def test_name_label(self):
        for num, name in enumerate(self.languages, start=1):
            language = Language.objects.get(id=num)
            field_label = language._meta.get_field('name').verbose_name
            # assertEquals is a deprecated unittest alias; use assertEqual.
            self.assertEqual(field_label, 'name')
            # test the name of the language
            self.assertEqual(language.name, name)
import re
class MovieModelTest(TestCase):
    """Tests for Movie instances built manually and from an IMDb link."""
    @classmethod
    def setUpTestData(cls):
        cls.imdb_movie = Movie.objects.create(
            title='Concussion',
            imdb_link='https://www.imdb.com/title/tt3322364/'
        )
        cls.non_imdb_movie = Movie.objects.create(
            title='Public Health YOU Should Know',
            director=Director.objects.create(name='Big Bob'),
            language=Language.objects.create(name='English'),
            summary="This movie is the greatest thing to ever see. \
            Forreal.",
            genre=Genre.objects.create(name='Documentary'),
            year='2021'
        )
    def test_non_imdb_movie(self):
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(self.non_imdb_movie.director.name, 'Big Bob')
        self.assertEqual(self.non_imdb_movie.language.name, 'English')
        self.assertEqual(self.non_imdb_movie.genre.name, 'Documentary')
    def test_imdb_movie(self):
        # NOTE(review): get_imdb_stats() appears to query live IMDb data, so
        # this test depends on an external service -- confirm.
        imdb_stats = self.imdb_movie.get_imdb_stats()
        self.assertEqual(imdb_stats[0], 2015)
        self.assertEqual(imdb_stats[1]['name'], 'Peter Landesman')
        self.assertTrue(re.match(r'(Biography|Drama|Sport)', imdb_stats[2]))
"haleau@live.unc.edu"
] | haleau@live.unc.edu |
0f983bfba296e9069d60f2ce24011761d28fcdc7 | fcfc18039d05878f6156536a6757832a3576147b | /Final_Project_Code_IS590PR_Functions.py | 47773ad62be23389aa8a872651a9c749d91fc25a | [] | no_license | rahulrohri/IS590PR-Spring-2020-Final-Project | 76fa6d8a0a56e39c155a0cc718ce0f06bd14d225 | f101cb2ceacf83c3e24fb07f63e5456497e8d116 | refs/heads/master | 2022-07-07T12:51:07.558538 | 2020-05-13T11:45:13 | 2020-05-13T11:45:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,195 | py | """
IS590 PR - University of Illinois - Urbana Champaign
This file is a list of functions that have been used for our project and are
Intended to support the jupyter notebook titled Final_Project_Code_IS590PR.ipynb
Name: NYC_Public_Safety_Functions.py
Team Members :
Megha Manglani (GitHub id – meghamm2)
Rahul Ohri (GitHub id- rahulrohri)
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Base directory holding the (very large) NYC open-data CSV exports; the
# three dataset paths below are derived from it and read by the functions
# in this module.
my_dir = 'C:/Users/rahul/Downloads/UIUC/Sem 2 - Spring 2020/Courses/Programing Analytics/Final Project/DataSets/' #https://github.com/iSchool-590pr/PR_Sp20_examples/blob/master/week_07/class7_pandas_pt2.ipynb
NYPD_Arrests = my_dir +'NYPD_Arrests_Data__Historic_.csv' # Loading NYPD Arrest Data file
Complaints = my_dir +'NYPD_Complaint_Data_Historic.csv' # Loading NYPD Complaints Data file
EMS_incident = my_dir +'EMS_Incident_Dispatch_Data.csv' # Loading EMS incident dispatch Data file
def dataset_validation():
    """
    This function is used to check if the input data files are having the correct column headers that are needed
    for our hypotheses analysis. The files need to be loaded from the local computer directory since they are
    very large. If the files are not present , then the user can download it from a google drive link provided
    by our team or can go to the official website from which the data was downloaded.
    >>> NYPD_Arrests = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/Arrest_Correct.csv' # Loading NYPD Arrest Data file
    >>> Complaints = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/Complaints_Correct.csv' # Loading NYPD Complaints Data file
    >>> EMS_incident = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/EMS_Correct.csv' # Loading EMS incident dispatch Data file
    >>> dataset_validation()
    The columns necessary for analysis are present in the EMS data file
    The columns necessary for analysis are present in the Complaints data file
    The columns necessary for analysis are present in the Arrests data file
    """
    # One entry per dataset: (path, required columns, label, download help).
    # This replaces three copies of the same read/check/print logic; the
    # printed messages are preserved byte-for-byte.
    checks = [
        (EMS_incident,
         ['INCIDENT_RESPONSE_SECONDS_QY', 'INCIDENT_DATETIME', 'BOROUGH'],
         'EMS',
         "The columns necessary for analysis are not present in the EMS data file. Kindly download the dataset files from - https://drive.google.com/open?id=1g_StaWiaWQyNjNOu3wlFKG2dsIJZjyjF or the latest file from https://data.cityofnewyork.us/Public-Safety/EMS-Incident-Dispatch-Data/76xm-jjuj"),
        (Complaints,
         ['CMPLNT_NUM', 'CMPLNT_FR_DT', 'BORO_NM', 'VIC_RACE', 'OFNS_DESC'],
         'Complaints',
         "The columns necessary for analysis are not present in the Complaints data file. Kindly download the dataset files from - https://drive.google.com/open?id=112LOH-fYjUn5AHVnFbgQYRAAhSCcXvjq or the latest file from https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Historic/qgea-i56i "),
        (NYPD_Arrests,
         ['ARREST_BORO', 'ARREST_DATE', 'ARREST_KEY'],
         'Arrests',
         "The columns necessary for analysis are not present in the Arrests data file. Kindly download the dataset files from - https://drive.google.com/open?id=1g_StaWiaWQyNjNOu3wlFKG2dsIJZjyjF or the latest file from https://catalog.data.gov/dataset/nypd-arrests-data-historic"),
    ]
    for path, required, label, help_msg in checks:
        # Reading a single row is enough to obtain the header columns.
        header_cols = list(pd.read_csv(path, nrows=1))
        if all(col in header_cols for col in required):
            print("The columns necessary for analysis are present in the " + label + " data file")
        else:
            print(help_msg)
def get_file(file, cols) -> pd.DataFrame:
    """
    This function produced a dataframe that consists of the columns that are needed fr analysis from a datafile.
    Since in our project we are using between 2 - 4 columns for each hypothesis analysis rather than loading the
    entire data file, the dataframe will end up containing only between 2-4 columns.
    >>> test_file = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/Airplane.csv'
    >>> print("Enter 'AircraftHex' and 'SessionID' as column names")
    Enter 'AircraftHex' and 'SessionID' as column names
    >>> answer = get_file(test_file,2)
    enter your column name 1 and press enter:enter your column name 2 and press enter:
    >>> answer.iloc[0]['AircraftHex']
    'A902B5'
    >>> test_file = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/Airplane.csv'
    >>> get_file(test_file,5)
    'Invalid number of columns'
    """
    # Guard clause first so invalid sizes never reach the prompts.
    if cols not in (2, 3, 4):
        return "Invalid number of columns"
    # One loop replaces three near-duplicated if/elif branches whose prompts
    # were inconsistently worded ("... inside and press enter" for cols == 4);
    # every prompt now uses the same wording as the 2-column branch.
    col_list = []
    for idx in range(1, cols + 1):
        col_list.append(input('enter your column name ' + str(idx) + ' and press enter:'))
    data_file = pd.read_csv(file, usecols=col_list)  # Import only necessary columns from the dataset
    return data_file
# Extracting the Month and Year of the incident
def extract_year_month(x, old_col: str, month_column: str, year_column: str):
    """Derive month and year columns from a mm/dd/yyyy date column.

    Adds *month_column* (characters 0-1, e.g. '07') and *year_column*
    (characters 6-9, e.g. '2015') to dataframe *x* in place and returns it.

    :param x: dataframe to operate on (mutated and returned)
    :param old_col: name of the column holding dates formatted mm/dd/yyyy
    :param month_column: name of the new month column to create
    :param year_column: name of the new year column to create
    :return: the same dataframe with the two derived columns added
    """
    x[month_column] = x[old_col].str.slice(0, 2)
    x[year_column] = x[old_col].str.slice(6, 10)
    return x
def get_arrest_or_crime_count(dfname, col_year, col_month, col_boro, col_key) -> pd.core.frame.DataFrame:
    """Count records per (borough, year, month) for years after 2005.

    Rows with a missing year are dropped, the year column is coerced to
    int64, and records from 2005 or earlier are excluded before grouping.

    :param dfname: input dataframe of arrest or complaint records
    :param col_year: column holding the (possibly float/NaN) year value
    :param col_month: column holding the month value
    :param col_boro: column holding the borough/area name
    :param col_key: column to aggregate with a count
    :return: multi-index dataframe (borough, year, month) with a count column
    """
    valid = dfname.dropna(subset=[col_year]).astype({col_year: 'int64'})
    recent = valid[valid[col_year] > 2005]
    return recent.groupby([col_boro, col_year, col_month]).agg({col_key: ['count']})
def plot_graph(n, df, l, b, var1: str, var2: str, var3: str, var4: str, var5: str, constant: str):
    '''
    This function is used to plot a line graph for a multilevel index dataframe. The graph has multiple
    subplots as well depending upon the number of index groups in the first column of the dataframe. eg
    in one datframe we have 5 boroughs that are used as the grouping column and thus we will have 5
    subplots
    :param n: number of subplots
    :param df: the dataframe for which graphs have to be plotted
    :param l: length of the subplot figure
    :param b: breadth of the subplot figure
    :param var1: Index value to be plotted
    :param var2: Index value to be plotted
    :param var3: Index value to be plotted
    :param var4: Index value to be plotted
    :param var5: Index value to be plotted
    :param constant: Constant part of text to be displayed in the title of the subplot
    # https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html
    >>> arrays = [np.array(['india', 'USA', 'italy', 'italy', 'india', 'canada', 'india', 'USA','australia']),np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two','one'])]
    >>> df_new = pd.DataFrame(np.random.randn(9, 5), index=arrays) #Creating a dummy multi-index dataframe
    >>> plot_graph(5,df_new,18,20,'india','USA','italy','australia','canada','Just testing')
    '''
    # NOTE(review): exactly 5 subplots are created and 5 index values plotted,
    # so n must be >= 5 for axis[0]..axis[4] to exist -- confirm callers.
    figure, axis = plt.subplots(n, 1, figsize=(
        l, b))  # https://stackoverflow.com/questions/25386870/pandas-plotting-with-multi-index
    # df.xs(label) selects one outer-index group; each group gets its own axis.
    df.xs(var1).plot(kind='line', ax=axis[0]).set_title(
        var1 + ' - ' + constant)  # https://pandas.pydata.org/pandas-docs/version/0.16.0/visualization.html
    df.xs(var2).plot(kind='line', ax=axis[1]).set_title(var2 + ' - ' + constant)
    df.xs(var3).plot(kind='line', ax=axis[2]).set_title(var3 + ' - ' + constant)
    df.xs(var4).plot(kind='line', ax=axis[3]).set_title(var4 + ' - ' + constant)
    df.xs(var5).plot(kind='line', ax=axis[4]).set_title(var5 + ' - ' + constant)
def race_percentage(row, colname) -> pd.core.series.Series:
    """Look up the share of NYC's population (total 8,175,133) belonging to
    the race named in ``row[colname]``.

    Intended for use with ``DataFrame.apply(..., axis=1)`` to attach a
    race-share column for per-capita normalisation.
    Source: https://worldpopulationreview.com/us-cities/new-york-city-population/

    :param row: a dataframe row (or any mapping) holding the race label
    :param colname: name of the field containing the race label
    :return: the population share as a float, or None for unknown labels
    """
    population_share = {
        'AMERICAN INDIAN/ALASKAN NATIVE': 0.0043,
        'ASIAN / PACIFIC ISLANDER': 0.14,
        'BLACK': 0.2195,
        'BLACK HISPANIC': 0.0233,
        'WHITE': 0.3214,
        'WHITE HISPANIC': 0.1053,
        'UNKNOWN/OTHER': 0.1862,
    }
    # .get returns None for unmatched labels, matching the original if-chain.
    return population_share.get(row[colname])
def offense_per_victim_race(dataframe_name) -> pd.DataFrame:
    """
    Count complaints per (offense, victim race) pair and normalise the counts
    by each race's share of the NYC population.

    :param dataframe_name: NYC complaints dataframe; needs at least the
        columns OFNS_DESC, VIC_RACE and CMPLNT_NUM.  NOTE: the VIC_RACE
        column of the input is modified in place (NaN -> 'UNKNOWN'), matching
        the original behaviour.
    :return: dataframe with columns OFNS_DESC, VIC_RACE, CMPLNT_NUM,
        race_percentage and 'Normalized results'

    >>> sample = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/offense_per_victim_race_sample.csv'
    >>> sample_df = pd.read_csv(sample)
    >>> ans = offense_per_victim_race(sample_df)
    >>> ans.iloc[0]['race_percentage']
    0.14
    """
    # Fold missing / 'OTHER' race labels into a single 'UNKNOWN/OTHER' bucket.
    dataframe_name.VIC_RACE = dataframe_name.VIC_RACE.fillna('UNKNOWN')
    dataframe_name = dataframe_name.replace({'VIC_RACE': 'UNKNOWN'}, 'UNKNOWN/OTHER')
    dataframe_name = dataframe_name.replace({'VIC_RACE': 'OTHER'}, 'UNKNOWN/OTHER')
    # Selecting only a particular set of crimes that involve harming another human
    type_of_crime = ['HARRASSMENT 2', 'BURGLARY', 'ROBBERY', 'FELONY ASSAULT', 'SEX CRIMES', 'OFFENSES INVOLVING FRAUD',
                     'RAPE', 'THEFT-FRAUD', 'MURDER & NON-NEGL. MANSLAUGHTER', 'KIDNAPPING & RELATED OFFENSES',
                     'OFFENSES RELATED TO CHILDREN', 'KIDNAPPING', 'OTHER OFFENSES RELATED TO THEF', 'PETIT LARCENY',
                     'GRAND LARCENY', 'FORGERY', 'FRAUDS', 'ASSAULT 3 & RELATED OFFENSES']
    dataframe_name = dataframe_name[dataframe_name.OFNS_DESC.isin(type_of_crime)]
    # Complaint count per (offense, victim race) pair.
    dataframe_name = dataframe_name.groupby(["OFNS_DESC", "VIC_RACE"], as_index=False).count()
    dataframe_name = dataframe_name[['OFNS_DESC', 'VIC_RACE', 'CMPLNT_NUM']]
    # Bug fix: the original ran this apply() twice and discarded the first
    # result; compute it once and keep it.
    dataframe_name['race_percentage'] = dataframe_name.apply(lambda row: race_percentage(row, 'VIC_RACE'), axis=1)
    # Scale raw counts by the race's population share.
    dataframe_name['Normalized results'] = dataframe_name['CMPLNT_NUM'] / dataframe_name['race_percentage']
    dataframe_name['Normalized results'] = dataframe_name['Normalized results'].astype('int64')
    return dataframe_name
def population_density_details(filename) -> pd.core.frame.DataFrame:
    '''
    Build the per-borough population-density table from the NYC population CSV.
    The 'Area in sq. km' column is attached manually (Wikipedia figures).

    :param filename: the population CSV (columns Borough, Year, Population)

    >>> nyc_population_sample = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/New_York_City_Population_sample.csv'
    >>> ans = population_density_details(nyc_population_sample)
    >>> ans.iloc[0]['Population']
    147388
    '''
    census = pd.read_csv(filename)
    # Total the post-2000 population figures for each borough.
    borough_pop_df = (census[census['Year'] > 2000]
                      .groupby(['Borough'])['Population']
                      .sum()
                      .to_frame()
                      .reset_index())
    # NOTE(review): the area list is positional — it assumes exactly five
    # boroughs in alphabetical order (Bronx, Brooklyn, Manhattan, Queens,
    # Staten Island).  Source:
    # https://en.wikipedia.org/wiki/Demographics_of_New_York_City
    borough_pop_df['Area in sq. km'] = [109.04, 183.42, 59.13, 281.09, 151.18]
    borough_pop_df['Population Density'] = (borough_pop_df['Population']
                                            / borough_pop_df['Area in sq. km']).astype('int64')
    borough_pop_df['Borough'] = borough_pop_df['Borough'].str.upper()
    return borough_pop_df.rename(columns={"Borough": "BORO_NM"})
def corr_coeff(col1, col2) -> np.float64:
    """
    Scatter-plot two series and return their Pearson correlation.

    :param col1: The first dataframe column you want to use for correlation calculation
    :param col2: The second dataframe column you want to use for correlation calculation
    :return: The correlation value between the two columns which is of numpy float data type

    >>> sample_csv = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/Correlation_dummy.csv'
    >>> sample_df = pd.read_csv(sample_csv)
    >>> corr_coeff(sample_df['Age'],sample_df['Height(m)'])
    0.7723621551319031
    >>> data_dummy = {'Weight': [55,66,77,88,99,33],'Age': [22,33,44,55,66,15]}
    >>> df_dummy = pd.DataFrame (data_dummy, columns = ['Weight','Age'])
    >>> corr_coeff(df_dummy['Weight'],df_dummy['Age'])
    0.9787474369757403
    """
    # Side effect kept from the original: draw the scatter plot of the pair.
    plt.scatter(col1, col2)
    return col1.corr(col2)
# Module-level setup: build the borough population-density lookup table once;
# the crime/EMS helpers below join against it.
# NOTE(review): `my_dir` must be defined earlier in the script/notebook.
NYC_Population = my_dir + 'New_York_City_Population.csv' # path of the NYC population CSV
Pop_density_df = population_density_details(NYC_Population)  # shared (BORO_NM, Population, Density) table
def get_crime_results(dfname, pop_density_df=None) -> pd.core.frame.DataFrame:
    '''
    Compute complaints per capita for each (offense, borough) pair.

    :param dfname: NYC complaints dataframe on which operations have to be
        performed (needs OFNS_DESC, BORO_NM and CMPLNT_NUM columns)
    :param pop_density_df: optional population table with at least BORO_NM
        and Population columns.  Defaults to the module-level
        ``Pop_density_df`` so existing callers keep working unchanged.

    >>> sample_NYC_csv = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/NYC_get_crime.csv'
    >>> sample_NYC_dframe = pd.read_csv(sample_NYC_csv)
    >>> Pop_density_csv = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/Dummy_pop_density.csv'
    >>> Pop_density_df = pd.read_csv(Pop_density_csv)
    >>> ans = get_crime_results(sample_NYC_dframe, Pop_density_df)
    >>> ans.iloc[0]['Population']
    2504700
    '''
    if pop_density_df is None:
        # Backwards-compatible fallback: use the module-level lookup table.
        pop_density_df = Pop_density_df
    # Selecting only a particular set of crimes that involve harming another human
    type_of_crime = ['HARRASSMENT 2', 'BURGLARY', 'ROBBERY', 'FELONY ASSAULT', 'SEX CRIMES', 'OFFENSES INVOLVING FRAUD',
                     'RAPE', 'THEFT-FRAUD', 'MURDER & NON-NEGL. MANSLAUGHTER', 'KIDNAPPING & RELATED OFFENSES',
                     'OFFENSES RELATED TO CHILDREN', 'KIDNAPPING', 'OTHER OFFENSES RELATED TO THEF', 'PETIT LARCENY',
                     'GRAND LARCENY', 'FORGERY', 'FRAUDS', 'ASSAULT 3 & RELATED OFFENSES']
    dfname = dfname[dfname.OFNS_DESC.isin(type_of_crime)]
    # Complaint count per (offense, borough) pair.
    g1 = dfname.groupby(["OFNS_DESC", "BORO_NM"], as_index=False).count()
    g1 = g1[["OFNS_DESC", "BORO_NM", "CMPLNT_NUM"]]
    # Attach population figures so counts can be normalised per capita.
    Crime_result_df = pd.merge(g1, pop_density_df, how='left', left_on='BORO_NM', right_on='BORO_NM')
    Crime_result_df['complaints per capita'] = Crime_result_df['CMPLNT_NUM'] / Crime_result_df['Population']
    Crime_result_df['complaints per capita'] = Crime_result_df['complaints per capita'].astype('float64')
    return Crime_result_df
def EMS_details(dfname, pop_density_df=None) -> pd.DataFrame:
    '''
    Average EMS incident-response time per (borough, year, month), joined
    with the borough population-density table.

    :param dfname: EMS dataframe with BOROUGH, Incident_Year, Incident_Month
        and INCIDENT_RESPONSE_SECONDS_QY columns
    :param pop_density_df: optional population table (BORO_NM plus population
        columns).  Defaults to the module-level ``Pop_density_df`` so existing
        callers keep working unchanged.

    >>> ems_sample_csv = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/EMS_incidentResponse.csv'
    >>> ems_sample_df = pd.read_csv(ems_sample_csv)
    >>> Pop_density_csv = 'https://raw.githubusercontent.com/rahulrohri/final_project_2020Sp/master/DocTest%20Dummy%20Files/Dummy_pop_density.csv'
    >>> Pop_density_df = pd.read_csv(Pop_density_csv)
    >>> ans = EMS_details(ems_sample_df, Pop_density_df)
    >>> ans.iloc[0]['Population']
    2504700
    '''
    if pop_density_df is None:
        # Backwards-compatible fallback: use the module-level lookup table.
        pop_density_df = Pop_density_df
    # Mean response time per (borough, year, month), plus a minutes column.
    EMS_incident_response_avg = dfname.groupby(['BOROUGH', 'Incident_Year', 'Incident_Month'], as_index=False)['INCIDENT_RESPONSE_SECONDS_QY'].mean()
    EMS_incident_response_avg['AVG_INCIDENT_RESPONSE (Minutes)'] = EMS_incident_response_avg['INCIDENT_RESPONSE_SECONDS_QY'] / 60
    # Normalise the EMS borough label so it joins against the density table.
    EMS_incident_response_avg = EMS_incident_response_avg.replace({'BOROUGH': 'RICHMOND / STATEN ISLAND'}, 'STATEN ISLAND')
    result_inc_resp_df = pd.merge(pop_density_df, EMS_incident_response_avg, how='inner', left_on='BORO_NM', right_on='BOROUGH')
    return result_inc_resp_df
"noreply@github.com"
] | rahulrohri.noreply@github.com |
bd8527aee37e224f869349bec2f6fb2bdadc1d5b | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/예외처리_20200709144804.py | 9b8a16ecb397905296a8e33b88abcd084eadb309 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | try:
print("나누기 전용 계산기입니다.")
num1 = int(input("첫 번째 숫자를 입력하세요 : "))
num2 = int(input("두 번째 숫자를 입력하세요 : "))
print("{0} / {1} = {2}".format(n)) | [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
cc878c320008f8db66aa030c2f2f6bc3e205a9cc | 6d1728bf105a7d6481d0bbca2b88f4478e0632d9 | /study/ch1/area.py | 1a498690da37f4f891110371603717db2e529035 | [] | no_license | Phantomn/Python | 00c63aceb2d4aa0db71fe5e33fe8b5159b41aadd | 12808adf4b52c60cfe94befb6daa1e8187224beb | refs/heads/Python | 2022-11-09T16:49:49.165884 | 2019-08-05T07:30:07 | 2019-08-05T07:30:07 | 44,149,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | horizon=0
# Prompt for a rectangle's two side lengths and print its area.
print("Input horizon length : ", end="")
horizon = int(input())
print("Input vertical length : ", end="")
vertical = int(input())
print("rectangle is %d." % (horizon * vertical))
| [
"tmdvyr123@naver.com"
] | tmdvyr123@naver.com |
0baadeafe82ed3f2330579af9aeb7806db738dc3 | 7f8c24fe161fee3f32e206e013ea89fc8eb9a50a | /example_api/urls.py | 4c07dd5d1421c42a6038b536a60b6f7e7826f9cc | [] | no_license | vnitikesh/rest-registration | a04f4cf643766d3844e7a63e0616157d1c1f1e9a | 0578589f6cb9b9138fa5915395bf616de57eaf0b | refs/heads/main | 2023-02-18T12:32:40.392439 | 2021-01-21T23:55:23 | 2021-01-21T23:55:23 | 331,453,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.urls import path
from . import views
from rest_framework.routers import DefaultRouter  # NOTE(review): imported but never used — no router is registered
# URL routes for this app; each endpoint maps to a class-based view in views.py.
urlpatterns = [
    path('category/', views.CategoryListView.as_view(), name = 'category-list'),
    path('category/<int:pk>/', views.CategoryDetailView.as_view(), name = 'category-detail'),
    path('product/', views.ProductRecordView.as_view(), name = 'product-list'),
    path('cart/', views.CartViewSet.as_view(), name = 'cart'),
    path('checkout/', views.CheckoutView.as_view(), name = 'checkout'),
    #path('order/', views.OrderViewSet.as_view(), name = 'order')
]
| [
"vnitikesh@gmail.com"
] | vnitikesh@gmail.com |
803d74d1d4c3dd4a3fb7412c178cd596d5ac7f41 | 2f3278709d2409a4a20b7f79d31dfed43aac6557 | /LiTS_Decide_Preprocessing/TumorNetAvgpoolRelu.py | 893eb9d11c26f76fac1a4be2dec161ac10e44d82 | [] | no_license | HaoW14/Preprocess-of-CT-data | 8678ff749c644a0881d8305d6c6b51d8bf87912a | 811d95f28c11abc2b4b0305806397602ab4d51ba | refs/heads/master | 2022-12-28T01:02:39.929089 | 2020-10-18T10:31:27 | 2020-10-18T10:31:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,773 | py | # TumorNet without source; Downsample by pooling; activation is relu
# Which can use transfer lerning directly
import torch
from torch import nn
import numpy as np
import SimpleITK as sitk
from medpy import metric
def saved_preprocessed(savedImg,origin,direction,xyz_thickness,saved_name):
    """Write a network prediction volume to disk as a SimpleITK image.

    :param savedImg: torch tensor of per-class scores; argmax over axis 1
        yields the label map.  Assumes shape (1, C, D, H, W) — TODO confirm.
    :param origin: iterable of 0-d tensors with the image origin
    :param direction: iterable of 0-d tensors with the direction cosines
    :param xyz_thickness: iterable of 0-d tensors with the voxel spacing
    :param saved_name: output path understood by ``sitk.WriteImage``
    """
    # SimpleITK expects plain Python scalars, not tensors.
    origin = tuple(k.item() for k in origin)
    direction = tuple(k.item() for k in direction)
    xyz_thickness = tuple(k.item() for k in xyz_thickness)
    # Collapse class scores to a label map and drop the batch axis.
    savedImg = np.squeeze(np.argmax(savedImg.detach().cpu().numpy(),axis=1),0).astype(np.float32)
    newImg = sitk.GetImageFromArray(savedImg)
    newImg.SetOrigin(origin)
    newImg.SetDirection(direction)
    newImg.SetSpacing(xyz_thickness)
    sitk.WriteImage(newImg, saved_name)
def Dice(output2, target):
    """Per-volume Dice coefficient of the predicted tumour mask vs. label 2.

    :param output2: torch tensor of per-class scores; argmax over axis 1 is
        used directly as the positive mask.  NOTE(review): this is only a
        binary tumour mask if ``output2`` has exactly 2 channels — confirm.
    :param target: torch tensor of ground-truth labels (channel axis squeezed
        on axis 1); tumour voxels carry the value 2.
    :return: Dice score in [0, 1]; defined as 1 when both masks are empty.
    """
    pred_lesion = np.argmax(output2.detach().cpu().numpy(), axis=1)
    target = np.squeeze(target.detach().cpu().numpy(), axis=1)
    true_lesion = target == 2
    # Compute per-case (per patient volume) dice.
    if not np.any(pred_lesion) and not np.any(true_lesion):
        # Both prediction and ground truth empty: perfect agreement.
        tumor_dice = 1.
        print('tumor_dice = 1')
    else:
        tumor_dice = metric.dc(pred_lesion, true_lesion)
    return tumor_dice
def one_hot(scores, labels):
    """Return a one-hot tensor shaped like ``scores`` with 1s at the class
    indices given by ``labels`` along dim 1 (scatter_ semantics)."""
    encoded = torch.zeros_like(scores)
    # scatter_ returns the (in-place modified) tensor itself.
    return encoded.scatter_(dim=1, index=labels.long(), value=1)
class DiceLoss(nn.Module):
    """Soft Dice loss computed on the tumour class (ground-truth label 2)."""

    def __init__(self):
        super().__init__()
        self.smooth = 1e-5  # numerical stabiliser for empty masks

    def forward(self, output2, target):
        # Binary tumour mask: 1 where target == 2, 0 elsewhere (deep copy so
        # the caller's target tensor is untouched).
        tumour_mask = target.clone()
        tumour_mask[target == 2] = 1
        tumour_mask[target <= 1] = 0
        target2 = one_hot(output2, tumour_mask)
        numerator = 2. * (output2 * target2).sum()
        denominator = output2.sum() + target2.sum()
        # 1 - soft-Dice score.
        return 1 - (numerator + self.smooth) / (denominator + self.smooth)
class PostRes(nn.Module):
    """3-D residual block: two conv + InstanceNorm layers with a skip path.

    A 1x1x1 projection shortcut is used whenever the stride or the channel
    count changes; otherwise the identity shortcut is used.
    """

    def __init__(self, n_in, n_out, stride = 1):
        super(PostRes, self).__init__()
        # Main branch: conv-norm-relu-conv-norm.
        self.resBlock = nn.Sequential(
            nn.Conv3d(n_in, n_out, kernel_size=3, stride=stride, padding=1),
            nn.InstanceNorm3d(n_out),
            nn.ReLU(inplace=True),
            nn.Conv3d(n_out, n_out, kernel_size=3, padding=1),
            nn.InstanceNorm3d(n_out)
        )
        self.relu = nn.ReLU(inplace=True)
        if stride != 1 or n_out != n_in:
            # Project the input so it matches the residual branch's shape.
            self.shortcut = nn.Sequential(
                nn.Conv3d(n_in, n_out, kernel_size = 1, stride = stride),
                nn.InstanceNorm3d(n_out))
        else:
            self.shortcut = None

    def forward(self, x):
        identity = x if self.shortcut is None else self.shortcut(x)
        return self.relu(self.resBlock(x) + identity)
class Decoder2(nn.Module):
    """Decoder branch of TumorNet: four 2x transposed-conv upsampling stages,
    each followed by concatenation with the matching encoder feature map and
    a stack of residual blocks, ending in a 1x1x1 classifier + softmax."""

    def __init__(self):
        super().__init__()
        self.num_blocks_back = [3, 3, 2, 2]  # residual blocks at levels 4..1
        self.nff = [1, 8, 16, 32, 64, 128]   # encoder feature widths (levels 0-5)
        self.nfb = [64, 32, 16, 8, 2]        # decoder feature widths (levels 4..0)

        # deconv4..deconv1: transposed-conv upsampling stages.
        up_in = [self.nff[5], self.nfb[0], self.nfb[1], self.nfb[2]]
        for k in range(4):
            stage = nn.Sequential(
                nn.ConvTranspose3d(up_in[k], self.nfb[k], kernel_size=2, stride=2),
                nn.InstanceNorm3d(self.nfb[k]),
                nn.ReLU(inplace=True)
            )
            setattr(self, 'deconv' + str(4 - k), stage)

        # Final 1x1x1 classifier (2 output channels for this branch).
        self.output = nn.Sequential(
            nn.Conv3d(self.nfb[3], self.nfb[3], kernel_size=1),
            nn.InstanceNorm3d(self.nfb[3]),
            nn.ReLU(inplace=True),
            nn.Conv3d(self.nfb[3], self.nfb[4], kernel_size=1))

        # backward4..backward1: residual stacks after each skip concatenation;
        # the first block of each stack halves the doubled channel count.
        for i in range(len(self.num_blocks_back)):
            stack = [PostRes(self.nfb[i] * 2, self.nfb[i])]
            stack.extend(PostRes(self.nfb[i], self.nfb[i])
                         for _ in range(self.num_blocks_back[i] - 1))
            setattr(self, 'backward' + str(4 - i), nn.Sequential(*stack))
        self.drop = nn.Dropout3d(p=0.5, inplace=False)  # registered but unused in forward()
        self.softmax = nn.Softmax(dim=1)  # over the channel axis (NCDHW)

    def forward(self, layer1, layer2, layer3, layer4, layer5):
        """Decode the deepest feature map, fusing encoder skips level by level."""
        x = layer5
        for level, skip in ((4, layer4), (3, layer3), (2, layer2), (1, layer1)):
            x = getattr(self, 'deconv' + str(level))(x)
            x = torch.cat((x, skip), 1)
            x = getattr(self, 'backward' + str(level))(x)
        return self.softmax(self.output(x))
class TumorNet(nn.Module):
    """Encoder half of the tumour-segmentation network (max-pool
    downsampling, ReLU activations) wired to ``Decoder2`` for the
    upsampling path."""

    def __init__(self):
        super(TumorNet, self).__init__()
        self.nff = [1, 8, 16, 32, 64, 128]   # encoder feature widths, levels 0-5
        self.num_blocks_forw = [2, 2, 3, 3]  # residual blocks per level 2-5

        # forward1: plain double-conv stem.
        self.forward1 = nn.Sequential(
            nn.Conv3d(self.nff[0], self.nff[1], kernel_size=3, padding=1),
            nn.InstanceNorm3d(self.nff[1]),
            nn.ReLU(inplace=True),
            nn.Conv3d(self.nff[1], self.nff[1], kernel_size=3, padding=1),
            nn.InstanceNorm3d(self.nff[1]),
            nn.ReLU(inplace=True)
        )
        # forward2..forward5: residual stacks; the first block of each stack
        # widens the channel count.
        for level, depth in enumerate(self.num_blocks_forw):
            stack = [PostRes(self.nff[level + 1], self.nff[level + 2])]
            stack.extend(PostRes(self.nff[level + 2], self.nff[level + 2])
                         for _ in range(depth - 1))
            setattr(self, 'forward' + str(level + 2), nn.Sequential(*stack))
        self.avgpool = nn.AvgPool3d(kernel_size=2, stride=2)  # registered but unused in forward()
        self.maxpool = nn.MaxPool3d(kernel_size=2, stride=2)
        self.decoder2 = Decoder2()
        self.drop = nn.Dropout3d(p=0.5, inplace=False)  # registered but unused in forward()

    def forward(self, input):
        """Run encoder levels 1-5, then decode with the collected skips."""
        skips = [self.forward1(input)]
        for level in range(2, 6):
            pooled = self.maxpool(skips[-1])
            skips.append(getattr(self, 'forward' + str(level))(pooled))
        return self.decoder2(*skips)
def main():
    """Smoke test: build the network and print a per-layer summary.

    Requires a CUDA device and the third-party ``torchsummary`` package.
    """
    net = TumorNet().cuda()#necessary for torchsummary, must to cuda
    from torchsummary import summary
    summary(net, input_size=(1,64,256,256))#must remove the number of N
    # input = torch.randn([1,1,64,256,256]).cuda()#(NCDHW)
    # output = net(input)
    # print(output.shape)
    # print('############net.named_parameters()#############')
    # for name, param in net.named_parameters():
    #     print(name)


if __name__ == '__main__':
    main()
"lihuiyu23@gmail.com"
] | lihuiyu23@gmail.com |
91503fa1a7ffe5118597d43b74f8c1563b6bdca6 | b4c164c9c6f91badb305bae23246ab0c5ba5fcbe | /Problem Set 3/Motion.py | 3d2b43cec29e675115eb800b8faf2a7e42d0d3c2 | [] | no_license | KhrulSergey/AI_Robotics_Udacity | e2b25a5b9d752b2daaa5195b7b487738aae83231 | ff41e877f2af87348de8a0d44bc8f51ea29523f8 | refs/heads/master | 2021-04-26T22:31:06.456066 | 2019-03-30T20:33:49 | 2019-03-30T20:34:20 | 124,104,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,159 | py | # -----------------
# USER INSTRUCTIONS
#
# Write a function in the class robot called move()
#
# that takes self and a motion vector (this
# motion vector contains a steering* angle and a
# distance) as input and returns an instance of the class
# robot with the appropriate x, y, and orientation
# for the given motion.
#
# *steering is defined in the video
# which accompanies this problem.
#
# For now, please do NOT add noise to your move function.
#
# Please do not modify anything except where indicated
# below.
#
# There are test cases which you are free to use at the
# bottom. If you uncomment them for testing, make sure you
# re-comment them before you submit.
from math import *
import random
# --------
#
# the "world" has 4 landmarks.
# the robot's initial coordinates are somewhere in the square
# represented by the landmarks.
#
# NOTE: Landmark coordinates are given in (y, x) form and NOT
# in the traditional (x, y) format!
landmarks = [[0.0, 100.0], [0.0, 0.0], [100.0, 0.0], [100.0, 100.0]] # position of 4 landmarks, given in (y, x) order
world_size = 100.0 # world is NOT cyclic. Robot is allowed to travel "out of bounds"
max_steering_angle = pi / 4 # physical steering limit of a real car; used by robot.move() to reject commands
# ------------------------------------------------
#
# this is the robot class
#
class robot:
    """Bicycle-model robot for the particle-filter exercise.

    The robot starts at a random pose inside the ``world_size`` square.
    ``move`` returns a NEW robot instance rather than mutating ``self``.
    """

    def __init__(self, length=10.0):
        """Create a robot at a random pose with the given axle distance."""
        self.x = random.random() * world_size  # initial x position
        self.y = random.random() * world_size  # initial y position
        self.orientation = random.random() * 2.0 * pi  # initial orientation
        self.length = length  # length of robot (front/rear axle distance)
        self.bearing_noise = 0.0  # initialize bearing noise to zero
        self.steering_noise = 0.0  # initialize steering noise to zero
        self.distance_noise = 0.0  # initialize distance noise to zero

    def __repr__(self):
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))

    def set(self, new_x, new_y, new_orientation):
        """Set the robot pose.  Orientation must lie in [0, 2*pi).

        :raises ValueError: if the orientation is out of range
        """
        if new_orientation < 0 or new_orientation >= 2 * pi:
            # Bug fix: the original `raise (ValueError, 'msg')` raises a
            # tuple, which is a TypeError under Python 3; raise properly.
            raise ValueError('Orientation must be in [0..2pi]')
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)

    def set_noise(self, new_b_noise, new_s_noise, new_d_noise):
        """Set the noise parameters (useful when tuning particle filters)."""
        self.bearing_noise = float(new_b_noise)
        self.steering_noise = float(new_s_noise)
        self.distance_noise = float(new_d_noise)

    ############# ONLY ADD/MODIFY CODE BELOW HERE ###################

    def move(self, motion):  # Do not change the name of this function
        """Move along a section of a circular path (bicycle model).

        :param motion: [steering_angle, distance]
        :return: a NEW robot instance at the resulting pose
        :raises ValueError: if the steering angle exceeds
            ``max_steering_angle`` or the distance is negative
        """
        steering_angle = motion[0]
        distance = motion[1]
        if abs(steering_angle) > max_steering_angle:
            raise ValueError('Exceed max steering angle')
        if distance < 0:
            raise ValueError('Moving backwards is not permited')

        epsilon = 0.001  # below this turn angle, approximate by a straight line
        result = robot()
        result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
        result.length = self.length

        # Apply Gaussian noise to the commanded steering and distance.
        steering2 = random.gauss(steering_angle, self.steering_noise)
        dist2 = random.gauss(distance, self.distance_noise)

        turn_angle = dist2 / result.length * tan(steering2)
        if abs(turn_angle) < epsilon:
            # approximate by straight line motion
            result.x = self.x + dist2 * cos(self.orientation)
            result.y = self.y + dist2 * sin(self.orientation)
            result.orientation = (self.orientation + turn_angle) % (2.0 * pi)
        else:
            # approximate bicycle model: rotate around the turning centre (cx, cy)
            R = dist2 / turn_angle  # turning radius
            cx = self.x - sin(self.orientation) * R
            cy = self.y + cos(self.orientation) * R
            result.orientation = (self.orientation + turn_angle) % (2.0 * pi)
            result.x = cx + (sin(result.orientation) * R)
            result.y = cy - (cos(result.orientation) * R)

        return result  # a new robot instance with the updated pose

    ############## ONLY ADD/MODIFY CODE ABOVE HERE ####################
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
## --------
## TEST CASE:
##
## 1) The following code should print:
## Robot: [x=0.0 y=0.0 orient=0.0]
## Robot: [x=10.0 y=0.0 orient=0.0]
## Robot: [x=19.861 y=1.4333 orient=0.2886]
## Robot: [x=39.034 y=7.1270 orient=0.2886]
##
##
# length = 20.
# bearing_noise = 0.0
# steering_noise = 0.0
# distance_noise = 0.0
#
# myrobot = robot(length)
# myrobot.set(0.0, 0.0, 0.0)
# myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
#
# motions = [[0.0, 10.0], [pi / 6.0, 10], [0.0, 20.0]]
# T = len(motions)
#
# print ('Robot: ', myrobot)
# for t in range(T):
# myrobot = myrobot.move(motions[t])
# print ('Robot: ', myrobot)
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
## 2) The following code should print:
## Robot: [x=0.0 y=0.0 orient=0.0]
## Robot: [x=9.9828 y=0.5063 orient=0.1013]
## Robot: [x=19.863 y=2.0201 orient=0.2027]
## Robot: [x=29.539 y=4.5259 orient=0.3040]
## Robot: [x=38.913 y=7.9979 orient=0.4054]
## Robot: [x=47.887 y=12.400 orient=0.5067]
## Robot: [x=56.369 y=17.688 orient=0.6081]
## Robot: [x=64.273 y=23.807 orient=0.7094]
## Robot: [x=71.517 y=30.695 orient=0.8108]
## Robot: [x=78.027 y=38.280 orient=0.9121]
## Robot: [x=83.736 y=46.485 orient=1.0135]
##
# Demo for test case 2: drive the robot through ten identical
# (steering, distance) commands with zero noise and print each pose.
length = 20.
bearing_noise = 0.0
steering_noise = 0.0
distance_noise = 0.0
myrobot = robot(length)
myrobot.set(0.0, 0.0, 0.0)
myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
motions = [[0.2, 10.] for row in range(10)]  # steering 0.2 rad, distance 10, ten times
T = len(motions)
print ('Robot: ', myrobot)
for t in range(T):
    myrobot = myrobot.move(motions[t])
    print ('Robot: ', myrobot)
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
| [
"siberianodis@gmail.com"
] | siberianodis@gmail.com |
637aebc9dc0ee30985a63efc692a3f892fbed308 | c6f9a46393048add6fad888d382978b9be12dd4c | /python/ql/test/experimental/dataflow/strange-pointsto-interaction-investigation/src/urandom_problem.py | d4a06529cf60991084b7d954d234703134c192b9 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | luchua-bc/ql | 6e9480e8c92cbb12570fcc7f65366bfdd54dad06 | a1d9228a66cb80329041fa8d95b08ce5697dec54 | refs/heads/master | 2023-01-23T17:11:54.776916 | 2022-07-20T14:36:37 | 2022-07-20T14:36:37 | 248,313,302 | 4 | 0 | MIT | 2023-01-16T09:13:30 | 2020-03-18T18:35:48 | CodeQL | UTF-8 | Python | false | false | 920 | py | # These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"
SOURCE = "source"


# NOTE: this file is a CodeQL data-flow test fixture; the `# $ ...` comments
# further down are expected-result annotations with relative line offsets, so
# the executable lines below them must not be moved or changed.
def is_source(x):
    # True for any value the test fixture treats as tainted.
    return x == "source" or x == b"source" or x == 42 or x == 42.0 or x == 42j


def SINK(x):
    # Flow INTO this sink is expected: print "OK" when a source value arrives.
    if is_source(x):
        print("OK")
    else:
        print("Unexpected flow", x)


def SINK_F(x):
    # Flow into this sink is NOT expected: a source value here is a failure.
    if is_source(x):
        print("Unexpected flow", x)
    else:
        print("OK")
# ------------------------------------------------------------------------------
# Actual tests  (only inline comments added below: the `# $` annotations use
# relative line offsets, so no lines may be inserted or removed here)
# ------------------------------------------------------------------------------
def give_src():  # helper so the taint must flow through a call return
    return SOURCE
foo = give_src()  # taint expected to propagate into foo
SINK(foo) # $ flow="SOURCE, l:-3 -> foo"
import os  # used only for the opaque os.urandom call below
cond = os.urandom(1)[0] > 128 # $ unresolved_call=os.urandom(..)
if cond:  # data-dependent branching that points-to analysis cannot resolve
    pass
if cond:
    pass
foo = give_src() # $ unresolved_call=give_src()
SINK(foo) # $ unresolved_call=SINK(..) MISSING: flow="SOURCE, l:-15 -> foo"
| [
"rasmuswl@github.com"
] | rasmuswl@github.com |
929d4cbe14e60aaf7683f78e7b8e87aa8cf4d89d | a2490d50c85bc8385cdda1e2eaf88f02951dc808 | /client/verta/verta/_protos/public/modeldb/metadata/MetadataService_pb2.py | f7435b6df5e8379dcdef0246f8e30430db6a2fe6 | [
"Apache-2.0"
] | permissive | Atharex/modeldb | 2e379bc87df054dc5c1a9058620aef8a3ada9108 | 3a286d5861c1dd14342084793dd7d7584ff8a29b | refs/heads/master | 2022-11-08T09:23:37.799241 | 2020-07-01T12:16:31 | 2020-07-01T12:16:31 | 275,455,778 | 0 | 0 | Apache-2.0 | 2020-06-27T21:26:05 | 2020-06-27T21:26:05 | null | UTF-8 | Python | false | true | 16,824 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modeldb/metadata/MetadataService.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modeldb/metadata/MetadataService.proto',
package='ai.verta.modeldb.metadata',
syntax='proto3',
serialized_options=b'P\001ZGgithub.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldb/metadata',
serialized_pb=b'\n&modeldb/metadata/MetadataService.proto\x12\x19\x61i.verta.modeldb.metadata\x1a\x1cgoogle/api/annotations.proto\"U\n\nIDTypeEnum\"G\n\x06IDType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x19\n\x15VERSIONING_REPOSITORY\x10\x01\x12\x15\n\x11VERSIONING_COMMIT\x10\x02\"\x80\x01\n\x12IdentificationType\x12=\n\x07id_type\x18\x01 \x01(\x0e\x32,.ai.verta.modeldb.metadata.IDTypeEnum.IDType\x12\x10\n\x06int_id\x18\x02 \x01(\x04H\x00\x12\x13\n\tstring_id\x18\x03 \x01(\tH\x00\x42\x04\n\x02id\"i\n\x10GetLabelsRequest\x12\x39\n\x02id\x18\x01 \x01(\x0b\x32-.ai.verta.modeldb.metadata.IdentificationType\x1a\x1a\n\x08Response\x12\x0e\n\x06labels\x18\x01 \x03(\t\"y\n\x10\x41\x64\x64LabelsRequest\x12\x39\n\x02id\x18\x01 \x01(\x0b\x32-.ai.verta.modeldb.metadata.IdentificationType\x12\x0e\n\x06labels\x18\x02 \x03(\t\x1a\x1a\n\x08Response\x12\x0e\n\x06status\x18\x01 \x01(\x08\"|\n\x13\x44\x65leteLabelsRequest\x12\x39\n\x02id\x18\x01 \x01(\x0b\x32-.ai.verta.modeldb.metadata.IdentificationType\x12\x0e\n\x06labels\x18\x02 \x03(\t\x1a\x1a\n\x08Response\x12\x0e\n\x06status\x18\x01 \x01(\x08\x32\xca\x03\n\x0fMetadataService\x12\x8b\x01\n\tGetLabels\x12+.ai.verta.modeldb.metadata.GetLabelsRequest\x1a\x34.ai.verta.modeldb.metadata.GetLabelsRequest.Response\"\x1b\x82\xd3\xe4\x93\x02\x15\x12\x13/v1/metadata/labels\x12\x8e\x01\n\tAddLabels\x12+.ai.verta.modeldb.metadata.AddLabelsRequest\x1a\x34.ai.verta.modeldb.metadata.AddLabelsRequest.Response\"\x1e\x82\xd3\xe4\x93\x02\x18\x1a\x13/v1/metadata/labels:\x01*\x12\x97\x01\n\x0c\x44\x65leteLabels\x12..ai.verta.modeldb.metadata.DeleteLabelsRequest\x1a\x37.ai.verta.modeldb.metadata.DeleteLabelsRequest.Response\"\x1e\x82\xd3\xe4\x93\x02\x18*\x13/v1/metadata/labels:\x01*BKP\x01ZGgithub.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldb/metadatab\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_IDTYPEENUM_IDTYPE = _descriptor.EnumDescriptor(
name='IDType',
full_name='ai.verta.modeldb.metadata.IDTypeEnum.IDType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERSIONING_REPOSITORY', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERSIONING_COMMIT', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=113,
serialized_end=184,
)
_sym_db.RegisterEnumDescriptor(_IDTYPEENUM_IDTYPE)
_IDTYPEENUM = _descriptor.Descriptor(
name='IDTypeEnum',
full_name='ai.verta.modeldb.metadata.IDTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_IDTYPEENUM_IDTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=184,
)
_IDENTIFICATIONTYPE = _descriptor.Descriptor(
name='IdentificationType',
full_name='ai.verta.modeldb.metadata.IdentificationType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_type', full_name='ai.verta.modeldb.metadata.IdentificationType.id_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int_id', full_name='ai.verta.modeldb.metadata.IdentificationType.int_id', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='string_id', full_name='ai.verta.modeldb.metadata.IdentificationType.string_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.IdentificationType.id',
index=0, containing_type=None, fields=[]),
],
serialized_start=187,
serialized_end=315,
)
_GETLABELSREQUEST_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.metadata.GetLabelsRequest.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='labels', full_name='ai.verta.modeldb.metadata.GetLabelsRequest.Response.labels', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=396,
serialized_end=422,
)
_GETLABELSREQUEST = _descriptor.Descriptor(
name='GetLabelsRequest',
full_name='ai.verta.modeldb.metadata.GetLabelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.GetLabelsRequest.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETLABELSREQUEST_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=317,
serialized_end=422,
)
_ADDLABELSREQUEST_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.metadata.AddLabelsRequest.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai.verta.modeldb.metadata.AddLabelsRequest.Response.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=545,
)
_ADDLABELSREQUEST = _descriptor.Descriptor(
name='AddLabelsRequest',
full_name='ai.verta.modeldb.metadata.AddLabelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.AddLabelsRequest.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='ai.verta.modeldb.metadata.AddLabelsRequest.labels', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ADDLABELSREQUEST_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=424,
serialized_end=545,
)
_DELETELABELSREQUEST_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.Response.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=545,
)
_DELETELABELSREQUEST = _descriptor.Descriptor(
name='DeleteLabelsRequest',
full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.labels', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DELETELABELSREQUEST_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=671,
)
_IDTYPEENUM_IDTYPE.containing_type = _IDTYPEENUM
_IDENTIFICATIONTYPE.fields_by_name['id_type'].enum_type = _IDTYPEENUM_IDTYPE
_IDENTIFICATIONTYPE.oneofs_by_name['id'].fields.append(
_IDENTIFICATIONTYPE.fields_by_name['int_id'])
_IDENTIFICATIONTYPE.fields_by_name['int_id'].containing_oneof = _IDENTIFICATIONTYPE.oneofs_by_name['id']
_IDENTIFICATIONTYPE.oneofs_by_name['id'].fields.append(
_IDENTIFICATIONTYPE.fields_by_name['string_id'])
_IDENTIFICATIONTYPE.fields_by_name['string_id'].containing_oneof = _IDENTIFICATIONTYPE.oneofs_by_name['id']
_GETLABELSREQUEST_RESPONSE.containing_type = _GETLABELSREQUEST
_GETLABELSREQUEST.fields_by_name['id'].message_type = _IDENTIFICATIONTYPE
_ADDLABELSREQUEST_RESPONSE.containing_type = _ADDLABELSREQUEST
_ADDLABELSREQUEST.fields_by_name['id'].message_type = _IDENTIFICATIONTYPE
_DELETELABELSREQUEST_RESPONSE.containing_type = _DELETELABELSREQUEST
_DELETELABELSREQUEST.fields_by_name['id'].message_type = _IDENTIFICATIONTYPE
DESCRIPTOR.message_types_by_name['IDTypeEnum'] = _IDTYPEENUM
DESCRIPTOR.message_types_by_name['IdentificationType'] = _IDENTIFICATIONTYPE
DESCRIPTOR.message_types_by_name['GetLabelsRequest'] = _GETLABELSREQUEST
DESCRIPTOR.message_types_by_name['AddLabelsRequest'] = _ADDLABELSREQUEST
DESCRIPTOR.message_types_by_name['DeleteLabelsRequest'] = _DELETELABELSREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IDTypeEnum = _reflection.GeneratedProtocolMessageType('IDTypeEnum', (_message.Message,), {
'DESCRIPTOR' : _IDTYPEENUM,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.IDTypeEnum)
})
_sym_db.RegisterMessage(IDTypeEnum)
IdentificationType = _reflection.GeneratedProtocolMessageType('IdentificationType', (_message.Message,), {
'DESCRIPTOR' : _IDENTIFICATIONTYPE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.IdentificationType)
})
_sym_db.RegisterMessage(IdentificationType)
GetLabelsRequest = _reflection.GeneratedProtocolMessageType('GetLabelsRequest', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _GETLABELSREQUEST_RESPONSE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.GetLabelsRequest.Response)
})
,
'DESCRIPTOR' : _GETLABELSREQUEST,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.GetLabelsRequest)
})
_sym_db.RegisterMessage(GetLabelsRequest)
_sym_db.RegisterMessage(GetLabelsRequest.Response)
AddLabelsRequest = _reflection.GeneratedProtocolMessageType('AddLabelsRequest', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _ADDLABELSREQUEST_RESPONSE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.AddLabelsRequest.Response)
})
,
'DESCRIPTOR' : _ADDLABELSREQUEST,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.AddLabelsRequest)
})
_sym_db.RegisterMessage(AddLabelsRequest)
_sym_db.RegisterMessage(AddLabelsRequest.Response)
DeleteLabelsRequest = _reflection.GeneratedProtocolMessageType('DeleteLabelsRequest', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _DELETELABELSREQUEST_RESPONSE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.DeleteLabelsRequest.Response)
})
,
'DESCRIPTOR' : _DELETELABELSREQUEST,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.DeleteLabelsRequest)
})
_sym_db.RegisterMessage(DeleteLabelsRequest)
_sym_db.RegisterMessage(DeleteLabelsRequest.Response)
DESCRIPTOR._options = None
_METADATASERVICE = _descriptor.ServiceDescriptor(
name='MetadataService',
full_name='ai.verta.modeldb.metadata.MetadataService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=674,
serialized_end=1132,
methods=[
_descriptor.MethodDescriptor(
name='GetLabels',
full_name='ai.verta.modeldb.metadata.MetadataService.GetLabels',
index=0,
containing_service=None,
input_type=_GETLABELSREQUEST,
output_type=_GETLABELSREQUEST_RESPONSE,
serialized_options=b'\202\323\344\223\002\025\022\023/v1/metadata/labels',
),
_descriptor.MethodDescriptor(
name='AddLabels',
full_name='ai.verta.modeldb.metadata.MetadataService.AddLabels',
index=1,
containing_service=None,
input_type=_ADDLABELSREQUEST,
output_type=_ADDLABELSREQUEST_RESPONSE,
serialized_options=b'\202\323\344\223\002\030\032\023/v1/metadata/labels:\001*',
),
_descriptor.MethodDescriptor(
name='DeleteLabels',
full_name='ai.verta.modeldb.metadata.MetadataService.DeleteLabels',
index=2,
containing_service=None,
input_type=_DELETELABELSREQUEST,
output_type=_DELETELABELSREQUEST_RESPONSE,
serialized_options=b'\202\323\344\223\002\030*\023/v1/metadata/labels:\001*',
),
])
_sym_db.RegisterServiceDescriptor(_METADATASERVICE)
DESCRIPTOR.services_by_name['MetadataService'] = _METADATASERVICE
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | Atharex.noreply@github.com |
4ac613dc87fb37cf7e557918be4a06f39615bb70 | c3b5e412fd9479c046c09c251dd0688a4fec6130 | /solver.py | f360ff7d89db5f41f308259d807e7c2f16ec6ff5 | [] | no_license | Pazaak/NaoPickUp | edb45f8d4dbf392d0a4737c3d9e91b2dfd00b2a6 | 417a20e344fa57272a1481810571287a18c864a4 | refs/heads/master | 2020-05-20T00:48:28.326318 | 2015-07-08T08:21:03 | 2015-07-08T08:21:03 | 36,508,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,804 | py | __author__ = 'Luis Fabregues de los Santos'
import heapq as heap
import copy
import math
infty = float('inf')  # "no incumbent solution yet" bound, used by branchAndBound4
def distance_2d(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    # math.hypot is numerically safer than sqrt(dx*dx + dy*dy): it avoids
    # overflow/underflow of the intermediate squares.
    return math.hypot(x1 - x2, y1 - y2)
def value(list, robots, target):
    """Total distance walked for an assignment of boxes to robots.

    ``list[r]`` is the ordered sequence of boxes robot ``r`` fetches: the
    robot walks from its start position to its first box, carries it to
    ``target``, then makes a round trip target -> box -> target for every
    further box.  Returns the summed distance over all robots.

    The first parameter shadows the ``list`` builtin; the name is kept
    for backward compatibility with existing callers.
    """
    result = 0
    # enumerate works on both Python 2 and 3 (the original used the
    # Python-2-only xrange) and avoids repeated list[path][i] indexing.
    for path, assigned in enumerate(list):
        for i, box in enumerate(assigned):
            if i == 0:
                # start -> first box, then first box -> target
                result += distance_2d(robots[path].x, robots[path].y, box.x, box.y)
                result += distance_2d(target.x, target.y, box.x, box.y)
            else:
                # round trip target -> box -> target
                result += 2 * distance_2d(target.x, target.y, box.x, box.y)
    return result
# Evaluation function 1, min total steps
def branchAndBound1(robots, target, boxes):
    """Best-first branch & bound assigning boxes to robots.

    Minimises the *total* distance walked (see :func:`value`).  Returns
    ``(stats, assignment)`` where ``stats`` is
    ``[maxLen, insertions, creations, extractions, iterations, total, 0]``
    and ``assignment[r]`` is the ordered box list for robot ``r``.
    """
    # Search statistics: pool high-water mark and heap-operation counts.
    # NOTE(review): counters start at 2 -- presumably accounting for the
    # root node; confirm against the accompanying report.
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    # Consider boxes farthest from the target first.
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []
    # Optimistic remaining cost per box: one round trip target <-> box.
    scores = []
    for box in boxes:
        scores.append(distance_2d(box.x, box.y, target.x, target.y) * 2)
    current = []
    for i in xrange(len(robots)):
        current.append([])
    # Pool entries: (priority, indices of unassigned boxes, partial assignment).
    heap.heappush(pool, (sum(scores), range(len(boxes)), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # If the box list is empty, this node is a complete assignment;
        # best-first order means it is the best one, so return it.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), 0], \
                   current[2]
        else:
            # Branch: try assigning every remaining box to every robot.
            for nbox in current[1]:
                for i in xrange(len(robots)):
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # Remaining boxes after taking nbox.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    # Priority = actual cost so far + optimistic cost of the rest.
                    plusScore = 0
                    for scbox in newboxes:
                        plusScore += scores[scbox]
                    temp = (value(temp, robots, target) + plusScore, newboxes, temp)
                    heap.heappush(pool, temp)
                    insertions += 1
        iteraciones += 1
def value2(list, robots, target):
    """Per-robot distance of an assignment; returns the maximum (makespan).

    Same walking model as :func:`value`, but distances are accumulated per
    robot and the largest one is returned.  Raises ``ValueError`` on an
    empty assignment list, exactly like the original ``max`` call.
    """
    result = []
    # enumerate replaces the Python-2-only xrange and repeated indexing.
    for path, assigned in enumerate(list):
        result.append(0)
        for i, box in enumerate(assigned):
            if i == 0:
                # start -> first box, then first box -> target
                result[path] += distance_2d(robots[path].x, robots[path].y, box.x, box.y)
                result[path] += distance_2d(target.x, target.y, box.x, box.y)
            else:
                # round trip target -> box -> target
                result[path] += 2 * distance_2d(target.x, target.y, box.x, box.y)
    return max(result)
# Evaluation function 2, min max robot steps
def branchAndBound2(robots, target, boxes):
    """Branch & bound minimising the *maximum* per-robot distance.

    Same search as :func:`branchAndBound1` but nodes are ordered by
    :func:`value2` (the makespan).  Returns ``(stats, assignment)``.
    """
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []
    scores = []
    for box in boxes:
        scores.append((distance_2d(box.x, box.y, target.x, target.y) * 2))
    current = []
    for i in xrange(len(robots)):
        current.append([])
    heap.heappush(pool, (sum(scores), range(len(boxes)), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # If the box list is empty we have a complete assignment.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), \
                    value2(current[2], robots, target)], current[2]
        else:
            # Try assigning each remaining box to each robot.
            for nbox in current[1]:
                for i in xrange(len(robots)):
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # Remaining boxes after taking nbox.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    # Priority is the makespan only; unlike branchAndBound1
                    # there is no optimistic remaining-cost term.
                    temp = (value2(temp, robots, target), newboxes, temp)
                    heap.heappush(pool, temp)
                    insertions += 1
        iteraciones += 1
# Evaluation function 2, with optimistic score, UNSTABLE
def branchAndBound3(robots, target, boxes):
    """Branch & bound on makespan with a weighted optimistic term.

    Node priority is ``2 * value2(...) + len(remaining boxes)``.  The
    original header marks this variant UNSTABLE.  Returns
    ``(stats, assignment)`` like the other variants.
    """
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []
    scores = []
    for box in boxes:
        scores.append(distance_2d(box.x, box.y, target.x, target.y) * 2)
    current = []
    for i in xrange(len(robots)):
        current.append([])
    heap.heappush(pool, (sum(scores), range(len(boxes)), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # If the box list is empty we have a complete assignment.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), \
                    value2(current[2], robots, target)], current[2]
        else:
            # Try assigning each remaining box to each robot.
            for nbox in current[1]:
                for i in xrange(len(robots)):
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # Remaining boxes after taking nbox.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    # Weighted priority: doubled makespan plus remaining-box count.
                    temp = (2*value2(temp, robots, target)+len(newboxes), newboxes, temp)
                    heap.heappush(pool, temp)
                    insertions += 1
        iteraciones += 1
# Evaluation function 2, save the better, complete state
def branchAndBound4(robots, target, boxes):
    """Branch & bound on makespan that also prunes against an incumbent.

    Keeps ``bestYet``, the makespan of a complete assignment already
    generated, and only pushes nodes that beat it.  Returns
    ``(stats, assignment)`` like the other variants.
    """
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    bestYet = infty
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []
    scores = []
    for box in boxes:
        scores.append((distance_2d(box.x, box.y, target.x, target.y) * 2))
    current = []
    for i in xrange(len(robots)):
        current.append([])
    heap.heappush(pool, (sum(scores), range(len(boxes)), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # If the box list is empty we have a complete assignment.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), \
                    value2(current[2], robots, target)], \
                   current[2]
        else:
            # Try assigning each remaining box to each robot.
            for nbox in current[1]:
                for i in xrange(len(robots)):
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # Remaining boxes after taking nbox.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    temp = (value2(temp, robots, target), newboxes, temp)
                    # Prune: only push nodes that improve on the incumbent.
                    if temp[0] < bestYet:
                        heap.heappush(pool, temp)
                        insertions += 1
                    # NOTE(review): bestYet is updated for *any* complete
                    # node, even one not better than the incumbent, which
                    # can loosen the bound -- confirm this is intended.
                    if not temp[1]:
                        bestYet = temp[0]
        iteraciones += 1
"aggalem@gmail.com"
] | aggalem@gmail.com |
e2072af249848ec0134b8e52255016c3d1ed8c07 | 6270ed787bfa3080975ce4f0a27a733b391f4f84 | /22Septembre.py | 1adf9ba336df6550b653abf92fdcb14a6d3712dd | [] | no_license | lucasdavid47/IPT_Sup | b6a8aec625e32903af1704f95f356205e6d43ba4 | 72ee4fa79c8b5f305cbe82d72d5161d6da32e86d | refs/heads/master | 2023-01-07T19:04:15.092690 | 2020-11-04T07:10:05 | 2020-11-04T07:10:05 | 294,027,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | #Somme
# Element-wise sum C = A + B of two square matrices (lists of rows).
# NOTE(review): A and B are left empty here, so n == 0 and the loops do
# nothing -- populate them (or read input) before running.
A=[]
B=[]
n =len(A) # number of rows of A
C=[]
for i in range(0, n):
    L=[]
    for j in range(0, n):
        L.append(A[i][j]+B[i][j])
    C.append(L)
for x in C:
    print(x) # print C row by row
# Matrix product D = A * B (naive triple loop, O(n^3)).
n = len(A)
D=[]
for i in range(0, n):
    L=[]
    for j in range(0, n):
        S=0
        for k in range(0,n):
            S += A[i][k]*B[k][j]
        L.append(S)
    D.append(L)
for x in D:
    print(x)
# Double every element greater than 3, triple the others, then print the list.
L = [1, 2, 3, 4, 5, 6]
for k in range(len(L)):
    if L[k] > 3:
        L[k] *= 2
    else:
        L[k] *= 3
# Fixed: the original `print[L]` subscripts the print function itself,
# which raises TypeError at runtime; a call was clearly intended.
print(L)
"lucas.david44000@gmail.com"
] | lucas.david44000@gmail.com |
6b9e64cf6ae66302800054f71df69a98d2400cb1 | 44754d4b76b59c91ec700e4b07fc7c20930eedd2 | /app.py | 3a5cc4daa936e681a3271a060c3939094f3e2ce7 | [
"MIT"
] | permissive | ghostcat404/simple_ml_model | f18b8e18a06b30ffc46e7b709473a6793f2ff335 | 0a65fb4a376d1781d356dbcedd5335f3d6803ab1 | refs/heads/master | 2023-06-10T18:30:07.288304 | 2021-06-17T23:39:57 | 2021-06-17T23:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | import logging
import argparse
from textwrap import dedent
import mlflow
from sklearn.pipeline import Pipeline
from dotenv import load_dotenv
from utils import ModelMapper, DatasetMapper, save_model, get_model_params
# TODO: implement applying/serving the trained model
def run_train(args):
    """Train the selected model on the iris dataset and log it to MLflow.

    ``args`` comes from the ``train`` sub-command parser: ``model_type``,
    ``params_path`` (optional YAML file with a top-level ``params``
    mapping), ``model_name`` (optional local save name) and ``exp_name``.
    """
    # Optional hyper-parameters loaded from the YAML config.
    if args.params_path is not None:
        params = get_model_params(args.params_path)['params']
    else:
        params = None
    model = ModelMapper.get_model(args.model_type)(params)
    model = Pipeline([
        ('estimator', model)
    ])
    logging.info('Load model %s', model)
    X, y = DatasetMapper.get_data('iris')
    logging.info('Fit model')
    # NOTE(review): tracking URI is hard-coded; consider moving it to
    # configuration/environment.
    mlflow.set_tracking_uri("http://194.67.111.68:5000")
    mlflow.set_experiment(args.exp_name)
    with mlflow.start_run() as run:
        model.fit(X, y)
        if params:
            mlflow.log_params(params)
        mlflow.sklearn.log_model(model, artifact_path="model")
    # Optionally persist the fitted pipeline locally as well.
    if args.model_name is not None:
        logging.info('Save %s to %s', model, args.model_name)
        save_model(args.model_name, model)
def setup_parser(parser: argparse.ArgumentParser):
    """Register the ``train`` sub-command and its options on *parser*.

    Each sub-command stores its handler in ``args.callback`` (see
    ``set_defaults`` below), which ``main`` dispatches after parsing.
    """
    subparsers = parser.add_subparsers(
        help='Choose command. Type <command> -h for more help'
    )
    train_parser = subparsers.add_parser(
        'train',
        help='train choosen model',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    train_parser.add_argument(
        '--model',
        help=dedent('''
        Choose model type
        Available types:
            - logistic: sklearn.linear_models.LogisticRegression
        '''),
        dest='model_type',
        type=str,
        required=True
    )
    train_parser.add_argument(
        '--config-params-path',
        help='path to model params .yml file',
        dest='params_path',
        type=str,
        required=False,
        default=None
    )
    train_parser.add_argument(
        '--model-name',
        help='name of model',
        dest='model_name',
        type=str,
        required=False,
        default=None
    )
    train_parser.add_argument(
        '--exp-name',
        help='name of experiment',
        dest='exp_name',
        type=str,
        required=False,
        default='test_exp'
    )
    train_parser.set_defaults(callback=run_train)
def main():
    """CLI entry point: load .env, parse arguments, dispatch the sub-command."""
    load_dotenv()
    parser = argparse.ArgumentParser('Simple ML project')
    setup_parser(parser)
    args = parser.parse_args()
    # Each sub-command registered its handler as ``callback`` (see setup_parser).
    args.callback(args)
if __name__ == '__main__':
    main()
| [
"aechesnov@yandex.ru"
] | aechesnov@yandex.ru |
e195f2cada6fd440c8ec551ec4c3ca40c8efe9b4 | f0af90e1a5e9cd73682a42f0295574d70450c62e | /test.py | a4640244851e505dad79503dd608447c9eebf3f1 | [] | no_license | k3ch-jo/mocha | 9c4899e1157ad547b60c5e86c6587b9f4e917899 | 4142da6dd12a866d3d82f0e311c023f00fed83a7 | refs/heads/main | 2022-12-23T08:37:37.497990 | 2020-10-05T07:38:03 | 2020-10-05T07:38:03 | 301,324,930 | 0 | 0 | null | 2020-10-05T07:38:04 | 2020-10-05T07:21:42 | Python | UTF-8 | Python | false | false | 44 | py | #says goodmorning
# Print a morning greeting to stdout.
print("Good Morning")
| [
"noreply@github.com"
] | k3ch-jo.noreply@github.com |
bd1236dee44cc218e34f71aa057ce6aeaae640d8 | 4f365fbdfd4701c3a294dfba17c1377d4eb369d8 | /jinja2htmlcompress.py | 507c7509a9a3a8418fcb4ce187fb21809e76fc26 | [
"BSD-3-Clause"
] | permissive | Orvillar/jinja2-htmlcompress | 4e725f9b6ceb6f327d4247d7dab6f55d344039ea | b34dc409762aaf205ccd59e37ad4b3dc5331904d | refs/heads/master | 2020-04-07T16:06:54.607802 | 2018-11-21T08:31:21 | 2018-11-21T08:31:21 | 158,515,466 | 0 | 0 | NOASSERTION | 2018-11-21T08:29:20 | 2018-11-21T08:29:19 | null | UTF-8 | Python | false | false | 6,354 | py | # -*- coding: utf-8 -*-
"""
jinja2htmlcompress
~~~~~~~~~~~~~~~~~~
A Jinja2 extension that eliminates useless whitespace at template
compilation time without extra overhead.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.ext import Extension
from jinja2.lexer import Token, describe_token
from jinja2 import TemplateSyntaxError
# Matches an opening/closing tag prefix ("<tag" / "</tag") or a tag-ending
# ">" with any trailing whitespace; the (?s) flag makes "." match newlines.
_tag_re = re.compile(r'(?:<(/?)([a-zA-Z0-9_-]+)\s*|(>\s*))(?s)')
# Runs of whitespace that the compressor collapses to a single space.
_ws_normalize_re = re.compile(r'[ \t\r\n]+')
class StreamProcessContext(object):
    """Per-stream filtering state: the token being rewritten and the stack
    of currently-open HTML tags."""
    def __init__(self, stream):
        self.stream = stream
        self.token = None  # token currently being processed
        self.stack = []    # open tag names, innermost last
    def fail(self, message):
        # Raise a template syntax error located at the current token.
        raise TemplateSyntaxError(message, self.token.lineno,
                                  self.stream.name, self.stream.filename)
def _make_dict_from_listing(listing):
rv = {}
for keys, value in listing:
for key in keys:
rv[key] = value
return rv
class HTMLCompress(Extension):
    """Jinja2 extension that strips redundant whitespace from HTML template
    data at compile time, tracking open tags so whitespace-sensitive
    content is left untouched."""
    # Contents of these tags keep their whitespace verbatim.
    isolated_elements = set(['script', 'style', 'noscript', 'textarea'])
    # Tags with no closing counterpart; never pushed on the tag stack.
    void_elements = set(['br', 'img', 'area', 'hr', 'param', 'input',
                         'embed', 'col'])
    block_elements = set(['div', 'p', 'form', 'ul', 'ol', 'li', 'table', 'tr',
                          'tbody', 'thead', 'tfoot', 'tr', 'td', 'th', 'dl',
                          'dt', 'dd', 'blockquote', 'h1', 'h2', 'h3', 'h4',
                          'h5', 'h6', 'pre'])
    # HTML optional-end-tag rules: opening a tag in the value set implicitly
    # closes the key tag; '#block' stands for any block-level element.
    breaking_rules = _make_dict_from_listing([
        (['p'], set(['#block'])),
        (['li'], set(['li'])),
        (['td', 'th'], set(['td', 'th', 'tr', 'tbody', 'thead', 'tfoot'])),
        (['tr'], set(['tr', 'tbody', 'thead', 'tfoot'])),
        (['thead', 'tbody', 'tfoot'], set(['thead', 'tbody', 'tfoot'])),
        (['dd', 'dt'], set(['dl', 'dt', 'dd']))
    ])
    def is_isolated(self, stack):
        # True if any enclosing open tag preserves whitespace.
        for tag in reversed(stack):
            if tag in self.isolated_elements:
                return True
        return False
    def is_breaking(self, tag, other_tag):
        # Does opening `tag` implicitly close the currently-open `other_tag`?
        breaking = self.breaking_rules.get(other_tag)
        return breaking and (tag in breaking or
            ('#block' in breaking and tag in self.block_elements))
    def enter_tag(self, tag, ctx):
        # Pop tags that `tag` implicitly terminates, then open it
        # (void elements are never pushed -- they cannot hold content).
        while ctx.stack and self.is_breaking(tag, ctx.stack[-1]):
            self.leave_tag(ctx.stack[-1], ctx)
        if tag not in self.void_elements:
            ctx.stack.append(tag)
    def leave_tag(self, tag, ctx):
        if not ctx.stack:
            ctx.fail('Tried to leave "%s" but something closed '
                     'it already' % tag)
        if tag == ctx.stack[-1]:
            ctx.stack.pop()
            return
        # Closing an outer tag: pop everything up to and including it, but
        # only through tags that may legally be implicitly closed.
        for idx, other_tag in enumerate(reversed(ctx.stack)):
            if other_tag == tag:
                for num in xrange(idx + 1):
                    ctx.stack.pop()
            elif not self.breaking_rules.get(other_tag):
                break
    def normalize(self, ctx):
        """Collapse whitespace in the current data token, skipping isolated
        (whitespace-sensitive) regions."""
        pos = 0
        buffer = []
        def write_data(value):
            # Only normalize when not inside script/style/noscript/textarea.
            if not self.is_isolated(ctx.stack):
                value = _ws_normalize_re.sub(' ', value.strip())
            buffer.append(value)
        for match in _tag_re.finditer(ctx.token.value):
            closes, tag, sole = match.groups()
            preamble = ctx.token.value[pos:match.start()]
            write_data(preamble)
            if sole:
                # A bare '>' (plus trailing whitespace) ends a tag.
                write_data(sole)
            else:
                buffer.append(match.group())
                # Maintain the tag stack for '<tag' / '</tag'.
                (closes and self.leave_tag or self.enter_tag)(tag, ctx)
            pos = match.end()
        write_data(ctx.token.value[pos:])
        return u''.join(buffer)
    def filter_stream(self, stream):
        # Rewrite only 'data' tokens; all other tokens pass through.
        ctx = StreamProcessContext(stream)
        for token in stream:
            if token.type != 'data':
                yield token
                continue
            ctx.token = token
            value = self.normalize(ctx)
            yield Token(token.lineno, 'data', value)
class SelectiveHTMLCompress(HTMLCompress):
    """Variant that only compresses data between ``{% strip %}`` and
    ``{% endstrip %}`` pseudo-tags."""
    def filter_stream(self, stream):
        ctx = StreamProcessContext(stream)
        strip_depth = 0
        while 1:
            if stream.current.type == 'block_begin':
                # Intercept {% strip %} / {% endstrip %} and track nesting
                # depth instead of passing the tags through to the parser.
                if stream.look().test('name:strip') or \
                   stream.look().test('name:endstrip'):
                    stream.skip()
                    if stream.current.value == 'strip':
                        strip_depth += 1
                    else:
                        strip_depth -= 1
                        if strip_depth < 0:
                            ctx.fail('Unexpected tag endstrip')
                    stream.skip()
                    if stream.current.type != 'block_end':
                        ctx.fail('expected end of block, got %s' %
                                 describe_token(stream.current))
                    stream.skip()
            # Compress data tokens only while inside a strip block.
            if strip_depth > 0 and stream.current.type == 'data':
                ctx.token = stream.current
                value = self.normalize(ctx)
                yield Token(stream.current.lineno, 'data', value)
            else:
                yield stream.current
            stream.next()  # Python 2 stream API; Python 3 would use next(stream)
def test():
    """Manual demo of both extensions (Python 2 syntax: print statements)."""
    from jinja2 import Environment
    env = Environment(extensions=[HTMLCompress])
    tmpl = env.from_string('''
        <html>
          <head>
            <title>{{ title }}</title>
          </head>
          <script type=text/javascript>
            if (foo < 42) {
              document.write('Foo < Bar');
            }
          </script>
          <body>
            <li><a href="{{ href }}">{{ title }}</a><br>Test Foo
            <li><a href="{{ href }}">{{ title }}</a><img src=test.png>
          </body>
        </html>
    ''')
    print tmpl.render(title=42, href='index.html')
    env = Environment(extensions=[SelectiveHTMLCompress])
    tmpl = env.from_string('''
        Normal <span> unchanged </span> stuff
        {% strip %}Stripped <span class=foo > test </span>
        <a href="foo"> test </a> {{ foo }}
        Normal <stuff> again {{ foo }} </stuff>
        <p>
          Foo<br>Bar
          Baz
          <p>
            Moep <span>Test</span> Moep
          </p>
        {% endstrip %}
    ''')
    print tmpl.render(foo=42)
if __name__ == '__main__':
    test()
| [
"armin.ronacher@active-4.com"
] | armin.ronacher@active-4.com |
803d49b2b49af27d2ac57b3a4e8ff335cdd579a8 | c96eab97976aa7fa60320d8b7de74f5148c7bf25 | /edf/g1997.py | 02cd9c1f7ee960c1bd6fc5b13dd077532754084c | [] | no_license | alexis-roche/scripts | 869eb9063e8b31a0e13284aeb777cc152f822f02 | aaae389a3fa5a0c6ff619034bdc5825a5f77a995 | refs/heads/master | 2021-01-02T23:07:33.730063 | 2012-05-08T06:59:06 | 2012-05-08T06:59:06 | 1,047,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | from game import Game
# France's 1997 matches as plain tuples:
# (date, opponent, code, venue, goals for, goals against, starting XI, subs).
# The 'f' code presumably marks a friendly -- confirm against game.Game.
_MATCHES_1997 = [
    ('22 Jan 1997', 'portugal', 'f', 'away', 2, 0,
     ['barthez', 'thuram', 'blanc', 'desailly', 'laigle', 'karembeu',
      'deschamps', 'zidane', 'ba', 'dugarry', 'pires'],
     ['ngotty', 'djorkaeff', 'loko', 'blondeau']),
    ('26 Feb 1997', 'netherlands', 'f', 'home', 2, 1,
     ['lama', 'thuram', 'blanc', 'desailly', 'lizarazu', 'karembeu',
      'vieira', 'zidane', 'laigle', 'ba', 'dugarry'],
     ['candela', 'ngotty', 'pires', 'loko']),
    ('2 Apr 1997', 'sweden', 'f', 'home', 1, 0,
     ['barthez', 'thuram', 'blanc', 'desailly', 'candela', 'ba',
      'makelele', 'zidane', 'vieira', 'djorkaeff', 'dugarry'],
     ['blondeau', 'keller', 'gava', 'djetou', 'loko']),
    ('3 Jun 1997', 'brazil', 'f', 'home', 1, 1,
     ['barthez', 'candela', 'blanc', 'desailly', 'lizarazu', 'karembeu',
      'deschamps', 'zidane', 'ba', 'maurice', 'pires'],
     ['thuram', 'vieira', 'keller']),
    ('7 Jun 1997', 'england', 'f', 'home', 0, 1,
     ['barthez', 'thuram', 'blanc', 'ngotty', 'laigle', 'djorkaeff',
      'vieira', 'deschamps', 'dugarry', 'ouedec', 'keller'],
     ['lizarazu', 'zidane', 'loko']),
    ('11 Jun 1997', 'italy', 'f', 'home', 2, 2,
     ['charbonnier', 'thuram', 'leboeuf', 'desailly', 'lizarazu', 'ba',
      'karembeu', 'zidane', 'deschamps', 'dugarry', 'maurice'],
     ['ngotty', 'vieira', 'djorkaeff']),
    ('11 Oct 1997', 'south africa', 'f', 'home', 2, 1,
     ['letizi', 'thuram', 'blanc', 'desailly', 'candela', 'deschamps',
      'djorkaeff', 'petit', 'pires', 'guivarch', 'henry'],
     ['laigle', 'ba', 'boghossian', 'zidane']),
    ('12 Nov 1997', 'scotland', 'f', 'home', 2, 1,
     ['barthez', 'thuram', 'blanc', 'desailly', 'laigle', 'ba',
      'deschamps', 'zidane', 'petit', 'laslandes', 'guivarch'],
     ['candela', 'gava', 'boghossian', 'djorkaeff']),
]

# Build the Game objects in chronological order, same as the original
# hand-unrolled version (module-level `g` ends up bound to the last game).
g1997 = []
for _date, _opponent, _code, _venue, _gf, _ga, _players, _subs in _MATCHES_1997:
    g = Game(_date, _opponent, _code, _venue, _gf, _ga)
    g.players = _players
    g.subs = _subs
    g1997.append(g)
"alexis.roche@gmail.com"
] | alexis.roche@gmail.com |
f832144531d3e829e3b9637112237b07b3bc34c5 | 290e0f86fd9cd2881e82b44a308beb1b7f657fb7 | /assistant_utils.py | 577de88ddc415bc0298dcd7c491b48e2555c4adb | [] | no_license | essalj/ai_assistant | 0d2696a90a3a29e28461f95abec6fb8685298f1c | 5a592b86015a8e0fc6ea961eeddf1b885dbe900d | refs/heads/main | 2023-04-22T21:57:22.568114 | 2021-04-25T12:45:20 | 2021-04-25T12:45:20 | 325,210,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
import pyautogui
#screen_shot(file_name)
def screen_shot(file_name):
    """Grab the full screen via pyautogui and save it as ``<file_name>.png``."""
    output_path = file_name + ".png"
    capture = pyautogui.screenshot()
    capture.save(output_path)
    print("file saved here: " + output_path)
#screen_shot("test_fil")
| [
"noreply@github.com"
] | essalj.noreply@github.com |
8f1d3b33b6976f1a8d134f8df6ee8b3eb067f5b5 | 2ecc0f925df105282b5c725d5441f73ff7bf8317 | /py31파일처리/py31_17_encrypt.py | 5bf5c6f4f34a9da7baec6fa9c9b436b956584c32 | [] | no_license | anhduong2020/anhduong3202 | 8ed8e7cd7782fb7eec507d4abbbac7f64576a65b | e06e27d8dd56130fc66271f1013de61ceea89335 | refs/heads/master | 2021-01-13T20:09:02.994695 | 2020-03-15T09:08:22 | 2020-03-15T09:08:22 | 242,481,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | key = "abcdefghijklmnopqrstuvwxyz"
# Take plaintext, encrypt it, and return the ciphertext.
# Take ciphertext, decrypt it, and return the plaintext.
| [
"hdfj@naver.com"
] | hdfj@naver.com |
68bda07db08e3d6b58a8cbb0bf86ce63b584f900 | 5a1f77b71892745656ec9a47e58a078a49eb787f | /4_Backwoods_Forest/140-A_Fine_Mint/fine_mint.py | f17553bc8c35e99e05fe9b3bbd9916adfeaa85f8 | [
"MIT"
] | permissive | ripssr/Code-Combat | 78776e7e67c033d131e699dfeffb72ca09fd798e | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | refs/heads/master | 2020-06-11T20:17:59.817187 | 2019-07-21T09:46:04 | 2019-07-21T09:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | def pickUpCoin():
coin = hero.findNearestItem()
if coin:
hero.moveXY(coin.pos.x, coin.pos.y)
def attackEnemy():
enemy = hero.findNearestEnemy()
if enemy:
if hero.isReady("cleave"):
hero.cleave(enemy)
else:
hero.attack(enemy)
while True:
attackEnemy()
pickUpCoin()
| [
"katik.hello@gmail.com"
] | katik.hello@gmail.com |
c03e0187f206d06f07e5771f3ff8b322dcdba6cf | ce387fc31007f0616b6f2805bf998ae5f6288224 | /qubole_assembly/configure_airflow.py | aa05c8e46596fa8911dbf9ea0464dd358c76d091 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] | permissive | harishjami1382/test2 | d3466604209377e899a239c4f29446ffef06684e | f778cc7290904a84bed06f65fa5dbb49a63639f0 | refs/heads/master | 2023-02-25T18:44:21.114158 | 2021-02-04T10:19:50 | 2021-02-04T10:19:50 | 335,915,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,826 | py | try:
import ConfigParser
except:
import configparser as ConfigParser
import traceback
import sys
import os
from optparse import OptionParser
import base64
import subprocess
def handle_specific_config(overrides_map, options):
    """Apply Qubole cluster defaults to the parsed Airflow config overrides.

    Mutates ``overrides_map`` in place: fills in the remote log folder, the
    sqlalchemy connection, webserver port/base_url, and the celery broker /
    result backend when the user did not supply them.

    :param overrides_map: dict of section -> {option: value} parsed from the
        ``--airflow-overrides`` argument.
    :param options: parsed command-line options (currently unused here).
    :returns: tuple ``(use_cluster_broker_airflow, use_celery_airflow,
        use_cluster_datastore)`` describing which cluster-local services
        the startup scripts must bring up.
    """
    use_cluster_datastore = False
    core_settings = overrides_map.get('core', {})
    logging_settings = overrides_map.get('logging', {})
    # Default the remote log folder from the environment when missing/empty.
    if not logging_settings.get('remote_base_log_folder'):
        logging_settings['remote_base_log_folder'] = os.getenv('AIRFLOW_LOGS_LOCATION', "")
    if 'logging' not in overrides_map:
        overrides_map['logging'] = logging_settings

    cluster_id = os.getenv('CLUSTER_ID', "")
    qubole_base_url = os.getenv('QUBOLE_BASE_URL', "api.qubole.com")

    # Fall back to the cluster-local postgres datastore when no connection
    # string was provided by the user.
    if not core_settings.get('sql_alchemy_conn'):
        core_settings['sql_alchemy_conn'] = "postgresql://root:" + cluster_id + "@localhost:5432/airflow"
        use_cluster_datastore = True

    # Webserver settings: the port is fixed (user-controlled ports were a bad
    # idea); base_url defaults to the cluster proxy URL but may be overridden
    # to support manually stitched multi-node setups.
    web_server_port = '8080'
    if 'webserver' not in overrides_map:
        overrides_map['webserver'] = {}
    overrides_map['webserver']['web_server_port'] = web_server_port
    if 'base_url' not in overrides_map['webserver']:
        overrides_map['webserver']['base_url'] = qubole_base_url + "/airflow-rbacwebserver-" + cluster_id

    # Celery executor settings: decide whether celery is used at all and
    # whether the cluster-local broker must be started.
    default_broker_url = 'amqp://guest:guest@localhost:5672/'
    use_cluster_broker_airflow = True
    use_celery_airflow = True
    if overrides_map.get('core', {}).get('executor', None) == 'CeleryExecutor':
        # BUGFIX: the key was previously misspelled 'broken_url', so a
        # user-supplied broker_url was never detected and got clobbered with
        # the cluster default below.
        if 'celery' in overrides_map and 'broker_url' in overrides_map['celery']:
            # The user is hosting their own messaging broker.
            use_cluster_broker_airflow = False
    else:  # Not CeleryExecutor: no celery, no broker needed.
        use_cluster_broker_airflow = False
        use_celery_airflow = False

    if use_celery_airflow:
        if 'celery' not in overrides_map:
            overrides_map['celery'] = {}
        if use_cluster_broker_airflow:
            overrides_map['celery']['broker_url'] = default_broker_url  # Default broker config on machine
        if 'result_backend' not in overrides_map['celery']:
            # Reason for using sql alchemy for result backend: QBOL-5589
            sql_alchemy_conn = overrides_map['core']['sql_alchemy_conn']
            overrides_map['celery']['result_backend'] = 'db+' + sql_alchemy_conn
        # Migrate the deprecated celeryd_concurrency option to its new name.
        if 'celeryd_concurrency' in overrides_map['celery']:
            overrides_map['celery']['worker_concurrency'] = overrides_map['celery']['celeryd_concurrency']
            del overrides_map['celery']['celeryd_concurrency']

    overrides_map['webserver']['rbac'] = False
    return (use_cluster_broker_airflow, use_celery_airflow, use_cluster_datastore)
def setup_scheduler_child_process_directory_and_cron(overrides_map):
    """Fill in scheduler log-directory and log-rotation defaults if absent."""
    scheduler = overrides_map.setdefault('scheduler', {})
    if 'child_process_log_directory' not in scheduler:
        log_root = os.getenv('AIRFLOW_LOG_DIR', '/media/ephemeral0/logs/airflow')
        scheduler['child_process_log_directory'] = '{0}/scheduler_task_logs'.format(log_root)
    if 'child_process_log_rotation_days' not in scheduler:
        scheduler['child_process_log_rotation_days'] = '2'
def setup_logs_symlink(final_config):
    """Create ``<airflow_home>/logs`` as a symlink to the configured log dir.

    No-op when ``base_log_folder`` already is ``<airflow_home>/logs``.

    :param final_config: merged config dict; reads
        ``final_config['logging']['base_log_folder']`` and
        ``final_config['core']['airflow_home']``.
    """
    logs_folder = final_config['logging']['base_log_folder']
    symlink_folder = "{0}/logs".format(final_config['core']['airflow_home'])
    if logs_folder == symlink_folder:
        return
    symlink_command = ["ln", "-s", logs_folder, symlink_folder]
    process = subprocess.Popen(symlink_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, err = process.communicate()
    # BUGFIX: communicate() returns *bytes*; the old check `err != ''`
    # compared bytes to str and was therefore true even on success, printing
    # a bogus error message for every successful symlink.
    if err:
        print("An error occured while creating symlink: {0}".format(err.decode(errors="replace")))
def main():
    """Merge user/recommended Airflow overrides into airflow.cfg.

    Command line:
        --airflow-overrides     '!'-separated entries of the form
                                ``section.key#<base64-encoded value>``
        --master-public-dns     accepted but unused in this function
        --airflow-home          directory containing airflow.cfg (mandatory)
        --airflow-env-var-file  shell file that receives USE_* exports (mandatory)

    Side effects: rewrites airflow.cfg in place, creates the logs symlink,
    and rewrites the USE_* export lines in the env-var file.
    """
    optparser = OptionParser()
    optparser.add_option("--airflow-overrides", default="", help="Airflow config overrides")
    optparser.add_option("--master-public-dns", default=None, help="Master Public DNS of the cluster")
    optparser.add_option("--airflow-home", help="Airflow Home")
    optparser.add_option("--airflow-env-var-file", help="Airflow Environment File Location")
    (options, args) = optparser.parse_args()
    if options.airflow_home is None:
        optparser.error('--airflow-home is mandatory')
    if options.airflow_env_var_file is None:
        optparser.error('--airflow-env-var-file is mandatory')
    # Overall aim is to merge the overrides by user/recommended with the ones present as default in airflow config.
    # Read config from Airflow Config file present at AIRFLOW_HOME
    config = ConfigParser.RawConfigParser()
    airflow_config_file_path = os.path.join(options.airflow_home , 'airflow.cfg')
    config.read(airflow_config_file_path)
    config_sections = config.sections()
    # Parse the overrides in the form section1.key1=value1!section2.key2=value2..
    # Store them in a map where key is section name and value is
    # a map with key value pairs of that section
    airflow_overrides = options.airflow_overrides
    overrides = airflow_overrides.split('!')
    overrides_map = {}
    for override in overrides:
        # Each entry looks like "section.key#<base64 value>"; malformed
        # entries are silently skipped.
        kv = override.split('.', 1)
        if len(kv) != 2:
            continue
        section = kv[0]
        prop_val = kv[1]
        kv = prop_val.split('#', 1)
        if len(kv) != 2:
            continue
        if not section in overrides_map:
            overrides_map[section] = {}
        # Values are base64-encoded so arbitrary characters survive transport.
        overrides_map[section][kv[0]] = base64.b64decode(kv[1]).decode('utf-8')
    (use_cluster_broker_airflow, use_celery_airflow, use_cluster_datastore) = handle_specific_config(overrides_map, options)
    setup_scheduler_child_process_directory_and_cron(overrides_map)
    # Get all sections by combining sections in overrides and config file
    overrides_sections = list(overrides_map.keys())
    sections = set(config_sections + overrides_sections)
    final_config = {}
    # Now it's time to merge configurations of both airflow config file and overrides
    for section in sections:
        config_items = {}
        if config.has_section(section):
            # config.items(section) is of the form [(key1, value1), (key2, value2)..] and then converted to dict.
            config_items = dict(config.items(section))
        override_items = {}
        if section in overrides_map:
            override_items = overrides_map[section]
        # Merge the 2 maps
        # Priority overrides > default config
        final_section_config = dict(list(config_items.items()) + list(override_items.items()))
        final_config[section] = final_section_config
    # Finally we just reset the config object to have all sections with required options
    for section in final_config.keys():
        if not config.has_section(section):
            config.add_section(section)
        for option in final_config[section].keys():
            config.set(section, option, final_config[section][option])
    # Now dump the config again in the airflow config file
    with open(airflow_config_file_path, 'w') as airflow_config_file:
        config.write(airflow_config_file)
    airflow_env_var_file_path = options.airflow_env_var_file
    setup_logs_symlink(final_config)
    # Drop any stale USE_* export lines before appending the fresh values.
    newFileData = ""
    for line in open(airflow_env_var_file_path, 'r'):
        if "export USE_CELERY_AIRFLOW=" in line or "export USE_CLUSTER_BROKER_AIRFLOW=" in line or "export USE_CLUSTER_DATASTORE=" in line:
            line = ""
        newFileData += line
    with open(airflow_env_var_file_path, 'w') as airflow_env_var_file:
        airflow_env_var_file.write(newFileData)
    with open(airflow_env_var_file_path, 'a') as airflow_env_var_file:
        airflow_env_var_file.write("export USE_CELERY_AIRFLOW=" + str(use_celery_airflow) + "\n")
        airflow_env_var_file.write("export USE_CLUSTER_BROKER_AIRFLOW=" + str(use_cluster_broker_airflow) + "\n")
        airflow_env_var_file.write("export USE_CLUSTER_DATASTORE=" + str(use_cluster_datastore) + "\n")
if __name__ == '__main__':
    # Propagate main()'s return code; translate any unhandled crash into
    # a traceback on stderr plus exit status 1.
    try:
        status = main()
    except Exception:
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
    sys.exit(status)
| [
"jami.harish@accolite.com"
] | jami.harish@accolite.com |
203c4c5c65469b178d194de6b85feec2a5037e9a | 129941a1fb7c0bbd9969f0dd8843b057ce9f3666 | /VAJets/PKUTreeMaker/test/Wcrab/crab3_analysismu.py | 09dc3efeef0cc17499456da57454ef8dcc335da1 | [] | no_license | PKUHEPEWK/VBS_WGamma | 7cf43f136dd92777ab7a8a742c163e222b1f4dbf | 0f94abb2d4303b1c08d62971a74f25b100cbe042 | refs/heads/master | 2020-03-25T04:36:21.119377 | 2019-07-15T02:56:32 | 2019-07-15T02:56:32 | 143,404,007 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration: 2016 Run B SingleMuon data analysis ('SMu16B-v1').
config = Configuration()
config.section_("General")
config.General.requestName = 'SMu16B-v1'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Summer16 JEC text files (per the file names: L1FastJet/L2Relative/L3Absolute/
# L2L3Residual for AK4 CHS and PUPPI jets) shipped to the worker nodes.
config.JobType.inputFiles =['Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFPuppi.txt']
# Name of the CMSSW configuration file
config.JobType.psetName = 'analysis_data.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
config.Data.inputDataset = '/SingleMuon/Run2016B-03Feb2017_ver2-v2/MINIAOD'
config.Data.inputDBS = 'global'
# Split by luminosity sections, 40 lumi sections per job.
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 40
# Certified-lumi mask for the 2016 23Sep ReReco.
config.Data.lumiMask = 'Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'
#config.Data.runRange = '246908-258750'
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'SMu16B-v1'
config.section_("Site")
config.Site.storageSite = 'T3_US_FNALLPC' #T2_CN_Beijing'
"jiexiao@pku.edu.cn"
] | jiexiao@pku.edu.cn |
ddeca087ba585f1d6f5c7bf63b5f45edb6aef713 | 0fee9fa700f769b8fbdbe0549d4b518a4a84b66e | /node/image_predict/image_predict2.py | b3bd45bb143b4f919421a68b3a6a414f33851551 | [] | no_license | KoGaYoung/2019_Capstone-design | c885b7fd31abf92cad303e26746eb310d5093a65 | 00e9815600157bb2f452a09f5e0c300a73deaf0b | refs/heads/master | 2020-09-15T17:45:41.785488 | 2020-04-27T14:46:46 | 2020-04-27T14:46:46 | 223,519,294 | 2 | 3 | null | 2019-11-23T02:31:13 | 2019-11-23T02:31:12 | null | UTF-8 | Python | false | false | 2,128 | py | import matplotlib
import numpy as np
import os
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
import operator
from keras.models import load_model
from keras.preprocessing import image
import base64
import ast
import sys
def recommand(predictions, class_dict):
    """Return the names of the top-10 predicted classes, comma-separated.

    Args:
        predictions: model output array; row 0 holds one score per class index.
        class_dict: mapping of class name -> class index.

    Returns:
        Comma-separated string of up to 10 class names, best score first
        (ties keep ascending index order, as sorted() is stable).
    """
    scores = predictions[0].tolist()
    # Class indices ordered by descending score.
    ranked = sorted(range(len(scores)), key=lambda idx: scores[idx], reverse=True)
    top_names = []
    # Was a fixed range(10), which raised IndexError with fewer than 10 classes.
    for idx in ranked[:10]:
        matches = [name for name, target in class_dict.items() if target == idx]
        # BUGFIX: the original sliced the repr by the *list* length
        # (str(recommend)[2:len(recommend)-3]), which only worked by accident
        # for exactly one plain name; use the name itself instead.
        top_names.append(matches[0] if matches else "")
    return ",".join(top_names)
# Step 1: decode the base64 PNG passed in argv[1] (a data-URL) and save it
# into the directory that the prediction generator reads from.
remove_str = 'data:image/png;base64,'
image_path = '/home/student/2019_Capstone-design/node/image_predict/predict_image/12/out.png'
g = open(image_path, 'wb')
g.write(base64.b64decode(sys.argv[1][len(remove_str):]))
g.close()
# Step 2: load and compile the trained Keras model.
model1 = load_model('/home/student/2019_Capstone-design/node/image_predict/v2.01.h5')
model1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Step 3: load the class-name -> index mapping.
# NOTE(review): eval() on file contents executes arbitrary code; the file is
# local here, but ast.literal_eval would be safer — confirm before reuse.
f2 = open('/home/student/2019_Capstone-design/node/image_predict/label_dict.txt')
label_dict = eval(f2.read())
f2.close()
batchsize = 64
image_size = (255, 255)
# Step 4: run the saved image through a directory-based generator.
pred_gen = ImageDataGenerator().flow_from_directory(
    '/home/student/2019_Capstone-design/node/image_predict/predict_image/',
    class_mode='categorical',
    batch_size=batchsize,
    target_size=image_size
)
predictions = model1.predict_generator(pred_gen)
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
# operator is already imported at the top of the file; this re-import is
# redundant but harmless.
import operator
# Step 5: best single class, then the top-10 list printed for the caller.
index, value = max(enumerate(predictions[0]), key=operator.itemgetter(1))
pred_result = [name for name, target in label_dict.items() if target == index]
#recommand_top3
re_list = recommand(predictions, label_dict)
print(re_list)
| [
"4723515@naver.com"
] | 4723515@naver.com |
6d36440509a5ce458028cddd6f11386e9c9f9b3e | 37c189ce1dfe146df449d61add792de639d04321 | /api/can_api_v2_definition.py | e4ceaddb9c7ebf78ed14e4f9807f171fc516f0fe | [
"MIT",
"CC-BY-4.0"
] | permissive | yangyijane/covid-data-model | 2f9d1db874e2b883f4c856989d1e0876ab293a77 | 3d7d8ec65fbabf1ce5e1749f328c96cdf6199905 | refs/heads/main | 2023-08-22T06:39:33.401142 | 2021-10-25T15:59:53 | 2021-10-25T15:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,290 | py | from typing import List, Optional, Dict
import enum
import textwrap
from libs.datasets.dataset_utils import AggregationLevel
from libs import base_model
from libs.datasets import timeseries
import pydantic
import datetime
from covidactnow.datapublic.common_fields import GetByValueMixin
CDC_TRANSMISSION_LEVEL_DESCRIPTION = textwrap.dedent(
"""
Community transmission level for region, calculated using the CDC definition.
Possible values:
- 0: Low
- 1: Moderate
- 2: Substantial
- 3: High
- 4: Unknown
See [definitions of CDC community transmission levels](
https://covid.cdc.gov/covid-data-tracker/#cases_community) for more
details.
Note that the value may differ from what the CDC website reports
given we have different data sources. We have also introduced an
"Unknown" level for when both case data and test positivity data are
missing for at least 15 days. The CDC does not have an "Unknown"
level and instead will designate a location as "Low" when case and
test positivity data are missing.
"""
)
class TestPositivityRatioMethod(GetByValueMixin, enum.Enum):
"""Method used to determine test positivity ratio."""
CMSTesting = "CMSTesting"
CDCTesting = "CDCTesting"
HHSTesting = "HHSTesting"
VALORUM = "Valorum"
COVID_TRACKING = "covid_tracking"
OTHER = "other"
class FieldSourceType(GetByValueMixin, enum.Enum):
"""The data source of a field (metric or actual). This enumeration lists the places from which
CAN fetches data. The source is tracked on a per field and region timeseries basis."""
NYTimes = "NYTimes"
CMSTesting = "CMSTesting"
CDCTesting = "CDCTesting"
HHSTesting = "HHSTesting"
HHSHospital = "HHSHospital"
VALORUM = "Valorum"
COVID_TRACKING = "covid_tracking"
USA_FACTS = "USAFacts"
TestAndTrace = "TestAndTrace"
CANScrapersStateProviders = "CANScrapersStateProviders"
OTHER = "other"
class TestPositivityRatioDetails(base_model.APIBaseModel):
"""Details about how the test positivity ratio was calculated."""
source: TestPositivityRatioMethod = pydantic.Field(
..., description="Source data for test positivity ratio."
)
class DemographicDistributions(base_model.APIBaseModel):
"""Distributions of demographic data.
Note that different regions may have different demographic distributions for
the same field. For instance, health departments in different states may report
different age ranges.
The data provided matches the source distributions.
"""
age: Optional[Dict[str, int]] = pydantic.Field(None)
race: Optional[Dict[str, int]] = pydantic.Field(None)
ethnicity: Optional[Dict[str, int]] = pydantic.Field(None)
sex: Optional[Dict[str, int]] = pydantic.Field(None)
class HospitalResourceUtilization(base_model.APIBaseModel):
capacity: Optional[int] = pydantic.Field(..., description="Total capacity for resource.")
currentUsageTotal: Optional[int] = pydantic.Field(
..., description="Currently used capacity for resource by all patients (COVID + Non-COVID)"
)
currentUsageCovid: Optional[int] = pydantic.Field(
..., description="Currently used capacity for resource by COVID "
)
class Actuals(base_model.APIBaseModel):
"""Known actuals data."""
cases: Optional[int] = pydantic.Field(
..., description="Cumulative confirmed or suspected cases."
)
deaths: Optional[int] = pydantic.Field(
...,
description=(
"Cumulative deaths that are suspected or confirmed to have been caused by COVID-19."
),
)
positiveTests: Optional[int] = pydantic.Field(
..., description="Cumulative positive test results to date"
)
negativeTests: Optional[int] = pydantic.Field(
..., description="Cumulative negative test results to date"
)
contactTracers: Optional[int] = pydantic.Field(..., description="Number of Contact Tracers")
hospitalBeds: Optional[HospitalResourceUtilization] = pydantic.Field(
...,
description="""
Information about acute bed utilization details.
Fields:
* capacity - Current staffed acute bed capacity.
* currentUsageTotal - Total number of acute beds currently in use
* currentUsageCovid - Number of acute beds currently in use by COVID patients.
""",
)
icuBeds: Optional[HospitalResourceUtilization] = pydantic.Field(
...,
description="""
Information about ICU bed utilization details.
Fields:
* capacity - Current staffed ICU bed capacity.
* currentUsageTotal - Total number of ICU beds currently in use
* currentUsageCovid - Number of ICU beds currently in use by COVID patients.
""",
)
newCases: Optional[int] = pydantic.Field(
...,
description="""
New confirmed or suspected cases.
New cases are a processed timeseries of cases - summing new cases may not equal
the cumulative case count.
Processing steps:
1. If a region does not report cases for a period of time but then begins reporting again,
we will exclude the first day that reporting recommences. This first day likely includes
multiple days worth of cases and can be misleading to the overall series.
2. We remove any days with negative new cases.
3. We apply an outlier detection filter to the timeseries, which removes any data
points that seem improbable given recent numbers. Many times this is due to
backfill of previously unreported cases.
""",
)
newDeaths: Optional[int] = pydantic.Field(
...,
description="""
New confirmed or suspected COVID-19 deaths.
New deaths is an estimate of deaths per day; summing new deaths may not equal the
cumulative death count.
Processing steps:
1. If a region does not report deaths for a period of time but then begins reporting again,
we will exclude the first day that reporting recommences. This first day likely includes
multiple days worth of deaths and can be misleading to the overall series.
2. We remove any days with negative new deaths.
3. We apply an outlier detection filter to the timeseries, which removes any data
points that seem improbable given recent numbers. Many times this is due to
backfill of previously unreported deaths.
""",
)
vaccinesDistributed: Optional[int] = pydantic.Field(
None, description="Number of vaccine doses distributed."
)
vaccinationsInitiated: Optional[int] = pydantic.Field(
None,
description="""
Number of vaccinations initiated.
This value may vary by type of vaccine, but for Moderna and Pfizer this indicates
number of people vaccinated with the first dose.
""",
)
vaccinationsCompleted: Optional[int] = pydantic.Field(
None,
description="""
Number of vaccinations completed.
This value may vary by type of vaccine, but for Moderna and Pfizer this indicates
number of people vaccinated with both the first and second dose.
""",
)
vaccinesAdministered: Optional[int] = pydantic.Field(
None, description="Total number of vaccine doses administered."
)
vaccinesAdministeredDemographics: Optional[DemographicDistributions] = pydantic.Field(
None, description="Demographic distributions for administered vaccines."
)
vaccinationsInitiatedDemographics: Optional[DemographicDistributions] = pydantic.Field(
None, description="Demographic distributions for initiated vaccinations."
)
# When adding a new "actual" field here remember to add a `FieldAnnotations` in `Annotations`.
class ActualsTimeseriesRow(Actuals):
"""Actual data for a specific day."""
date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
class AnomalyAnnotation(base_model.APIBaseModel):
date: datetime.date = pydantic.Field(..., description="Date of anomaly")
type: timeseries.TagType = pydantic.Field(..., description="Type of annotation")
original_observation: float = pydantic.Field(
..., description="Original value on this date detected as anomalous."
)
class FieldSource(base_model.APIBaseModel):
type: Optional[FieldSourceType] = pydantic.Field(
None, description="The type of data source from a CAN list of data source types"
)
url: Optional[str] = pydantic.Field(
None, description="URL of a webpage containing the data at the source"
)
name: Optional[str] = pydantic.Field(None, description="A human readable name of the source")
class FieldAnnotations(base_model.APIBaseModel):
"""Annotations associated with one field."""
sources: List[FieldSource]
anomalies: List[AnomalyAnnotation]
class Annotations(base_model.APIBaseModel):
"""Annotations for each field."""
# Keep this list of fields in sync with the fields in `Actuals`
cases: Optional[FieldAnnotations] = pydantic.Field(None, description="Annotations for cases")
deaths: Optional[FieldAnnotations] = pydantic.Field(None, description="Annotations for deaths")
positiveTests: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for positiveTests"
)
negativeTests: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for negativeTests"
)
contactTracers: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for contactTracers"
)
hospitalBeds: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for hospitalBeds"
)
icuBeds: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for icuBeds"
)
newCases: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for newCases"
)
newDeaths: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for newDeaths"
)
vaccinesDistributed: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for vaccinesDistributed"
)
vaccinationsInitiated: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for vaccinationsInitiated"
)
vaccinationsCompleted: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for vaccinationsCompleted"
)
vaccinesAdministered: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for vaccinesAdministered"
)
# Keep this list of fields in sync with the fields in `Metrics`
testPositivityRatio: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for testPositivityRatio"
)
caseDensity: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for caseDensity"
)
contactTracerCapacityRatio: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for contactTracerCapacityRatio"
)
infectionRate: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for infectionRate"
)
infectionRateCI90: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for infectionRateCI90"
)
icuCapacityRatio: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for icuCapacityRatio"
)
vaccinationsInitiatedRatio: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for vaccinationsInitiatedRatio"
)
vaccinationsCompletedRatio: Optional[FieldAnnotations] = pydantic.Field(
None, description="Annotations for vaccinationsCompletedRatio"
)
class Metrics(base_model.APIBaseModel):
"""Calculated metrics data based on known actuals."""
testPositivityRatio: Optional[float] = pydantic.Field(
...,
description="Ratio of people who test positive calculated using a 7-day rolling average.",
)
testPositivityRatioDetails: Optional[TestPositivityRatioDetails] = pydantic.Field(None)
caseDensity: Optional[float] = pydantic.Field(
...,
description="The number of cases per 100k population calculated using a 7-day rolling average.",
)
contactTracerCapacityRatio: Optional[float] = pydantic.Field(
...,
description=(
"Ratio of currently hired tracers to estimated "
"tracers needed based on 7-day daily case average."
),
)
infectionRate: Optional[float] = pydantic.Field(
..., description="R_t, or the estimated number of infections arising from a typical case."
)
infectionRateCI90: Optional[float] = pydantic.Field(
...,
description="90th percentile confidence interval upper endpoint of the infection rate.",
)
icuCapacityRatio: Optional[float] = pydantic.Field(
...,
description="Ratio of staffed intensive care unit (ICU) beds that are currently in use.",
)
vaccinationsInitiatedRatio: Optional[float] = pydantic.Field(
None, description=("Ratio of population that has initiated vaccination.")
)
vaccinationsCompletedRatio: Optional[float] = pydantic.Field(
None, description=("Ratio of population that has completed vaccination.")
)
@staticmethod
def empty():
"""Returns an empty Metrics object."""
return Metrics(
testPositivityRatio=None,
caseDensity=None,
contactTracerCapacityRatio=None,
infectionRate=None,
infectionRateCI90=None,
icuCapacityRatio=None,
)
@enum.unique
class RiskLevel(enum.Enum):
"""COVID Risk Level.
## Risk Level Definitions
*Low* - On track to contain COVID
*Medium* - Slow disease growth
*High* - At risk of outbreak
*Critical* - Active or imminent outbreak
*Unknown* - Risk unknown
*Extreme* - Severe outbreak
"""
LOW = 0
MEDIUM = 1
HIGH = 2
CRITICAL = 3
UNKNOWN = 4
EXTREME = 5
@enum.unique
class CDCTransmissionLevel(enum.Enum):
"""CDC community transmission level."""
LOW = 0
MODERATE = 1
SUBSTANTIAL = 2
HIGH = 3
UNKNOWN = 4
class RiskLevels(base_model.APIBaseModel):
"""COVID risk levels for a region."""
overall: RiskLevel = pydantic.Field(..., description="Overall risk level for region.")
testPositivityRatio: RiskLevel = pydantic.Field(
..., description="Test positivity ratio risk level."
)
caseDensity: RiskLevel = pydantic.Field(..., description="Case density risk level.")
contactTracerCapacityRatio: RiskLevel = pydantic.Field(
..., description="Contact tracer capacity ratio risk level."
)
infectionRate: RiskLevel = pydantic.Field(..., description="Infection rate risk level.")
icuCapacityRatio: RiskLevel = pydantic.Field(..., description="ICU capacity ratio risk level.")
@classmethod
def empty(cls) -> "RiskLevels":
return RiskLevels(
overall=RiskLevel.LOW,
testPositivityRatio=RiskLevel.LOW,
caseDensity=RiskLevel.LOW,
contactTracerCapacityRatio=RiskLevel.LOW,
infectionRate=RiskLevel.LOW,
icuCapacityRatio=RiskLevel.LOW,
)
# Additional class used for bulk timeseries where we are not including all risk levels
# right now, only the overall risk level.
class RiskLevelsRow(base_model.APIBaseModel):
overall: RiskLevel = pydantic.Field(..., description="Overall risk level for region.")
caseDensity: RiskLevel = pydantic.Field(..., description="Case density risk level for region.")
class RiskLevelTimeseriesRow(RiskLevelsRow):
"""Timeseries data for risk levels. Currently only surfacing overall risk level for region."""
date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
class MetricsTimeseriesRow(Metrics):
"""Metrics data for a specific day."""
date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
class CdcTransmissionLevelTimeseriesRow(base_model.APIBaseModel):
date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
cdcTransmissionLevel: CDCTransmissionLevel = pydantic.Field(
..., description=CDC_TRANSMISSION_LEVEL_DESCRIPTION
)
class RegionSummary(base_model.APIBaseModel):
"""Summary of actual and prediction data for a single region."""
fips: str = pydantic.Field(
...,
description=(
"FIPS Code. FIPS codes are either 2-digit state codes, "
"5-digit county codes, 5-digit CBSA codes, or 1-digit '0' for the entire USA."
),
)
country: str = pydantic.Field(..., description="2-letter ISO-3166 Country code.")
state: Optional[str] = pydantic.Field(
..., description="2-letter ANSI state code. For CBSA regions, state is omitted."
)
county: Optional[str] = pydantic.Field(..., description="County name")
level: AggregationLevel = pydantic.Field(..., description="Level of region.")
lat: Optional[float] = pydantic.Field(
..., description="Latitude of point within the state or county. Currently a placeholder."
)
locationId: str = pydantic.Field(
...,
description="Location ID as defined here: https://github.com/covidatlas/li/blob/master/docs/reports-v1.md#general-notes",
)
long: Optional[float] = pydantic.Field(
..., description="Longitude of point within the state or county. Currently a placeholder."
)
population: int = pydantic.Field(
..., description="Total Population in geographic region.", gt=0
)
metrics: Metrics = pydantic.Field(...)
riskLevels: RiskLevels = pydantic.Field(..., description="Risk levels for region.")
cdcTransmissionLevel: CDCTransmissionLevel = pydantic.Field(
..., description=CDC_TRANSMISSION_LEVEL_DESCRIPTION
)
actuals: Actuals = pydantic.Field(...)
annotations: Annotations = pydantic.Field(...)
lastUpdatedDate: datetime.date = pydantic.Field(..., description="Date of latest data")
url: Optional[str] = pydantic.Field(
..., description="URL linking to Covid Act Now location page."
)
class RegionSummaryWithTimeseries(RegionSummary):
    """Summary data for a region with prediction timeseries data and actual timeseries data."""

    metricsTimeseries: List[MetricsTimeseriesRow] = pydantic.Field(...)
    actualsTimeseries: List[ActualsTimeseriesRow] = pydantic.Field(...)
    riskLevelsTimeseries: List[RiskLevelTimeseriesRow] = pydantic.Field(...)
    cdcTransmissionLevelTimeseries: List[CdcTransmissionLevelTimeseriesRow] = pydantic.Field(...)

    @property
    def region_summary(self) -> RegionSummary:
        """Project this timeseries model down to its plain RegionSummary fields."""
        # Iterating the model yields (field, value) pairs without forcing any
        # conversion:
        # https://pydantic-docs.helpmanual.io/usage/exporting_models/#dictmodel-and-iteration
        summary_fields = {
            field: value for field, value in self if field in RegionSummary.__fields__
        }
        return RegionSummary(**summary_fields)
class AggregateRegionSummary(base_model.APIBaseModel):
    """Summary data for multiple regions."""
    # Custom-root model: serializes directly as a JSON array of RegionSummary objects.
    __root__: List[RegionSummary] = pydantic.Field(...)
class AggregateRegionSummaryWithTimeseries(base_model.APIBaseModel):
    """Timeseries and summary data for multiple regions."""
    # Custom-root model: serializes directly as a JSON array of RegionSummaryWithTimeseries objects.
    __root__: List[RegionSummaryWithTimeseries] = pydantic.Field(...)
class RegionTimeseriesRowWithHeader(base_model.APIBaseModel):
    """Prediction timeseries row with location information."""
    # One flattened (location, date) row: location header columns plus that
    # day's nested payloads. Used to build flat (e.g. CSV-style) exports.
    date: datetime.date = pydantic.Field(..., description="Date of timeseries data point")
    country: str = pydantic.Field(..., description="2-letter ISO-3166 Country code.")
    state: Optional[str] = pydantic.Field(..., description="2-letter ANSI state code.")
    county: Optional[str] = pydantic.Field(..., description="County name")
    fips: str = pydantic.Field(
        ...,
        description=(
            "FIPS Code. FIPS codes are either 2-digit state codes, "
            "5-digit county codes, 5-digit CBSA codes, or 1-digit '0' for the entire USA."
        ),
    )
    lat: Optional[float] = pydantic.Field(
        ..., description="Latitude of point within the state or county"
    )
    long: Optional[float] = pydantic.Field(
        ..., description="Longitude of point within the state or county"
    )
    locationId: str = pydantic.Field(
        ...,
        description="Location ID as defined here: https://github.com/covidatlas/li/blob/master/docs/reports-v1.md#general-notes",
    )
    # Nested payloads may be None on dates with no data for that metric family.
    actuals: Optional[Actuals] = pydantic.Field(..., description="Actuals for given day")
    metrics: Optional[Metrics] = pydantic.Field(..., description="Metrics for given day")
    riskLevels: Optional[RiskLevelsRow] = pydantic.Field(
        ..., description="Risk Levels for given day"
    )
    cdcTransmissionLevel: Optional[CDCTransmissionLevel] = pydantic.Field(
        ..., description=CDC_TRANSMISSION_LEVEL_DESCRIPTION
    )
class AggregateFlattenedTimeseries(base_model.APIBaseModel):
    """Flattened timeseries data for multiple regions."""
    # Custom-root model: serializes as a flat JSON array of per-day rows.
    __root__: List[RegionTimeseriesRowWithHeader] = pydantic.Field(...)
| [
"noreply@github.com"
] | yangyijane.noreply@github.com |
ab90b58745ab3cc097586354bf150248a81ee6f9 | 6938ecea830f45abbef0218fa33be5ab21986eca | /ucpe/bcm_controller/bcm_controller.py | af406abae3955b4333e7333c1c19c771b0721204 | [] | no_license | AnEscapist/sdn-orchestrator | 16ea20d8cdcd817990bd34da071b03451a2cf3dd | f242bb2ab42864cde9f0bafb1a92acbb33173b24 | refs/heads/master | 2020-07-01T09:22:29.835594 | 2019-08-11T19:46:44 | 2019-08-11T19:46:44 | 201,121,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,367 | py |
from inspect import signature, Parameter
from ucpe.bcm_controller.utils import get_caller_function_name
import ucpe.bcm_controller.grpc.autobcm_pb2 as autobcm_pb2
import ucpe.bcm_controller.grpc.autobcm_pb2_grpc as autobcm_pb2_grpc
import grpc
hostname = "10.10.81.250:50051"
class BCMController:
    """Dispatch layer that routes orchestrator requests onto the module-level
    gRPC helper functions via _call_function."""

    @staticmethod
    def bcm_controller_show_active_ports(**kwargs):
        return _call_function(show_active_ports, **kwargs)

    @staticmethod
    def bcm_controller_create_vlan(**kwargs):
        return _call_function(create_vlan, **kwargs)

    @staticmethod
    def bcm_controller_destroy_vlan(**kwargs):
        return _call_function(destroy_vlan, **kwargs)

    @staticmethod
    def bcm_controller_show_vlans(**kwargs):
        return _call_function(show_vlans, **kwargs)

    @staticmethod
    def bcm_controller_add_ports(**kwargs):
        return _call_function(add_ports, **kwargs)

    @staticmethod
    def bcm_controller_rem_ports(**kwargs):
        return _call_function(rem_ports, **kwargs)

    @staticmethod
    def bcm_controller_set_pvlan(**kwargs):
        return _call_function(set_pvlan, **kwargs)

    @staticmethod
    def bcm_controller_show_pvlans(**kwargs):
        return _call_function(show_pvlans, **kwargs)
def show_active_ports():
    """Ask the switch, over gRPC, for its active-port listing."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    return stub.ShowActivePorts(autobcm_pb2.ConfigRequest()).message
def create_vlan(vlanid, pbm='', ubm=''):
    """Create VLAN `vlanid`; when `pbm` is non-empty, also add those ports.

    Returns the service message(s), newline-joined when ports were added.
    """
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm, ubm=ubm)
    messages = [stub.CreateVLAN(request).message]
    if pbm != '':
        messages.append(stub.AddPorts(request).message)
    return '\n'.join(messages)
def destroy_vlan(vlanid):
    """Remove VLAN `vlanid` from the switch."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    return stub.DestroyVLAN(autobcm_pb2.ConfigRequest(vlanid=vlanid)).message
def show_vlans():
    """Return the switch's VLAN table as reported by the gRPC service."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    return stub.ShowVLANs(autobcm_pb2.ConfigRequest()).message
def add_ports(vlanid, pbm, ubm=''):
    """Add ports `pbm` (optionally untagged `ubm`) to VLAN `vlanid`."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm, ubm=ubm)
    return stub.AddPorts(request).message
def rem_ports(vlanid, pbm):
    """Remove ports `pbm` from VLAN `vlanid`."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm)
    return stub.RemovePorts(request).message
def set_pvlan(vlanid, pbm):
    """Assign ports `pbm` to private VLAN `vlanid`."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm)
    return stub.SetPVLAN(request).message
def show_pvlans():
    """Return the switch's private-VLAN table as reported by the gRPC service."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    return stub.ShowPVLANs(autobcm_pb2.ConfigRequest()).message
def _call_function(func, **kwargs):
    """Extract `func`'s arguments from kwargs['body'] and invoke it.

    Parameters without defaults must be present in the body (KeyError
    otherwise); optional parameters fall back to their declared defaults.
    Returns {'result': <return value>, 'function': <calling wrapper name>}.
    """
    body = kwargs["body"]
    selected = {}
    for name, spec in signature(func).parameters.items():
        if spec.default == Parameter.empty:
            # Required argument: its absence is a caller error.
            if name not in body:
                raise KeyError("missing argument " + name + " in call to " + func.__name__)
            selected[name] = body[name]
        else:
            selected[name] = body.get(name, spec.default)
    return {
        "result": func(**selected),
        "function": get_caller_function_name(),
    }
| [
"azhang307@gatech.edu"
] | azhang307@gatech.edu |
a7b174b85eba3c6f121e88eb9985de14f93428b9 | 14ac991bba2eb7d59a1d76db792b7689316f8060 | /leetcode/00179.py | 2097fd3046480dd7c91a1af857c955626b82b82d | [] | no_license | munagekar/cp | bde88fa565a7e2158ebe0f2611c4718a3d2970f1 | c25d29f68943e3721233e177abe13068e5f40e4b | refs/heads/master | 2021-07-04T05:00:02.511874 | 2021-05-30T14:30:05 | 2021-05-30T14:30:05 | 240,286,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from itertools import zip_longest
from functools import cmp_to_key
def cmp(a, b):
    """Comparator for largest-number ordering of digit strings.

    Positive when concatenation a+b is larger than b+a, negative when it is
    smaller, zero when the two concatenations are equal.
    """
    if a + b > b + a:
        return 1
    # BUG FIX: the original returned -1 even when a+b == b+a, producing an
    # inconsistent comparator (cmp(a, b) and cmp(b, a) both negative).
    if a + b < b + a:
        return -1
    return 0
class Solution:
    def largestNumber(self, nums: List[int]) -> str:
        """Order the numbers so that their concatenation is maximal (LeetCode 179)."""
        ordered = sorted(map(str, nums), key=cmp_to_key(cmp), reverse=True)
        combined = "".join(ordered)
        # Collapse an all-zero result ("000...") down to a single "0".
        return combined.lstrip("0") or "0"
"avm.abhishek@gmail.com"
] | avm.abhishek@gmail.com |
674c893b5c3d74c716ca8d94e1128a7f82ceea42 | fbf22c2f482f47d45f1ddd024cc2a12236c25dbe | /transfer/lib/gitCom.py | 146f8682c71806418ebca2a3ee96da78ba44e153 | [] | no_license | LofOWL/Jupyter-code-tracker | 3068e7e71ad9ecc43e8118257434b00fdb0cb9d9 | 2cdf55dc3d7f7f697ae57fe7661b23150b1a5edf | refs/heads/master | 2022-02-18T05:25:51.427151 | 2022-02-13T22:03:38 | 2022-02-13T22:03:38 | 234,213,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | import os
import subprocess
class git:
    """Thin helper for running git commands inside a repository directory."""

    def __init__(self, path):
        print(path)
        os.chdir(path)  # subsequent commands run from the repo root
        self.path = path
        # Canned command: list every commit hash touching a notebook file.
        self.ipynb_commit = "git log --pretty=%H --follow *.ipynb"
        self.commit_name_status = ""
        self.commit_parent = ""

    def set_commit_name_status(self, commit):
        """Prepare a `git show --name-status` command for the given commit."""
        self.commit_name_status = f'git show {commit!s} --name-status --pretty="" '

    def set_commit_parent(self, commit):
        """Prepare a `git show` command whose raw header exposes the parents."""
        self.commit_parent = f'git show {commit!s} --name-status --pretty="raw" '

    def run(self, commond):
        """Execute `commond` through the shell and return its stdout lines."""
        os.chdir(self.path)
        raw = subprocess.check_output(commond, shell=True)
        # Drop the empty trailing element produced by the final newline.
        return raw.decode("utf-8").split("\n")[:-1]
| [
"jeffersonjjin@gmail.com"
] | jeffersonjjin@gmail.com |
fd8b43d4911e377665daaa891b42fdc5fbbe6787 | bff728cca48292af65e6c14ca942ba92f0161f0f | /wiki_search.py | 9354460dac59f44ecdc1dba005eb13e21b9eff58 | [] | no_license | enriched-uranium235/chatbot | 1520a68d2084c4b4b61136ce9da167786a5878d8 | e95f6dde49dc684b93b4f95ad8da085b95840dd3 | refs/heads/master | 2023-02-20T05:18:25.807488 | 2021-01-06T07:43:52 | 2021-01-06T07:43:52 | 327,227,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import wikipedia
def wikipediaSearch(search_text):
    """Look up `search_text` on Japanese Wikipedia.

    Returns the article's first sentence plus its URL, or an error/
    not-found message (all user-facing strings are intentionally Japanese).
    """
    response_string = ""
    wikipedia.set_lang("ja")
    search_response = wikipedia.search(search_text)
    if not search_response:
        response_string = "その単語は登録されていません。"
        return response_string
    try:
        wiki_page = wikipedia.page(search_response[0])
    except Exception as e:
        # BUG FIX: Python 3 exceptions have no `.message` attribute, so the
        # original `e.message` raised AttributeError inside this handler.
        # Report the exception type name and its string form instead.
        response_string = "エラーが発生しました。\n{}\n{}".format(type(e).__name__, str(e))
        return response_string
    wiki_content = wiki_page.content
    # First sentence = everything up to the first Japanese full stop.
    response_string += wiki_content[0:wiki_content.find("。")] + "。\n"
    response_string += "リンクはこちら:" + wiki_page.url
    return response_string
if __name__ == "__main__":
    # Simple console REPL: keep looking up terms until an empty line is entered.
    while True:
        user_input = input("検索したい単語を入力してください。:")
        if not user_input:
            break
        print(wikipediaSearch(user_input))
"hexanitrobenzene@gmail.com"
] | hexanitrobenzene@gmail.com |
62a61d7f251b2dd796c2a0864e338c6272236b1a | 87828431072e3c60a92dc274b078d7cf1e5705be | /back_python/account/migrations/0001_initial.py | 34d3acacd2cf509d472797922ba4727ed9535d39 | [] | no_license | cash2one/habit | 90adfd80427a0c0d04104ea5cf8123cf025b2d8b | 3782e498e1e40d6b638aaf2c7c1ac087c0739a36 | refs/heads/master | 2021-01-19T12:32:51.627847 | 2017-04-11T15:41:28 | 2017-04-11T15:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-25 08:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; creates the initial
    # `Account` ledger table. Avoid hand-editing field definitions.
    initial = True
    dependencies = [
        ('activity', '0013_auto_20170125_1649'),
    ]
    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tradeDate', models.DateField(auto_now=True, verbose_name='时间')),
                ('tradeType', models.CharField(choices=[('fee', '套餐服务费'), ('deposit', '押金'), ('milyInput', '套餐囤米'), ('milyInputByDeposit', '押金囤米'), ('milyOutput', '米粒打赏'), ('milyOutputByDonate', '米粒捐赠'), ('feedBack', '打卡奖励米粒'), ('feedBackReturnDeposit', '打卡返还押金'), ('aveDeposit', '平均分配懒人押金')], max_length=50, verbose_name='类型')),
                ('fee', models.IntegerField(default=0, verbose_name='套餐服务费')),
                ('deposit', models.IntegerField(default=0, verbose_name='囤米押金')),
                ('milyInput', models.IntegerField(default=0, verbose_name='套餐囤米')),
                ('milyInputByDeposit', models.IntegerField(default=0, verbose_name='押金囤米')),
                ('milyOutput', models.IntegerField(default=0, verbose_name='米粒打赏')),
                ('milyOutputByDonate', models.IntegerField(default=0, verbose_name='米粒捐赠')),
                ('feedBack', models.IntegerField(default=0, verbose_name='打卡奖励米粒')),
                ('feedBackReturnDeposit', models.IntegerField(default=0, verbose_name='打卡奖励押金')),
                ('aveDeposit', models.IntegerField(default=0, verbose_name='平均分配懒人押金')),
                ('createdTime', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('updatedTime', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.Activity', verbose_name='活动')),
            ],
        ),
    ]
| [
"jiangyong@qq.com"
] | jiangyong@qq.com |
57154970bf889cb5ae8637e4d25670f1909939ea | 1213a0cc401365556d488913693cde54281ef350 | /backup_fully_working_with_sensor/ai.py | 5f272f3508088f2185990cc42d95d0cc93923477 | [] | no_license | mkhetan/session10_endgame_exp | 188adf0a8dae15f7603cc40fdc3b2dc9de1d0424 | e6cd6066697c95825708f264f77a123622e10d5b | refs/heads/master | 2022-07-15T22:04:31.604956 | 2020-05-12T05:19:33 | 2020-05-12T05:19:33 | 261,933,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,067 | py | # AI for Self Driving Car
# Importing the libraries
import numpy as np
import random
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
# Creating the architecture of the Neural Network
class Actor(nn.Module):
    """Deterministic policy network: maps a state to a bounded action vector."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Two hidden layers (400 -> 300) feeding a tanh-squashed output head.
        self.layer_1 = nn.Linear(state_dim, 400)
        self.layer_2 = nn.Linear(400, 300)
        self.layer_3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, x):
        hidden = F.relu(self.layer_2(F.relu(self.layer_1(x))))
        # Scale tanh's (-1, 1) range up to the environment's action bound.
        return self.max_action * torch.tanh(self.layer_3(hidden))
class Critic(nn.Module):
    """Twin Q-networks (TD3): two independent critics scoring (state, action) pairs."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # First critic head.
        self.layer_1 = nn.Linear(state_dim + action_dim, 400)
        self.layer_2 = nn.Linear(400, 300)
        self.layer_3 = nn.Linear(300, 1)
        # Second critic head (independent weights).
        self.layer_4 = nn.Linear(state_dim + action_dim, 400)
        self.layer_5 = nn.Linear(400, 300)
        self.layer_6 = nn.Linear(300, 1)

    def forward(self, x, u):
        # Both critics consume the concatenated (state, action) vector.
        xu = torch.cat([x, u], 1)
        q1 = self.layer_3(F.relu(self.layer_2(F.relu(self.layer_1(xu)))))
        q2 = self.layer_6(F.relu(self.layer_5(F.relu(self.layer_4(xu)))))
        return q1, q2

    def Q1(self, x, u):
        # Only the first critic is needed for the actor's policy-gradient step.
        xu = torch.cat([x, u], 1)
        return self.layer_3(F.relu(self.layer_2(F.relu(self.layer_1(xu)))))
# Implementing Experience Replay
class ReplayBuffer(object):
    """Fixed-capacity experience store with uniform random sampling."""

    def __init__(self, max_size=1e6):
        self.storage = []
        self.max_size = max_size
        self.ptr = 0  # next slot to overwrite once the buffer is full

    def add(self, transition):
        # Grow until capacity is reached, then overwrite oldest-first (ring).
        if len(self.storage) == self.max_size:
            self.storage[int(self.ptr)] = transition
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(transition)

    def sample(self, batch_size):
        """Sample `batch_size` transitions uniformly (with replacement)."""
        indices = np.random.randint(0, len(self.storage), size=batch_size)
        states, next_states, actions, rewards, dones = [], [], [], [], []
        for idx in indices:
            state, next_state, action, reward, done = self.storage[idx]
            states.append(np.array(state, copy=False))
            next_states.append(np.array(next_state, copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(np.array(reward, copy=False))
            dones.append(np.array(done, copy=False))
        # Rewards/dones come back as column vectors for loss arithmetic.
        return (
            np.array(states),
            np.array(next_states),
            np.array(actions),
            np.array(rewards).reshape(-1, 1),
            np.array(dones).reshape(-1, 1),
        )
# Selecting the device (CPU or GPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TD3(object):
    """Twin Delayed DDPG (TD3) agent: actor-critic with twin critics, target
    networks, target-policy smoothing, and delayed policy updates."""
    def __init__(self, state_dim, action_dim, max_action):
        """Build actor/critic networks plus target copies initialized to the same weights."""
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters())
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters())
        self.max_action = max_action
    def select_action(self, state):
        """Return the deterministic policy action for a single state (flat numpy array)."""
        state = torch.Tensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()
    def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
        """Run `iterations` TD3 update steps using samples from `replay_buffer`.

        discount: reward discount factor gamma.
        tau: polyak averaging coefficient for the target networks.
        policy_noise / noise_clip: target-policy smoothing noise and its clamp.
        policy_freq: actor (and target) update period relative to critic updates.
        """
        for it in range(iterations):
            # Step 4: We sample a batch of transitions (s, s’, a, r) from the memory
            batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(batch_size)
            state = torch.Tensor(batch_states).to(device)
            next_state = torch.Tensor(batch_next_states).to(device)
            action = torch.Tensor(batch_actions).to(device)
            reward = torch.Tensor(batch_rewards).to(device)
            done = torch.Tensor(batch_dones).to(device)
            # Step 5: From the next state s’, the Actor target plays the next action a’
            next_action = self.actor_target(next_state)
            # Step 6: We add Gaussian noise to this next action a’ and we clamp it in a range of values supported by the environment
            noise = torch.Tensor(batch_actions).data.normal_(0, policy_noise).to(device)
            noise = noise.clamp(-noise_clip, noise_clip)
            next_action = (next_action + noise).clamp(-self.max_action, self.max_action)
            # Step 7: The two Critic targets take each the couple (s’, a’) as input and return two Q-values Qt1(s’,a’) and Qt2(s’,a’) as outputs
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            # Step 8: We keep the minimum of these two Q-values: min(Qt1, Qt2)
            target_Q = torch.min(target_Q1, target_Q2)
            # Step 9: We get the final target of the two Critic models, which is: Qt = r + γ * min(Qt1, Qt2), where γ is the discount factor
            target_Q = reward + ((1 - done) * discount * target_Q).detach()
            # Step 10: The two Critic models take each the couple (s, a) as input and return two Q-values Q1(s,a) and Q2(s,a) as outputs
            current_Q1, current_Q2 = self.critic(state, action)
            # Step 11: We compute the loss coming from the two Critic models: Critic Loss = MSE_Loss(Q1(s,a), Qt) + MSE_Loss(Q2(s,a), Qt)
            critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
            # Step 12: We backpropagate this Critic loss and update the parameters of the two Critic models with a SGD optimizer
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()
            # Step 13: Once every two iterations, we update our Actor model by performing gradient ascent on the output of the first Critic model
            if it % policy_freq == 0:
                actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                self.actor_optimizer.step()
                # Step 14: Still once every two iterations, we update the weights of the Actor target by polyak averaging
                for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                    target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
                # Step 15: Still once every two iterations, we update the weights of the Critic target by polyak averaging
                for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                    target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
    # Making a save method to save a trained model
    def save(self, filename, directory):
        """Persist actor/critic weights to '<directory>/<filename>_{actor,critic}.pth'."""
        torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))
        torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))
    # Making a load method to load a pre-trained model
    def load(self, filename, directory):
        """Restore actor/critic weights previously written by save()."""
        self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename)))
        self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename)))
| [
"mkhetan@extremenetworks.com"
] | mkhetan@extremenetworks.com |
72e87ff5fac87b45a4fbe10d20bbd6dc95907e38 | 242ebcb7220c2e16c141a6bea4a09c7cb5e4287d | /accounts/forms.py | 83f3c4a31f7b0a3a43e78a73a2980318f2d55c71 | [] | no_license | olivx/estudos_crud | 06ed8c269a4c36db3579daf6d6aef5e7d49dc5f9 | 24af031ed44a7c6cf567368556d368fe58ab1090 | refs/heads/master | 2021-01-11T09:28:49.355388 | 2017-03-03T15:17:25 | 2017-03-03T15:17:25 | 81,199,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | from django import forms
from django.contrib.auth import authenticate
from accounts.models import User
from django.utils.translation import ugettext_lazy as _
class RegisterForm(forms.ModelForm):
    """Sign-up form collecting username/email plus a double-entered password."""

    password1 = forms.CharField(max_length=30, widget=forms.PasswordInput, required=True)
    password2 = forms.CharField(max_length=30, widget=forms.PasswordInput, required=True)

    def clean_password2(self):
        """Validate that both password entries match; return the password."""
        password1 = self.cleaned_data['password1']
        password2 = self.cleaned_data['password2']
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        # BUG FIX: the original returned self.cleaned_data, which Django then
        # stored as cleaned_data['password2'] (a self-referential dict).
        # A field clean method must return the field's cleaned value.
        return password2

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')

    def save(self, commit=True):
        """Create the user, hashing the password before persisting.

        :param commit: When True, the user is saved to the database immediately.
        :return: The (possibly unsaved) User instance.
        """
        user = super(RegisterForm, self).save(commit=False)
        # BUG FIX: the original assigned the username to user.email (then
        # immediately overwrote it with the real email); set username instead.
        user.username = self.cleaned_data['username']
        user.email = self.cleaned_data['email']
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user
class AuthenticanUserForm(forms.Form):
    """Email/password login form with account-state checks."""

    email = forms.EmailField(label='Email', max_length=30, required=True)
    password = forms.CharField(label='Password', max_length=30, required=True, widget=forms.PasswordInput)

    error_messages = {
        'invalid_login': _(
            "Please enter a correct %(email)s and password. Note that both "
            "fields may be case-sensitive."
        ),
        'inactive': _("This account is inactive."),
        'email_confirmation': _(
            'this email is not confirmed yet, please confirm the your eamil and try again'
        ),
    }

    def clean(self):
        """Authenticate the credentials; stash the user on self.user."""
        data = self.cleaned_data
        email = data.get('email')
        password = data.get('password')
        if email and password:
            self.user = authenticate(email=email, password=password)
            if self.user is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'email': 'Email'},
                )
        return data

    def confirm_login_allowed(self, user):
        """
        Controls whether the given User may log in. This is a policy setting,
        independent of end-user authentication. Active users with a confirmed
        email address are allowed; otherwise a ``forms.ValidationError`` is
        raised. Returns None when login is permitted.
        """
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )
        if not user.profile.email_confirmation:
            raise forms.ValidationError(
                self.error_messages['email_confirmation'],
                code='email_confirmation'
            )

    class Meta:
        fields = ('email', 'password')
| [
"oliveiravicente.net@gmail.com"
] | oliveiravicente.net@gmail.com |
1cd644fe4370089fe5cf86ae2fc2d3fa316e8e2e | e629d61db2f08f66cf46d934ab0f87fa1666de05 | /backend/lively_heart_25130/urls.py | 5c32c3d6b9e17dce8e7eb899ed0a90b4b5455ae7 | [] | no_license | crowdbotics-apps/lively-heart-25130 | ec80559da8d6b168df1ce75415c5d6b916c97ee1 | ed33785297cbb8f794034de1bc3c7fb81bdbe048 | refs/heads/master | 2023-03-24T16:57:41.146127 | 2021-03-19T21:41:18 | 2021-03-19T21:41:18 | 349,561,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py | """lively_heart_25130 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Route table: app pages, allauth account flows, REST API, and Django admin.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Lively Heart"
admin.site.site_title = "Lively Heart Admin Portal"
admin.site.index_title = "Lively Heart Admin"
# swagger
api_info = openapi.Info(
    title="Lively Heart API",
    default_version="v1",
    description="API documentation for Lively Heart App",
)
# Swagger UI is only served to authenticated users.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Catch-all: serve the single-page app's index.html for client-side routes.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c1f3f5ba64a7e7a7306a3bb2c2820a4dbb6a892e | 5a7b38ee398e4f63a26b2ec2f6fa1efbce025264 | /api/src/dao/commentDao.py | 64c4f0711be9c2461c689cf2af8510d641966f97 | [] | no_license | AJarombek/saints-xctf-api | f1e361bfc762dcc197cbc78b2b41f7cff18919b5 | c2812089ec0351fd72ef7b1581a48bc55d65fd0e | refs/heads/main | 2023-02-09T11:50:04.698394 | 2023-01-29T18:52:33 | 2023-01-29T18:52:33 | 190,956,043 | 5 | 2 | null | 2023-01-22T23:04:10 | 2019-06-09T02:35:46 | Python | UTF-8 | Python | false | false | 6,225 | py | """
Comment data access from the SaintsXCTF MySQL database. Contains comments posted on exercise logs.
Author: Andrew Jarombek
Date: 7/3/2019
"""
from datetime import datetime
from sqlalchemy import desc
from database import db
from dao.basicDao import BasicDao
from model.Comment import Comment
class CommentDao:
    """Data access object for comments posted on exercise logs.

    All read paths exclude soft-deleted rows; deletes come in both hard
    (SQL DELETE) and soft (deleted-flag update) variants.
    """
    @staticmethod
    def get_comments() -> list:
        """
        Retrieve all the comments in the database.
        :return: The result of the query.
        """
        # Oldest first (ascending time), unlike get_comments_by_log_id().
        return (
            Comment.query.filter(Comment.deleted.is_(False))
            .order_by(Comment.time)
            .all()
        )
    @staticmethod
    def get_comment_by_id(comment_id: int) -> Comment:
        """
        Retrieve a single comment by its unique id
        :param comment_id: The unique identifier for a comment.
        :return: The result of the query.
        """
        return (
            Comment.query.filter_by(comment_id=comment_id)
            .filter(Comment.deleted.is_(False))
            .first()
        )
    @staticmethod
    def get_comments_by_log_id(log_id: int) -> list:
        """
        Retrieve all the comments on a specific exercise log.
        :param log_id: Unique identifier for an exercise log.
        :return: The result of the query.
        """
        # Newest first, for display on the log's comment feed.
        return (
            Comment.query.filter_by(log_id=log_id)
            .filter(Comment.deleted.is_(False))
            .order_by(desc(Comment.time))
            .all()
        )
    @staticmethod
    def add_comment(new_comment: Comment) -> bool:
        """
        Add a comment for an exercise log to the database.
        :param new_comment: Object representing a comment for an exercise log.
        :return: True if the comment is inserted into the database, False otherwise.
        """
        # pylint: disable=no-member
        db.session.add(new_comment)
        return BasicDao.safe_commit()
    @staticmethod
    def update_comment(comment: Comment) -> bool:
        """
        Update a comment in the database. Certain fields (log_id, username, first, last) can't be modified.
        :param comment: Object representing an updated comment.
        :return: True if the comment is updated in the database, False otherwise.
        """
        # pylint: disable=no-member
        db.session.execute(
            """
            UPDATE comments SET
            time=:time,
            content=:content,
            modified_date=:modified_date,
            modified_app=:modified_app
            WHERE comment_id=:comment_id
            AND deleted IS FALSE
            """,
            {
                "comment_id": comment.comment_id,
                "time": comment.time,
                "content": comment.content,
                "modified_date": comment.modified_date,
                "modified_app": comment.modified_app,
            },
        )
        return BasicDao.safe_commit()
    @staticmethod
    def delete_comment_by_id(comment_id: int) -> bool:
        """
        Delete a comment from the database based on its id.
        :param comment_id: ID which uniquely identifies the comment.
        :return: True if the deletion was successful without error, False otherwise.
        """
        # Hard delete; rows already soft-deleted are intentionally left alone.
        # pylint: disable=no-member
        db.session.execute(
            "DELETE FROM comments WHERE comment_id=:comment_id AND deleted IS FALSE",
            {"comment_id": comment_id},
        )
        return BasicDao.safe_commit()
    @staticmethod
    def delete_comments_by_log_id(log_id: int) -> bool:
        """
        Delete comments from the database based on the log they are bound 2.
        :param log_id: ID which uniquely identifies the log.
        :return: True if the deletions were successful without error, False otherwise.
        """
        # pylint: disable=no-member
        db.session.execute(
            "DELETE FROM comments WHERE log_id=:log_id AND deleted IS FALSE",
            {"log_id": log_id},
        )
        return BasicDao.safe_commit()
    @staticmethod
    def soft_delete_comment(comment: Comment) -> bool:
        """
        Soft Delete a comment from the database.
        :param comment: Object representing a comment to soft delete.
        :return: True if the soft deletion was successful without error, False otherwise.
        """
        # Deletion metadata (flag, dates, app) comes from the passed-in model.
        # pylint: disable=no-member
        db.session.execute(
            """
            UPDATE comments SET
            deleted=:deleted,
            modified_date=:modified_date,
            modified_app=:modified_app,
            deleted_date=:deleted_date,
            deleted_app=:deleted_app
            WHERE comment_id=:comment_id
            AND deleted IS FALSE
            """,
            {
                "comment_id": comment.comment_id,
                "deleted": comment.deleted,
                "modified_date": comment.modified_date,
                "modified_app": comment.modified_app,
                "deleted_date": comment.deleted_date,
                "deleted_app": comment.deleted_app,
            },
        )
        return BasicDao.safe_commit()
    @staticmethod
    def soft_delete_comments_by_log_id(log_id: int) -> bool:
        """
        Soft Delete comments associated with an exercise log from the database.
        :param log_id: Unique identifier for an exercise log.
        :return: True if the soft deletion was successful without error, False otherwise.
        """
        # Unlike soft_delete_comment(), timestamps/app are stamped here
        # rather than taken from a model instance.
        # pylint: disable=no-member
        db.session.execute(
            """
            UPDATE comments SET
            deleted=:deleted,
            modified_date=:modified_date,
            modified_app=:modified_app,
            deleted_date=:deleted_date,
            deleted_app=:deleted_app
            WHERE log_id=:log_id
            AND deleted IS FALSE
            """,
            {
                "log_id": log_id,
                "deleted": True,
                "modified_date": datetime.now(),
                "modified_app": "saints-xctf-api",
                "deleted_date": datetime.now(),
                "deleted_app": "saints-xctf-api",
            },
        )
        return BasicDao.safe_commit()
| [
"ajarombek95@gmail.com"
] | ajarombek95@gmail.com |
b301f0492fba0e2d359e1d51edb2d2c889cdd464 | 7836becef5fce2c55bef3ac08765f856f1ab2689 | /lx_blog/api_v1/profile.py | e0d7fd6f9b83eef6a1333c73621f3eb50556aa1f | [] | no_license | 2218997647/Blog_Test_LX | b049acd392c146fc054c918d32aa8976aca70b9e | d5e0c3e16e10a22e1f9547375256785a86f055b7 | refs/heads/master | 2020-04-05T21:48:19.407676 | 2018-11-12T15:37:42 | 2018-11-12T15:37:42 | 157,233,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | # coding:utf-8
from . import api
from lx_blog.utils.commons import login_required
from flask import g, current_app, jsonify, request, session
from lx_blog.utils.response_code import RET
from lx_blog.models import User
from lx_blog import db, constants
@api.route("/users/name", methods=["PUT"])
@login_required
def change_user_name():
    """Update the signed-in user's display name (PUT /users/name)."""
    # @login_required guarantees g.user_id is populated.
    user_id = g.user_id
    req_data = request.get_json()
    if not req_data:
        return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
    name = req_data.get("name")
    if not name:
        return jsonify(errno=RET.PARAMERR, errmsg="名字不能为空")
    # Persist the new name; a unique index on `name` rejects duplicates.
    try:
        User.query.filter_by(id=user_id).update({"name": name})
        db.session.commit()
    except Exception as err:
        current_app.logger.error(err)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="设置用户错误")
    # Keep the cached session name in sync with the database.
    session["name"] = name
    return jsonify(errno=RET.OK, errmsg="OK", data={"name": name})
@api.route("/user", methods=["GET"])
@login_required
def get_user_profile():
"""获取个人信息"""
user_id = g.user_id
# 查询数据库获取个人信息
try:
user = User.query.get(user_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="获取用户信息失败")
if user is None:
return jsonify(errno=RET.NODATA, errmsg="无效操作")
return jsonify(errno=RET.OK, errmsg="OK", data=user.to_dict())
| [
"2218997647@qq.com"
] | 2218997647@qq.com |
dcd252960a1b08665c15f551c29bd9f9aacd0218 | 1abd12b1de8c92b5d6fa251544892d7f55c45ab1 | /MA-ConceptMining/inout/inputoutput.py | f0b680c5d5d92aadea071a5aaf8dc7cc3a01eedc | [
"MIT"
] | permissive | johannabi/MA-ConceptMining | fddd0511b71f664605e10ce33dad29055ba1239e | 5c257c234ee86ef10f3b358f623f342aa829df54 | refs/heads/master | 2023-07-15T15:22:02.003888 | 2021-08-30T09:58:33 | 2021-08-30T09:58:33 | 246,010,367 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,476 | py | import sqlite3 as sql
import re
import csv
def read_file_by_line(file):
"""
reads a text file line by line
:param file: path to file
:return: list of all lines
"""
word_list = list()
with open(file, mode='r', encoding='utf-8') as f:
for line in f:
line = line.strip()
word_list.append(line)
return word_list
def read_esco_csv(file, only_unigrams, synsets):
"""
reads a csv file containing esco skills
:param file: path to file
:param only_unigrams: True if you only want to collect unigram skills
:param synsets: True if you want to group skills by synsets
:return: list of skills or list of synsets
"""
if synsets:
synset_list = list()
else:
skills = list()
with open(file, newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
next(reader, None) # skip header
for row in reader:
pref_label = row[4]
alt_labels = row[5]
synset = set()
if only_unigrams:
if ' ' not in pref_label:
synset.add(pref_label)
else:
synset.add(pref_label)
if len(alt_labels) > 0:
label_list = alt_labels.split('\n')
for l in label_list:
if only_unigrams:
if ' ' not in l:
synset.add(l)
else:
synset.add(l)
if synsets:
if len(synset) > 1: # process only synset with more than one member
synset_list.append(synset)
else:
skills.extend(synset)
if synsets:
return synset_list
else:
return skills
def read_ams_synsets(file, only_unigrams, synsets):
"""
:param file:
:param only_unigrams:
:param synsets:
:return:
"""
conn = sql.connect(file)
sql_select = """SELECT Synonyms, Orig_String FROM Categories"""
c = conn.cursor()
c.execute(sql_select)
rows = c.fetchall()
if synsets:
synsets = set()
for r in rows:
syns = r[0]
comp = r[1]
if syns is None:
continue
# collect als synonyms that are single-word-expressions
synset = set([s.lower() for s in syns.split(' | ') if ' ' not in s])
if only_unigrams:
if ' ' not in comp:
synset.add(comp.lower())
else:
synset.add(comp.lower())
if len(synset) > 1:
synsets.add(tuple(synset))
c.close()
return synsets
else:
skills = list()
for r in rows:
comp = r[1]
if only_unigrams:
if ' ' not in comp:
skills.append(comp.lower())
else:
skills.append(comp.lower())
c.close()
return skills
def read_jobads_content(file):
"""
reads all jobads from given sqlite file
:param file: path to file
:return: list of all jobads
"""
conn = sql.connect(file)
sql_select = """SELECT STELLENBESCHREIBUNG FROM jobs_textkernel"""
c = conn.cursor()
c.execute(sql_select)
rows = c.fetchall()
jobs = list()
for r in rows:
jobs.append(r[0])
return jobs
| [
"jbinnewitt@gmail.com"
] | jbinnewitt@gmail.com |
6af792a1f4600d4fa1047802376523a08b69a6c1 | 7f2b233462e38f7fd2094bd4e304ca1b7525ec24 | /practice sessin.py | 7cc2b376eca0f3676554c0f2627c86df150aab19 | [] | no_license | vin531999/vin531999 | 919ec99b8dc21a3a8e19357967b72d16211dd733 | 647fb7d4e91b8d6a966a61c150831168c67677be | refs/heads/master | 2021-04-12T18:35:29.594555 | 2020-03-24T04:31:55 | 2020-03-24T04:31:55 | 249,100,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | n = int(input("enter the numbers :"))
c = []
d = 1
for i in range (n):
i = int(input(" "))
c.append(i)
d = d*i
print(d)
| [
"noreply@github.com"
] | vin531999.noreply@github.com |
c3eaff4220b08fc56b7f529974b9922daf3908ab | 765e0744b33ee9e9b85829d8af542141ed0d1210 | /mproxy.py | c24eedd38d4254ff9ff84835e9aada4814eba6f5 | [] | no_license | tylerwowen/mitm_proxy | 0b5a0ebb13424c3993dce2c431b1ce54c5e8cd66 | 367d190b238c6f497ce4791abd3360e0c6447c1c | refs/heads/master | 2023-04-07T23:33:38.094417 | 2023-03-28T22:35:47 | 2023-03-28T22:35:47 | 55,040,650 | 2 | 0 | null | 2023-03-28T22:35:48 | 2016-03-30T07:07:37 | Python | UTF-8 | Python | false | false | 27 | py | import src
src.__init__()
| [
"ouyang@cs.ucsb.edu"
] | ouyang@cs.ucsb.edu |
62e62e7af49f8e9e5000474f3e2369b29eac4b01 | 76fd9a2d3b732a73b688a0c227bfe07219ca1ace | /wp3db/__init__.py | 951dc19a9d4263b1eba80b26bb6f304589845284 | [] | no_license | wp3-wearable/dbmodels | 61d1add4b09eae1f7be9d36a8db73df8bf9b240d | f881f98e6091649d8b8a79b5b9bc895792f23d54 | refs/heads/master | 2020-04-13T14:25:25.887785 | 2018-12-27T11:14:51 | 2018-12-27T11:14:51 | 163,262,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | from .db import Session
| [
"dev.trk.9001@gmail.com"
] | dev.trk.9001@gmail.com |
ef679fa89caf7d38e7aa2766c74680ff885e8be4 | ae9bb7babce2a0349ae932985cf418a03057c670 | /ProgramAndDataStructure/list/__init__.py | 50e5397d5291650f1e1f4a4e99a244b430ba0f89 | [] | no_license | Veraun/HogwartsSDET17-1 | d2592fcb4c9c63724c19bcf9edde349ebcd2c8af | 6648dbfb640b065ff2c76cb6889a8f9e4f124b91 | refs/heads/main | 2023-07-02T05:20:32.161248 | 2021-08-06T03:55:13 | 2021-08-06T03:55:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | '''
#!/usr/bin/python3
# -*- coding: utf-8 -*-
@author: wangwei
@project: HogwartsSDET17
@file: __init__.py.py
@time: 2021/5/20 19:54
@Email: Warron.Wang
''' | [
"wei1.wang@ximalaya.com"
] | wei1.wang@ximalaya.com |
0d1f8ac232ce4709e84ae40a42d8f4df1accaf4b | d6b4f2a17eacfda27a2823e180c45ae783cea43b | /introducao-python/knn/iris-knn.py | 6778b550102d6ea70867ead5f0fa9ce434993cc0 | [] | no_license | leonardoFiedler/data-science-course | ba31d9c4b0cced259d554b00786981c8b4a6f1c9 | fe7f576a66091bc9d2db4e28e9368d05575315cd | refs/heads/master | 2020-09-04T19:30:03.941010 | 2019-11-22T21:56:13 | 2019-11-22T21:56:13 | 219,868,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
(X, y) = load_iris(return_X_y=True)
data = train_test_split(X, y, test_size=0.2, random_state=1)
(X_train, X_test, y_train, y_test) = data
# Numero do K - quantidade de itens a serem verificados e separados
k = 5
labelsResults = []
for i in range(len(X_test)):
x = X_test[i, :]
d = X_train - x
d = np.square(d).sum(axis=1)
sortedMatDis = np.argsort(d)
labels = []
for j in range(k):
idx = sortedMatDis[j]
labels.append(y_train[idx])
labelsResults.append(pd.value_counts(labels).idxmax())
print(labelsResults)
print('Score:', accuracy_score(y_test, labelsResults)) | [
"leonardo.fiedler.96@gmail.com"
] | leonardo.fiedler.96@gmail.com |
db1facd386d242b1b380c5534537494db308c88d | 002aca3621afbe787f5f2133d3dbe8af6e5e9e33 | /contraClientes.py | dfaa170150fe8c98060e42d61179b22d6a65867c | [] | no_license | Toti848/Joselyn | 5ce55f38ac3092b5f283ddad47a0351ad78eefe3 | 735e123ef7a80770d54a26afe8ef9f112b37e7be | refs/heads/master | 2022-12-02T06:59:42.999170 | 2020-08-20T03:35:21 | 2020-08-20T03:35:21 | 284,575,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from tkinter import *
from tkinter import font
from tkinter import messagebox as msg
from tkinter import ttk
class Main:
def __init__(self):
#Pantalla
self.raiz = Tk()
self.raiz.title ("Acceso a los Clientes")
self.raiz.geometry('600x200')
#Fuente
self.fuente = font.Font(weight="bold")
self.user = StringVar()
self.pasw = StringVar()
#Titulo
self.lb_tituloPantalla = Label(self.raiz, text = "ACCESO DEL LOS CLIENTES", font = self.fuente)
self.lb_tituloPantalla.place(x = 180, y = 20)
#User
self.lb_User = Label(self.raiz, text = "User:")
self.lb_User.place(x = 100, y = 60)
self.txt_User = Entry(self.raiz, textvariable=self.user, justify="right", width = 30)
self.txt_User.place(x = 230, y = 60)
#Password
self.lb_Password = Label(self.raiz, text = "Password:")
self.lb_Password.place(x = 100, y = 90)
self.txt_Password = Entry(self.raiz, textvariable=self.pasw, justify="right", width = 30)
self.txt_Password.place(x = 230, y = 90)
#Boton Limpiar
self.bt_borrar = Button(self.raiz, text="Limpiar", width=15, command = self.Limpiar)
self.bt_borrar.place(x = 190, y = 130)
#Boton Acceder
self.bt_enviar = Button(self.raiz, text="Acceder", width=15, command = self.Acceder)
self.bt_enviar.place(x = 310, y = 130)
self.raiz.mainloop()
def Acceder(self):
if(self.user.get() == "Cliente" and self.pasw.get() == "p34"):
from Cliente_Socket import Chat_C
self.raiz.destroy()
Chat_C()
else:
msg.showinfo("Error", "La contrasena o ususario es incorrecta")
def Limpiar(self):
self.user.set("")
self.pasw.set("")
def main():
Main()
return 0
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | Toti848.noreply@github.com |
337238a653f2c421c1f017238cbef58842b56a43 | 567ecf4ea5afbd7eb3003f7e14e00c7b9289b9c6 | /ax/storage/json_store/decoders.py | 7a586e03ddb3b32b0a5780c941e67e791e29d11a | [
"MIT"
] | permissive | danielrjiang/Ax | f55ef168a59381b5a03c6d51bc394f6c72ed0f39 | 43014b28683b3037b5c7307869cb9b75ca31ffb6 | refs/heads/master | 2023-03-31T12:19:47.118558 | 2019-12-02T16:47:39 | 2019-12-02T16:49:36 | 225,493,047 | 0 | 0 | MIT | 2019-12-03T00:09:52 | 2019-12-03T00:09:51 | null | UTF-8 | Python | false | false | 3,501 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import AbandonedArm, BatchTrial, GeneratorRunStruct
from ax.core.generator_run import GeneratorRun
from ax.core.runner import Runner
from ax.core.trial import Trial
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import core # noqa F401 # pragma: no cover
def batch_trial_from_json(
experiment: "core.experiment.Experiment",
index: int,
trial_type: Optional[str],
status: TrialStatus,
time_created: datetime,
time_completed: Optional[datetime],
time_staged: Optional[datetime],
time_run_started: Optional[datetime],
abandoned_reason: Optional[str],
run_metadata: Optional[Dict[str, Any]],
generator_run_structs: List[GeneratorRunStruct],
runner: Optional[Runner],
abandoned_arms_metadata: Dict[str, AbandonedArm],
num_arms_created: int,
status_quo: Optional[Arm],
status_quo_weight_override: float,
optimize_for_power: Optional[bool],
) -> BatchTrial:
"""Load Ax BatchTrial from JSON.
Other classes don't need explicit deserializers, because we can just use
their constructors (see decoder.py). However, the constructor for Batch
does not allow us to exactly recreate an existing object.
"""
batch = BatchTrial(experiment=experiment)
batch._index = index
batch._trial_type = trial_type
batch._status = status
batch._time_created = time_created
batch._time_completed = time_completed
batch._time_staged = time_staged
batch._time_run_started = time_run_started
batch._abandoned_reason = abandoned_reason
batch._run_metadata = run_metadata or {}
batch._generator_run_structs = generator_run_structs
batch._runner = runner
batch._abandoned_arms_metadata = abandoned_arms_metadata
batch._num_arms_created = num_arms_created
batch._status_quo = status_quo
batch._status_quo_weight_override = status_quo_weight_override
batch.optimize_for_power = optimize_for_power
return batch
def trial_from_json(
experiment: "core.experiment.Experiment",
index: int,
trial_type: Optional[str],
status: TrialStatus,
time_created: datetime,
time_completed: Optional[datetime],
time_staged: Optional[datetime],
time_run_started: Optional[datetime],
abandoned_reason: Optional[str],
run_metadata: Optional[Dict[str, Any]],
generator_run: GeneratorRun,
runner: Optional[Runner],
num_arms_created: int,
) -> Trial:
"""Load Ax trial from JSON.
Other classes don't need explicit deserializers, because we can just use
their constructors (see decoder.py). However, the constructor for Trial
does not allow us to exactly recreate an existing object.
"""
trial = Trial(experiment=experiment, generator_run=generator_run)
trial._index = index
trial._trial_type = trial_type
trial._status = status
trial._time_created = time_created
trial._time_completed = time_completed
trial._time_staged = time_staged
trial._time_run_started = time_run_started
trial._abandoned_reason = abandoned_reason
trial._run_metadata = run_metadata or {}
trial._runner = runner
trial._num_arms_created = num_arms_created
return trial
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
8f9f79f922e726902af3535889af3665812574bd | 63cf7b2d363d3d4a371f7214f20b5503b401b5ee | /ene-jun-2020/joe tareas y parciales/ordinario/ordinario_busqueda.py | 263816de6626eea036c7531f294391a226f47535 | [] | no_license | joeguerrero735/AlgoritmosSistemas | fdfb03b1072835c5b40e4c5f1e954d0f5c740d72 | ed3132d7738ee31af40c99b2811488ea84c04d6a | refs/heads/master | 2022-09-06T22:45:01.039420 | 2020-05-30T05:03:18 | 2020-05-30T05:03:18 | 263,782,689 | 0 | 0 | null | 2020-05-14T01:17:54 | 2020-05-14T01:17:53 | null | UTF-8 | Python | false | false | 1,481 | py | def binarySearch(arreglo, inicial, final, x):
medio = (inicial + final)//2
if inicial > final:
return -1
if arreglo[medio][0] == x :
if x < arreglo[medio][0]:
return binarySearch(arreglo, inicial, medio-1, x)
else:
return binarySearch(arreglo, medio+1, final, x)
entrada_datos = input().split()
# número de cosa que va a comprar
tamaño=int(entrada_datos[0])
#artículo que busca
palabra=entrada_datos[1]
## arreglo
arreglo = []
for i in range(tamaño):
b = input().split()
arreglo.append(b)
def quickSort(arr,start,end):
pivot = start
point = end
while pivot != point:
if len(arr[point]) < len(arr[pivot]) and point > pivot:
# Si la regla no se cumple, cambio.
arr[point], arr[pivot] = arr[pivot], arr[point]
pivot, point = point, pivot
elif len(arr[point]) > len(arr[pivot]) and point < pivot:
# Si la regla no se cumple, cambio.
arr[point], arr[pivot] = arr[pivot], arr[point]
pivot, point = point, pivot
if pivot > point:
point += 1
else:
point -= 1
# Izquierda.
if pivot != start:
quickSort(arr, start, pivot-1)
# Derecha.
if pivot != end:
quickSort(arr, pivot+1, end)
quickSort(arreglo,0,len(arreglo)-1)
#print(arreglo)
print(binarySearch(arreglo, 0, len(arreglo), palabra)+1) | [
"yoyom_wwe@hotmail.com"
] | yoyom_wwe@hotmail.com |
d56aaa1b76881e1998052b9a341d91955fab83a2 | 1dd6726ebfef9736fea9d4b69c18333909197417 | /New folder/project_test/manage.py | 3a32ab96c4075d09f7fb2b70bdb53957ec7a5dd6 | [] | no_license | Uday-Kiran/My-Scribbles | c7255371ef4c15172dc8acb511d13143533032f4 | fc2383c5877b78dfa4751d4c114762f84504cc48 | refs/heads/master | 2022-01-19T02:36:08.964361 | 2019-07-21T16:40:35 | 2019-07-21T16:40:35 | 198,078,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_test.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | Uday-Kiran.noreply@github.com |
070fec483cc8b3618847116669bce6bd58c2f158 | b290100dc3f40cc7867e21080c92135a75bca06b | /labwork/labwork/urls.py | 6b1f3561f1545c492a3cecbffcdb95dbef43ecd2 | [] | no_license | Kunduzha/labwork | a13d69eaff5ca3cad8a17bb038f85683648cc82f | 366af69bbbffcdf0422fe0ea83021e52ca82a61e | refs/heads/master | 2023-04-05T01:43:17.305601 | 2021-03-31T10:55:05 | 2021-03-31T10:55:05 | 353,324,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | """labwork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"kuzanai@mail.ru"
] | kuzanai@mail.ru |
278d818322d05275fb24dbbd4ce90fbb73f4aad5 | cbd347d69f4ae9725ec479795e21ef45d6ccf41a | /tests/constants.py | 968375f731522300035d26c8d7a6828c743fad50 | [
"Apache-2.0"
] | permissive | pyni/perception | 9d1e398964312d8ebdef0374e1e089fa7ff28397 | 81262bd05524e9d28568d55107718783023ae14c | refs/heads/master | 2020-03-24T02:23:21.647095 | 2018-07-19T20:05:00 | 2018-07-19T20:05:00 | 142,373,747 | 1 | 0 | Apache-2.0 | 2018-07-26T01:46:36 | 2018-07-26T01:46:36 | null | UTF-8 | Python | false | false | 122 | py | IM_HEIGHT = 100
IM_WIDTH = 100
NUM_POINTS = 100
NUM_ITERS = 500
BINARY_THRESH = 127
COLOR_IM_FILEROOT = 'data/test_color'
| [
"jmahler@berkeley.edu"
] | jmahler@berkeley.edu |
62098dedacaa8ca0444becbb5aa5a5b7341645ea | d11f36debe9c5c2b5af87221782eebb4d6968d2e | /lesson19.py | fa9276880d4cc7e112e9e6e78b74ae6fef529263 | [] | no_license | OmorovAzat/lesson1 | cde12fcc19d86b22139ca9ac0773b059fd1165f8 | ad1d113c56bbc0961b0543c45b4df94eea3c0314 | refs/heads/master | 2023-05-07T08:31:58.450064 | 2021-05-30T08:54:51 | 2021-05-30T08:54:51 | 372,166,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | #Решение домашнего задания
# a = [1, 2, 3]
# b = [i * 2 for i in a]
# print(b)
# l1 = [1, 2, 3]
# res = 0
# for num in l1:
# res += num ** 2
# print(res)
# time1 = 3
# time2 = 6.7
# time3 = 11.8
#
# print(time1 // 2)
# print(time2 // 2)
# print(time3 // 2)
# s = 'Hello,world'
# if ' ' in s:
# s = s.upper()
# else:
# s = s.lower()
#
# print(s)
| [
"bdante025@gmail.com"
] | bdante025@gmail.com |
856043c72dfa18187c13e630e6c9e58fcc3c660b | a56a74b362b9263289aad96098bd0f7d798570a2 | /venv/lib/python3.8/site-packages/matplotlib/_pylab_helpers.py | 2407b573c4aabbe64132bc3a0ae71163132785bc | [
"MIT"
] | permissive | yoonkt200/ml-theory-python | 5812d06841d30e1068f6592b5730a40e87801313 | 7643136230fd4f291b6e3dbf9fa562c3737901a2 | refs/heads/master | 2022-12-21T14:53:21.624453 | 2021-02-02T09:33:07 | 2021-02-02T09:33:07 | 132,319,537 | 13 | 14 | MIT | 2022-12-19T17:23:57 | 2018-05-06T08:17:45 | Python | UTF-8 | Python | false | false | 3,445 | py | """
Manage figures for pyplot interface.
"""
import atexit
import gc
class Gcf:
"""
Singleton to manage a set of integer-numbered figures.
This class is never instantiated; it consists of two class
attributes (a list and a dictionary), and a set of static
methods that operate on those attributes, accessing them
directly as class attributes.
Attributes
----------
figs
dictionary of the form {*num*: *manager*, ...}
_activeQue
list of *managers*, with active one at the end
"""
_activeQue = []
figs = {}
@classmethod
def get_fig_manager(cls, num):
"""
If figure manager *num* exists, make it the active
figure and return the manager; otherwise return *None*.
"""
manager = cls.figs.get(num, None)
if manager is not None:
cls.set_active(manager)
return manager
@classmethod
def destroy(cls, num):
"""
Try to remove all traces of figure *num*.
In the interactive backends, this is bound to the
window "destroy" and "delete" events.
"""
if not cls.has_fignum(num):
return
manager = cls.figs[num]
manager.canvas.mpl_disconnect(manager._cidgcf)
cls._activeQue.remove(manager)
del cls.figs[num]
manager.destroy()
gc.collect(1)
@classmethod
def destroy_fig(cls, fig):
"*fig* is a Figure instance"
num = next((manager.num for manager in cls.figs.values()
if manager.canvas.figure == fig), None)
if num is not None:
cls.destroy(num)
@classmethod
def destroy_all(cls):
# this is need to ensure that gc is available in corner cases
# where modules are being torn down after install with easy_install
import gc # noqa
for manager in list(cls.figs.values()):
manager.canvas.mpl_disconnect(manager._cidgcf)
manager.destroy()
cls._activeQue = []
cls.figs.clear()
gc.collect(1)
@classmethod
def has_fignum(cls, num):
"""
Return *True* if figure *num* exists.
"""
return num in cls.figs
@classmethod
def get_all_fig_managers(cls):
"""
Return a list of figure managers.
"""
return list(cls.figs.values())
@classmethod
def get_num_fig_managers(cls):
"""
Return the number of figures being managed.
"""
return len(cls.figs)
@classmethod
def get_active(cls):
"""
Return the manager of the active figure, or *None*.
"""
if len(cls._activeQue) == 0:
return None
else:
return cls._activeQue[-1]
@classmethod
def set_active(cls, manager):
"""
Make the figure corresponding to *manager* the active one.
"""
oldQue = cls._activeQue[:]
cls._activeQue = [m for m in oldQue if m != manager]
cls._activeQue.append(manager)
cls.figs[manager.num] = manager
@classmethod
def draw_all(cls, force=False):
"""
Redraw all figures registered with the pyplot
state machine.
"""
for f_mgr in cls.get_all_fig_managers():
if force or f_mgr.canvas.figure.stale:
f_mgr.canvas.draw_idle()
atexit.register(Gcf.destroy_all)
| [
"kitae.yoon@deliveryhero.co.kr"
] | kitae.yoon@deliveryhero.co.kr |
52e97583338ee135280976c809802e41e82f2615 | 4321285ff5eed67fbca253ba7647235032700dfe | /RequestService/liquid_requests.py | fd208f5fa6a8659798a0d78942ac7498d37a46d0 | [] | no_license | TheBigGinge/Analytics | e8fd5ce3f04ce8ce32458500a264c10682dfbdc5 | 27c82bfdd4f06b9e80ee8f7ac7226370c62c4eb2 | refs/heads/master | 2020-12-18T22:19:56.904634 | 2016-08-08T19:53:55 | 2016-08-08T19:53:55 | 34,743,144 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,305 | py | import requests
import json
import os
import getpass
import base64
class LiquidPlannerRequest:
base_uri = 'https://app.liquidplanner.com/api'
workspace_id = None
project_id = None
email = 'ryanm@payscale.com'
password = 'Huge-Large1978'
session = None
def __init__(self, email=None, password=None):
if email is not None:
self.email = email
self.password = password
def get_workspace_id(self):
return self.workspace_id
def set_workspace_id(self, workspace_id):
self.workspace_id = workspace_id
def set_project_id(self, project_id):
self.project_id = project_id
def get(self, uri, options={}):
return requests.get(self.base_uri + uri,
data=options,
headers={'Content-Type': 'application/json'},
auth=(self.email, self.password))
def post(self, uri, options={}):
return requests.post(self.base_uri + uri,
data=options,
headers={'Content-Type': 'application/json'},
auth=(self.email, self.password))
def put(self, uri, options={}):
return requests.put(self.base_uri + uri,
data=options,
headers={'Content-Type': 'application/json'},
auth=(self.email, self.password))
def account(self):
"""
Returns a dictionary with information about the current user.
"""
return json.loads(self.get('/account').content)
def workspaces(self):
"""
Returns a list of dictionaries, each a workspace in which the user is a member
Workspaces are the root directory
"""
return json.loads(self.get('/workspaces').content)
def packages(self):
"""
Returns a dictionary of all packages
A workspace is made up of packages
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/packages').content)
def projects(self):
"""
Returns a list of dictionaries, each a project in a workspace
A package can be made up of projects and tasks
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/projects').content)
def pull_all_tasks(self):
"""
Returns a list of dictionaries, each a task in a workspace
Tasks can live in projects or packages
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/tasks').content)
def create_task(self, data):
"""
Creates a task by POSTing data
:params data:
Commands for the api
"""
return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
'/tasks', json.dumps({'task': data})).content)
def update_task(self, data):
"""
Updates a task by PUTing data
:params data:
Commands for the api
"""
return json.loads(self.put('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(data['id']), json.dumps({'task': data})).content)
def write_task_comment(self, task_id, comment):
"""
Writes a comment to a task
"""
return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(task_id) + '/comments', json.dumps({'comment': comment})).content)
def check_for_task_changes(self):
return json.loads(self.get('/workspaces/' + str(self.workspace_id)
+ '/changes').content)
def pull_task_by_id(self, id_number):
"""
Returns a list of dictionaries, each a task in a workspace
Tasks can live in projects or packages
"""
return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(id_number)).content)
def pull_task_note(self, task_id):
return self.get('/workspaces/' + str(self.workspace_id) +
'/tasks/' + str(task_id) + '/note').content | [
"rphm78@gmail.com"
] | rphm78@gmail.com |
9b1e0c1c235c51767a012be75e7b2a6729897c0d | e9da16d2d1468a47de78c66e4397216038e7ba88 | /zscdumin/谷歌翻译/python代码/getICAPSUrlList.py | e8dee91a1bf115825a8623c2154bb110ff77687d | [] | no_license | ZSCDumin/Spider | 9b28beb5d5166365c3b954c32d28a409a79fb085 | ba63f7122ce8e530cdfaaee56670dd5624d4c864 | refs/heads/master | 2021-06-27T02:04:29.622633 | 2020-09-13T01:20:18 | 2020-09-13T01:20:18 | 137,719,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-01-28 20:30:54
# @Author : 杜敏 (2712220318@qq.com)
# @Link : https://github.com/ZSCDumin
# @Version : $Id$
import requests
from bs4 import BeautifulSoup
def getHTMLText(url):
try:
r = requests.get(url, timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "爬取失败"
def getUrlList(urlList, downLoadList, url):
html = getHTMLText(url)
soup = BeautifulSoup(html, 'html.parser')
urls = soup.find_all('a')
for url in urls:
try:
href = str(url.attrs['href']) # 获取照片路径
if "view" in href and "paper" in href and len(href) > 66:
paperInfoUrl = href.replace("view", "viewPaper")
print(paperInfoUrl)
paperDownloadUrl = href.replace("view", "download")
print(paperDownloadUrl)
urlList.append(paperInfoUrl)
downLoadList.append(paperDownloadUrl)
except:
continue
def saveUrlAsFile(urlList, downLoadList, fPath, num):
path = fPath + "\\" + "ICAPS_20" + num + "_PaperInfoUrl.txt"
with open(path, 'w') as f: # 写入文件
for url in urlList:
print(url)
f.write(url + "\n")
f.close()
print("Url列表文件保存成功")
path = fPath + "\\" + "ICAPS_20" + num + "_DownloadUrl.txt"
with open(path, 'w') as f: # 写入文件
for url in downLoadList:
print(url)
f.write(url + "\n")
f.close()
print("Download列表文件保存成功")
def main():
urlList = [] # URL列表
downLoadList = [] # 下载列表
fPath = "F:\\接单项目\\谷歌翻译\\论文数据\\ICAPS" # 文件存储路径
for i in range(9, 19):
if i < 10:
num = "0" + str(i)
else:
num = str(i)
url = "https://www.aaai.org/ocs/index.php/ICAPS/ICAPS" + num + "/schedConf/presentations" # 爬取页面URL
print(url)
getUrlList(urlList, downLoadList, url)
saveUrlAsFile(urlList, downLoadList, fPath, num)
urlList.clear()
downLoadList.clear()
main()
| [
"2712220318@qq.com"
] | 2712220318@qq.com |
6e615abdf2ddd030aea3917e7b4d7214899e693e | e249e4bb6e3cb2aabf592bcd3f7ec07b7c080eb8 | /cvp_modules/library/cv_server_provision.py | 2d2d507f668d27dc109e4d45be3d50c71cef71db | [] | no_license | arista-eosplus/ansible-cloudvision | c87e230e5286628c3a2f162efab585f4b16ab4c7 | abe124577d1ebeb3dd7b493102fd15795f4a4506 | refs/heads/master | 2021-01-19T08:23:23.139937 | 2017-07-21T03:15:41 | 2017-07-21T03:15:41 | 72,491,582 | 5 | 0 | null | 2017-06-06T13:31:26 | 2016-11-01T01:09:52 | Python | UTF-8 | Python | false | false | 24,752 | py | #!/usr/bin/env python
#
# Copyright (c) 2017, Arista Networks EOS+
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Module metadata consumed by Ansible's documentation and validation tooling:
# declares the metadata schema version, the module's maturity ('preview'
# means the interface may still change), and which group supports it
# ('community' — i.e. not supported by the Ansible core team).
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cv_server_provision
version_added: "2.4"
author: "EOS+ CS (ansible-dev@arista.com) (@mharista)"
short_description:
Provision server port by applying or removing template configuration to a
configlet
description:
- This module allows a server team to provision server network ports for
new servers without having to access Arista CVP or asking the network team
to do it for them. Provide the information for connecting to CVP, switch
rack, port the new server is connected to, optional vlan, and an action
and the module will apply the configuration to the switch port via CVP.
Actions are add (applies template config to port),
remove (defaults the interface config) and
show (returns the current port config).
options:
host:
description:
- The hostname or IP address of the CVP node being connected to.
required: true
port:
description:
- The port number to use when making API calls to the CVP node. This
will default to the default port for the specified protocol. Port 80
for http and port 443 for https.
default: None
protocol:
description:
- The protocol to use when making API calls to CVP. CVP defaults to https
and newer versions of CVP no longer support http.
default: https
choices: [https, http]
username:
description:
- The user that will be used to connect to CVP for making API calls.
required: true
password:
description:
- The password of the user that will be used to connect to CVP for API
calls.
required: true
server_name:
description:
- The hostname or identifier for the server that is having it's switch
port provisioned.
required: true
switch_name:
description:
- The hostname of the switch is being configured for the server being
provisioned.
required: true
switch_port:
description:
- The physical port number on the switch that the new server is
connected to.
required: true
port_vlan:
description:
- The vlan that should be applied to the port for this server.
This parameter is dependent on a proper template that supports single
vlan provisioning with it. If a port vlan is specified by the template
specified does not support this the module will exit out with no
changes. If a template is specified that requires a port vlan but no
port vlan is specified the module will exit out with no changes.
default: None
template:
description:
- A path to a Jinja formatted template file that contains the
configuration block that will be applied to the specified switch port.
This template will have variable fields replaced by the module before
being applied to the switch configuration.
required: true
action:
description:
- The action for the module to take. The actions are add, which applies
the specified template config to port, remove, which defaults the
specified interface configuration, and show, which will return the
current port configuration with no changes.
default: show
choices: [show, add, remove]
auto_run:
description:
- Flag that determines whether or not the module will execute the CVP
task spawned as a result of changes to a switch configlet. When an
add or remove action is taken which results in a change to a switch
configlet, CVP will spawn a task that needs to be executed for the
configuration to be applied to the switch. If this option is True then
the module will determine the task number created by the configuration
change, execute it and wait for the task to complete. If the option
is False then the task will remain in the Pending state in CVP for
a network administrator to review and execute.
default: False
type: bool
notes:
requirements: [Jinja2, cvprac >= 0.7.0]
'''
EXAMPLES = '''
- name: Get current configuration for interface Ethernet2
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: show
- name: Remove existing configuration from interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: remove
auto_run: True
- name: Add template configuration to interface Ethernet2. No VLAN. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: single_attached_trunk.j2
action: add
auto_run: True
- name: Add template with VLAN configuration to interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
port_vlan: 22
template: single_attached_vlan.j2
action: add
auto_run: True
'''
RETURN = '''
changed:
description: Signifies if a change was made to the configlet
returned: success
type: bool
sample: true
currentConfigBlock:
description: The current config block for the user specified interface
returned: when action = show
type: string
sample: "interface Ethernet4\n!"
newConfigBlock:
description: The new config block for the user specified interface
returned: when action = add or remove
type: string
sample: "interface Ethernet3\n description example\n no switchport\n!"
oldConfigBlock:
description: The current config block for the user specified interface
before any changes are made
returned: when action = add or remove
type: string
sample: "interface Ethernet3\n!"
fullConfig:
description: The full config of the configlet after being updated
returned: when action = add or remove
type: string
sample: "!\ninterface Ethernet3\n!\ninterface Ethernet4\n!"
updateConfigletResponse:
description: Response returned from CVP when configlet update is triggered
returned: when action = add or remove and configuration changes
type: string
sample: "Configlet veos1-server successfully updated and task initiated."
portConfigurable:
description: Signifies if the user specified port has an entry in the
configlet that Ansible has access to
returned: success
type: bool
sample: true
switchConfigurable:
description: Signifies if the user specified switch has a configlet
applied to it that CVP is allowed to edit
returned: success
type: bool
sample: true
switchInfo:
description: Information from CVP describing the switch being configured
returned: success
type: dictionary
sample: {"architecture": "i386",
"bootupTimeStamp": 1491264298.21,
"complianceCode": "0000",
"complianceIndication": "NONE",
"deviceInfo": "Registered",
"deviceStatus": "Registered",
"fqdn": "veos1",
"hardwareRevision": "",
"internalBuildId": "12-12",
"internalVersion": "4.17.1F-11111.4171F",
"ipAddress": "192.168.1.20",
"isDANZEnabled": "no",
"isMLAGEnabled": "no",
"key": "00:50:56:5d:e5:e0",
"lastSyncUp": 1496432895799,
"memFree": 472976,
"memTotal": 1893460,
"modelName": "vEOS",
"parentContainerId": "container_13_5776759195930",
"serialNumber": "",
"systemMacAddress": "00:50:56:5d:e5:e0",
"taskIdList": [],
"tempAction": null,
"type": "netelement",
"unAuthorized": false,
"version": "4.17.1F",
"ztpMode": "false"}
taskCompleted:
description: Signifies if the task created and executed has completed successfully
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskCreated:
description: Signifies if a task was created due to configlet changes
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: bool
sample: true
taskExecuted:
description: Signifies if the automation executed the spawned task
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskId:
description: The task ID created by CVP because of changes to configlet
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: string
sample: "500"
'''
import re
import time
from jinja2 import meta
import jinja2
from ansible.module_utils.basic import AnsibleModule
from cvprac.cvp_client import CvpClient
from cvprac.cvp_client_errors import CvpLoginError, CvpApiError
def connect(module):
    ''' Connects to the CVP node using user provided credentials from the
    playbook.
    :param module: Ansible module with parameters and client connection.
    :return: CvpClient object with connection instantiated, or exit with
        failure if the login is rejected.
    '''
    client = CvpClient()
    try:
        client.connect([module.params['host']],
                       module.params['username'],
                       module.params['password'],
                       protocol=module.params['protocol'],
                       port=module.params['port'])
    except CvpLoginError as e:
        # 'except X as e' replaces the Python 2-only 'except X, e' form so
        # the module also runs under Python 3.
        module.fail_json(msg=str(e))
    return client
def switch_info(module):
    ''' Look up the device record for the user specified switch in CVP.
    :param module: Ansible module with parameters and client connection.
    :return: Dict of switch info from CVP or exit with failure if no
        info for the device is found.
    '''
    name = module.params['switch_name']
    device = module.client.api.get_device_by_name(name)
    if not device:
        module.fail_json(msg=str("Device with name '%s' does not exist."
                                 % name))
    return device
def switch_in_compliance(module, sw_info):
    ''' Fail the module run unless the switch is currently in compliance.
    :param module: Ansible module with parameters and client connection.
    :param sw_info: Dict of switch info.
    :return: Nothing or exit with failure if device is not in compliance.
    '''
    check = module.client.api.check_compliance(sw_info['key'],
                                               sw_info['type'])
    code = check['complianceCode']
    if code == '0000':
        return
    module.fail_json(msg=str('Switch %s is not in compliance. Returned'
                             ' compliance code %s.'
                             % (sw_info['fqdn'], code)))
def server_configurable_configlet(module, sw_info):
    ''' Check CVP for a configlet assigned to the user specified switch
    that Ansible is allowed to edit (named "<switch_name>-server").
    :param module: Ansible module with parameters and client connection.
    :param sw_info: Dict of switch info.
    :return: Dict of configlet information or None.
    '''
    target_name = module.params['switch_name'] + '-server'
    assigned = module.client.api.get_configlets_by_device_id(sw_info['key'])
    # Scan every assigned configlet; the last one matching the expected
    # name wins (names are unique in practice).
    found = None
    for candidate in assigned:
        if candidate['name'] == target_name:
            found = candidate
    return found
def port_configurable(module, configlet):
    ''' Check the configlet for a configuration entry for the user specified
    port, which determines if Ansible is allowed to configure the port on
    this switch.
    :param module: Ansible module with parameters and client connection.
    :param configlet: Dict of configlet info.
    :return: True or False.
    '''
    # (?!\d) ensures port 2 does not match the interface lines for ports
    # 20, 21, ... (the original prefix-only regex produced false positives).
    regex = r'^interface Ethernet%s(?!\d)' % module.params['switch_port']
    for config_line in configlet['config'].split('\n'):
        if re.match(regex, config_line):
            # First match is enough; no need to scan the rest of the config.
            return True
    return False
def configlet_action(module, configlet):
    ''' Take the appropriate action based on the current state of the device
    and the user requested action.
    Return the current config block for the specified port when the action
    is show. For add or remove, update the configlet accordingly and
    return the associated information.
    :param module: Ansible module with parameters and client connection.
    :param configlet: Dict of configlet info.
    :return: Dict of information to update results with.
    '''
    action = module.params['action']
    existing = current_config(module, configlet['config'])
    if action == 'show':
        return dict(currentConfigBlock=existing)
    result = dict()
    if action == 'add':
        result['newConfigBlock'] = config_from_template(module)
    elif action == 'remove':
        # Removing resets the interface to its default (empty) config block.
        result['newConfigBlock'] = ('interface Ethernet%s\n!'
                                    % module.params['switch_port'])
    result['oldConfigBlock'] = existing
    result['fullConfig'] = updated_configlet_content(module,
                                                     configlet['config'],
                                                     result['newConfigBlock'])
    response = module.client.api.update_configlet(result['fullConfig'],
                                                  configlet['key'],
                                                  configlet['name'])
    if 'data' in response:
        result['updateConfigletResponse'] = response['data']
        # CVP mentions the spawned task in the response data when the
        # configlet content actually changed.
        if 'task' in response['data']:
            result['changed'] = True
            result['taskCreated'] = True
    return result
def current_config(module, config):
    ''' Parse the full port configuration for the user specified port out of
    the full configlet configuration and return it as a string.
    :param module: Ansible module with parameters and client connection.
    :param config: Full config to parse the specific port config from.
    :return: String of the current config block for the user specified port.
    '''
    # (?!\d) ensures port 2 does not match the section for port 20, 21, ...
    # (the original prefix-only regex could return the wrong port's block).
    regex = r'^interface Ethernet%s(?!\d)' % module.params['switch_port']
    match = re.search(regex, config, re.M)
    if not match:
        module.fail_json(msg=str('interface section not found - %s'
                                 % config))
    block_start, line_end = match.regs[0]
    # The interface block ends at the first '!' after the interface line.
    match = re.search(r'!', config[line_end:], re.M)
    if not match:
        # No terminator found; the section runs to the end of the config.
        return config[block_start:]
    _, block_end = match.regs[0]
    block_end = line_end + block_end
    return config[block_start:block_end]
def valid_template(port, template):
    ''' Test if the user provided Jinja template is valid.
    :param port: User specified port.
    :param template: Contents of the rendered Jinja template.
    :return: True or False
    '''
    # The template must configure exactly the requested interface; (?!\d)
    # prevents port 2 from accepting a template that configures port 20.
    regex = r'^interface Ethernet%s(?!\d)' % port
    return re.match(regex, template, re.M) is not None
def config_from_template(module):
    ''' Load the user supplied Jinja template and render it with the
    playbook parameters. Fail if the template is not found, if the
    rendered output does not reference the correct port, or if the
    template requires a VLAN but the user did not provide one with the
    port_vlan parameter.
    :param module: Ansible module with parameters and client connection.
    :return: String of the rendered Jinja template or exit with failure.
    '''
    loader = jinja2.FileSystemLoader('./templates')
    env = jinja2.Environment(loader=loader,
                             undefined=jinja2.DebugUndefined)
    template = env.get_template(module.params['template'])
    if not template:
        module.fail_json(msg=str('Could not find template - %s'
                                 % module.params['template']))
    substitutions = {'switch_port': module.params['switch_port'],
                     'server_name': module.params['server_name']}
    # Inspect the raw template source to discover which variables it uses.
    source = env.loader.get_source(env, module.params['template'])[0]
    referenced = meta.find_undeclared_variables(env.parse(source))
    if 'port_vlan' in referenced:
        if not module.params['port_vlan']:
            module.fail_json(msg=str('Template %s requires a vlan. Please'
                                     ' re-run with vlan number provided.'
                                     % module.params['template']))
        substitutions['port_vlan'] = module.params['port_vlan']
    rendered = template.render(substitutions)
    if not valid_template(module.params['switch_port'], rendered):
        module.fail_json(msg=str('Template content does not configure proper'
                                 ' interface - %s' % rendered))
    return rendered
def updated_configlet_content(module, existing_config, new_config):
    ''' Replace the configuration section for the user specified port within
    the full configlet configuration.
    :param module: Ansible module with parameters and client connection.
    :param existing_config: String of the current configlet configuration.
    :param new_config: String of configuration for the user specified port
        that replaces the existing section.
    :return: String of the full updated configuration.
    '''
    # (?!\d) ensures port 2 does not replace the section for port 20, 21,
    # ... (the original prefix-only regex could rewrite the wrong block).
    regex = r'^interface Ethernet%s(?!\d)' % module.params['switch_port']
    match = re.search(regex, existing_config, re.M)
    if not match:
        module.fail_json(msg=str('interface section not found - %s'
                                 % existing_config))
    block_start, line_end = match.regs[0]
    updated_config = existing_config[:block_start] + new_config
    # Preserve everything after the end of the old interface block ('!\n').
    match = re.search(r'!\n', existing_config[line_end:], re.M)
    if match:
        _, block_end = match.regs[0]
        block_end = line_end + block_end
        updated_config += '\n%s' % existing_config[block_end:]
    return updated_config
def configlet_update_task(module):
    ''' Poll the switch device info from CVP up to three times looking for a
    task spawned by the configlet update. It sometimes takes a second for
    the task to be spawned after configlet updates. Return the task ID if
    a matching task is found, otherwise None.
    :param module: Ansible module with parameters and client connection.
    :return: Task ID or None.
    '''
    for _ in range(3):
        info = switch_info(module)
        pending = info['taskIdList'] if 'taskIdList' in info else []
        for task in pending:
            # Only a configlet-push task counts as the one we spawned.
            if ('Configlet Assign' in task['description'] and
                    task['data']['WORKFLOW_ACTION'] == 'Configlet Push'):
                return task['workOrderId']
        time.sleep(1)
    return None
def wait_for_task_completion(module, task):
    ''' Poll CVP until the executed task completes. There is currently no
    timeout. Exits with failure if the task status becomes Failed or
    Cancelled.
    :param module: Ansible module with parameters and client connection.
    :param task: Task ID to poll for completion.
    :return: True or exit with failure if task is cancelled or fails.
    '''
    while True:
        status = module.client.api.get_task_by_id(
            task)['workOrderUserDefinedStatus']
        if status == 'Completed':
            return True
        if status in ['Failed', 'Cancelled']:
            module.fail_json(msg=str('Task %s has reported status %s. Please'
                                     ' consult the CVP admins for more'
                                     ' information.' % (task, status)))
        time.sleep(2)
def main():
    """ Main entry point for module execution.

    Builds the argument spec, connects to CVP, looks up the switch and its
    editable "-server" configlet, applies the requested show/add/remove
    action, and optionally executes the resulting CVP task when auto_run
    is enabled.
    """
    argument_spec = dict(
        host=dict(required=True),
        port=dict(required=False, default=None),
        protocol=dict(default='https', choices=['http', 'https']),
        username=dict(required=True),
        password=dict(required=True, no_log=True),
        server_name=dict(required=True),
        switch_name=dict(required=True),
        switch_port=dict(required=True),
        port_vlan=dict(required=False, default=None),
        # Fixed typo: was 'require=True', an unknown key that AnsibleModule
        # ignored, leaving the template parameter silently optional.
        template=dict(required=True),
        action=dict(default='show', choices=['show', 'add', 'remove']),
        auto_run=dict(type='bool', default=False))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    result = dict(changed=False)
    module.client = connect(module)
    try:
        result['switchInfo'] = switch_info(module)
        # Only mutate a switch that is currently in compliance.
        if module.params['action'] in ['add', 'remove']:
            switch_in_compliance(module, result['switchInfo'])
        switch_configlet = server_configurable_configlet(module,
                                                         result['switchInfo'])
        if not switch_configlet:
            module.fail_json(msg=str('Switch %s has no configurable server'
                                     ' ports.' % module.params['switch_name']))
        result['switchConfigurable'] = True
        if not port_configurable(module, switch_configlet):
            module.fail_json(msg=str('Port %s is not configurable as a server'
                                     ' port on switch %s.'
                                     % (module.params['switch_port'],
                                        module.params['switch_name'])))
        result['portConfigurable'] = True
        result['taskCreated'] = False
        result['taskExecuted'] = False
        result['taskCompleted'] = False
        result.update(configlet_action(module, switch_configlet))
        # Execute the CVP task spawned by the configlet change when the
        # user requested it via auto_run.
        if module.params['auto_run'] and module.params['action'] != 'show':
            task_id = configlet_update_task(module)
            if task_id:
                result['taskId'] = task_id
                note = ('Update config on %s with %s action from Ansible.'
                        % (module.params['switch_name'],
                           module.params['action']))
                module.client.api.add_note_to_task(task_id, note)
                module.client.api.execute_task(task_id)
                result['taskExecuted'] = True
                task_completed = wait_for_task_completion(module, task_id)
                if task_completed:
                    result['taskCompleted'] = True
            else:
                result['taskCreated'] = False
    except CvpApiError as e:
        # 'as e' replaces the Python 2-only 'except X, e' form so the
        # module also runs under Python 3.
        module.fail_json(msg=str(e))
    module.exit_json(**result)
if __name__ == '__main__':
main()
| [
"mhartzel@arista.com"
] | mhartzel@arista.com |
63d15beb9622fc5048da342646160ea270b446d8 | c4d379713ad8133c61d427c07b29f4121dcd86c5 | /workspace/root/topology/tani_utils.py | 1d93da9c1c42e8e0c77a07b025ee1c6ebc8e4cf3 | [] | no_license | agiulianomirabella/melanoma-detector | 80fe02d4ca18034ee0119e2c797df886a00ceb9e | 33ab38e3e559505f0225c86ba455a02636d3b839 | refs/heads/master | 2022-12-25T04:54:27.443543 | 2020-09-23T16:23:23 | 2020-09-23T16:23:23 | 292,315,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,546 | py | from root.utils import * # pylint: disable= unused-wildcard-import
from root.utils import makeUnique, getGrayValueCoordinates
import numpy as np
from copy import copy
from scipy.ndimage.morphology import generate_binary_structure
'''
A helpful auxiliary permutation list for subcells computation
'''
# Maximum supported ambient space dimension: the permutation table below
# only covers coordinate vectors of length 0, 1 and 2.
maximumSpaceDimension = 2
# permutations1[n] holds the half-step offset vectors applied to a cell's
# n-dimensional coordinate vector when enumerating its subcells (getSubCells
# indexes this list with len(cell)). Index 0 is empty, index 1 holds the two
# 1D offsets and index 2 holds the eight 2D offsets surrounding a square.
permutations1 = [[], [[-0.5], [0.5]], [[-0.5, -0.5], [-0.5, 0], [-0.5, 0.5], [0, -0.5], [0, 0.5], [0.5, -0.5], [0.5, 0], [0.5, 0.5]]]
'''
This module will compute cell's features, such as dimension, or subcells.
- A cell is a numpy array
'''
def dim(cell):
    """Return the dimension of a cell: the count of integer coordinates.

    :param cell: sequence (list or numpy array) of cell coordinates, where
        half-integer values (e.g. 0.5) mark the non-integral directions.
    """
    # Counting with a generator avoids building a throwaway index list
    # (the original used len([...]) over range(len(cell))).
    return sum(1 for coordinate in cell if coordinate % 1 == 0)
def getRationalIndices(cell):
    """Return the indices of the non-integer (half-step) coordinates of a cell.

    :param cell: sequence (list or numpy array) of cell coordinates.
    """
    # enumerate replaces the range(len(...)) indexing of the original.
    return [index for index, coordinate in enumerate(cell) if coordinate % 1 != 0]
def getSubCells(cell):
    """Return the (deduplicated) list of subcells of the given cell.

    Candidate subcells are built by adding the half-step offset vectors from
    permutations1; offsets are only admissible when they leave every
    non-integer coordinate of the cell untouched.
    """
    offsets = permutations1[len(cell)]
    frozen = getRationalIndices(cell)
    candidates = [cell + np.array(offset)
                  for offset in offsets
                  if all(offset[i] == 0 for i in frozen)]
    return makeUnique(candidates)
'''
This module will define functions to extract CCs eulerchar feature.
- A CC is a list of arrays (coordinates of cells belonging to the CC)
'''
def getAllCells(cc):
    """Return all cells of the connected component together with their
    subcells, deduplicated.

    :param cc: list of coordinate arrays (cells belonging to the CC).
    """
    out = copy(cc)
    for cell in cc:
        # extend mutates the shallow copy in place instead of rebuilding
        # the whole list with '+' on every iteration (quadratic behaviour).
        out.extend(getSubCells(cell))
    return makeUnique(out)
def euler(cc):
    """Return the Euler characteristic of a connected component: the
    alternating sum over dimensions d of the number of d-dimensional cells.

    :param cc: list of coordinate arrays (cells belonging to the CC).
    """
    if len(cc) == 0:
        return 0
    cells = getAllCells(cc)
    total = 0
    for d in range(len(cc[0]) + 1):
        count = sum(1 for c in cells if dim(c) == d)
        total += ((-1) ** d) * count
    return total
| [
"giulianomirabella@gmail.com"
] | giulianomirabella@gmail.com |
ef058a2f7e1c06430d246fe4dc5decaa6c3441d5 | 19e9939c91674b51c7574c7103d9abb12b3a56bb | /examples/BingAdsPythonConsoleExamples/BingAdsPythonConsoleExamples/v11/bulk_keywords_ads.py | 940766c36b292a5ada4817e24525b47af81bf4af | [
"MIT"
] | permissive | dariusmb/BingAds-Python-SDK | 0257225d304948aa41caff42d7dd7972e1bd7457 | bd5814ed66cf5ff809bea8f3231460cc3724c942 | refs/heads/master | 2020-04-01T14:22:15.911884 | 2018-10-16T15:45:26 | 2018-10-16T15:45:26 | 153,291,404 | 0 | 0 | null | 2018-10-16T13:36:08 | 2018-10-16T13:36:07 | null | UTF-8 | Python | false | false | 17,874 | py | from auth_helper import *
from bulk_service_manager_helper import *
from output_helper import *
# You must provide credentials in auth_helper.py.
def main(authorization_data):
errors=[]
try:
# Let's create a new budget and share it with a new campaign.
upload_entities=[]
bulk_budget=BulkBudget()
bulk_budget.client_id='YourClientIdGoesHere'
budget=set_elements_to_none(campaign_service.factory.create('Budget'))
budget.Amount=50
budget.BudgetType='DailyBudgetStandard'
budget.Id=BUDGET_ID_KEY
budget.Name="My Shared Budget " + strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
bulk_budget.budget=budget
upload_entities.append(bulk_budget)
bulk_campaign=BulkCampaign()
# The client_id may be used to associate records in the bulk upload file with records in the results file. The value of this field
# is not used or stored by the server; it is simply copied from the uploaded record to the corresponding result record.
# Note: This bulk file Client Id is not related to an application Client Id for OAuth.
bulk_campaign.client_id='YourClientIdGoesHere'
campaign=set_elements_to_none(campaign_service.factory.create('Campaign'))
# When using the Campaign Management service, the Id cannot be set. In the context of a BulkCampaign, the Id is optional
# and may be used as a negative reference key during bulk upload. For example the same negative reference key for the campaign Id
# will be used when adding new ad groups to this new campaign, or when associating ad extensions with the campaign.
campaign.Id=CAMPAIGN_ID_KEY
campaign.Name="Summer Shoes " + strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
campaign.Description="Summer shoes line."
# You must choose to set either the shared budget ID or daily amount.
# You can set one or the other, but you may not set both.
campaign.BudgetId=BUDGET_ID_KEY
campaign.DailyBudget=None
campaign.BudgetType=None
campaign.TimeZone='PacificTimeUSCanadaTijuana'
campaign.Status='Paused'
# You can set your campaign bid strategy to Enhanced CPC (EnhancedCpcBiddingScheme)
# and then, at any time, set an individual ad group or keyword bid strategy to
# Manual CPC (ManualCpcBiddingScheme).
# For campaigns you can use either of the EnhancedCpcBiddingScheme or ManualCpcBiddingScheme objects.
# If you do not set this element, then ManualCpcBiddingScheme is used by default.
campaign_bidding_scheme=set_elements_to_none(campaign_service.factory.create('EnhancedCpcBiddingScheme'))
campaign.BiddingScheme=campaign_bidding_scheme
# Used with FinalUrls shown in the expanded text ads that we will add below.
campaign.TrackingUrlTemplate="http://tracker.example.com/?season={_season}&promocode={_promocode}&u={lpurl}"
bulk_campaign.campaign=campaign
bulk_ad_group=BulkAdGroup()
bulk_ad_group.campaign_id=CAMPAIGN_ID_KEY
ad_group=set_elements_to_none(campaign_service.factory.create('AdGroup'))
ad_group.Id=AD_GROUP_ID_KEY
ad_group.Name="Women's Red Shoes"
ad_group.AdDistribution='Search'
end_date=campaign_service.factory.create('Date')
end_date.Day=31
end_date.Month=12
end_date.Year=strftime("%Y", gmtime())
ad_group.EndDate=end_date
search_bid=campaign_service.factory.create('Bid')
search_bid.Amount=0.09
ad_group.SearchBid=search_bid
ad_group.Language='English'
# For ad groups you can use either of the InheritFromParentBiddingScheme or ManualCpcBiddingScheme objects.
# If you do not set this element, then InheritFromParentBiddingScheme is used by default.
ad_group_bidding_scheme=set_elements_to_none(campaign_service.factory.create('ManualCpcBiddingScheme'))
ad_group.BiddingScheme=ad_group_bidding_scheme
# You could use a tracking template which would override the campaign level
# tracking template. Tracking templates defined for lower level entities
# override those set for higher level entities.
# In this example we are using the campaign level tracking template.
ad_group.TrackingUrlTemplate=None
bulk_ad_group.ad_group=ad_group
# In this example only the first 3 ads should succeed.
# The Title of the fourth ad is empty and not valid,
# and the fifth ad is a duplicate of the second ad
bulk_expanded_text_ads=[]
for index in range(5):
bulk_expanded_text_ad=BulkExpandedTextAd()
bulk_expanded_text_ad.ad_group_id=AD_GROUP_ID_KEY
expanded_text_ad=set_elements_to_none(campaign_service.factory.create('ExpandedTextAd'))
expanded_text_ad.TitlePart1='Contoso'
expanded_text_ad.TitlePart2='Fast & Easy Setup'
expanded_text_ad.Text='Huge Savings on red shoes.'
expanded_text_ad.Path1='seattle'
expanded_text_ad.Path2='shoe sale'
expanded_text_ad.Type='ExpandedText'
expanded_text_ad.Status=None
expanded_text_ad.EditorialStatus=None
# With FinalUrls you can separate the tracking template, custom parameters, and
# landing page URLs.
final_urls=campaign_service.factory.create('ns4:ArrayOfstring')
final_urls.string.append('http://www.contoso.com/womenshoesale')
expanded_text_ad.FinalUrls=final_urls
# Final Mobile URLs can also be used if you want to direct the user to a different page
# for mobile devices.
final_mobile_urls=campaign_service.factory.create('ns4:ArrayOfstring')
final_mobile_urls.string.append('http://mobile.contoso.com/womenshoesale')
expanded_text_ad.FinalMobileUrls=final_mobile_urls
# You could use a tracking template which would override the campaign level
# tracking template. Tracking templates defined for lower level entities
# override those set for higher level entities.
# In this example we are using the campaign level tracking template.
expanded_text_ad.TrackingUrlTemplate=None
# Set custom parameters that are specific to this ad,
# and can be used by the ad, ad group, campaign, or account level tracking template.
# In this example we are using the campaign level tracking template.
url_custom_parameters=campaign_service.factory.create('ns0:CustomParameters')
parameters=campaign_service.factory.create('ns0:ArrayOfCustomParameter')
custom_parameter1=campaign_service.factory.create('ns0:CustomParameter')
custom_parameter1.Key='promoCode'
custom_parameter1.Value='PROMO' + str(index)
parameters.CustomParameter.append(custom_parameter1)
custom_parameter2=campaign_service.factory.create('ns0:CustomParameter')
custom_parameter2.Key='season'
custom_parameter2.Value='summer'
parameters.CustomParameter.append(custom_parameter2)
url_custom_parameters.Parameters=parameters
expanded_text_ad.UrlCustomParameters=url_custom_parameters
bulk_expanded_text_ad.ad=expanded_text_ad
bulk_expanded_text_ads.append(bulk_expanded_text_ad)
bulk_expanded_text_ads[1].ad.Title="Quick & Easy Setup"
bulk_expanded_text_ads[2].ad.Title="Fast & Simple Setup"
bulk_expanded_text_ads[3].ad.Title=''
bulk_expanded_text_ads[4].ad.Title="Quick & Easy Setup"
# In this example only the second keyword should succeed. The Text of the first keyword exceeds the limit,
# and the third keyword is a duplicate of the second keyword.
bulk_keywords=[]
for index in range(3):
bulk_keyword=BulkKeyword()
bulk_keyword.ad_group_id=AD_GROUP_ID_KEY
keyword=set_elements_to_none(campaign_service.factory.create('Keyword'))
keyword.Bid=set_elements_to_none(campaign_service.factory.create('Bid'))
keyword.Bid.Amount=0.47
keyword.Param2='10% Off'
keyword.MatchType='Broad'
keyword.Text='Brand-A Shoes'
# For keywords you can use either of the InheritFromParentBiddingScheme or ManualCpcBiddingScheme objects.
# If you do not set this element, then InheritFromParentBiddingScheme is used by default.
keyword_bidding_scheme=set_elements_to_none(campaign_service.factory.create('InheritFromParentBiddingScheme'))
keyword.BiddingScheme=keyword_bidding_scheme
bulk_keyword.keyword=keyword
bulk_keywords.append(bulk_keyword)
bulk_keywords[0].keyword.Text=(
"Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes "
"Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes "
"Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes Brand-A Shoes"
)
# Write the entities created above, to temporary memory.
# Dependent entities such as BulkKeyword must be written after any dependencies,
# for example the BulkCampaign and BulkAdGroup.
upload_entities.append(bulk_campaign)
upload_entities.append(bulk_ad_group)
for bulk_expanded_text_ad in bulk_expanded_text_ads:
upload_entities.append(bulk_expanded_text_ad)
for bulk_keyword in bulk_keywords:
upload_entities.append(bulk_keyword)
output_status_message("\nAdding campaign, budget, ad group, keywords, and ads . . .")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
budget_results=[]
campaign_results=[]
adgroup_results=[]
keyword_results=[]
for entity in download_entities:
if isinstance(entity, BulkBudget):
budget_results.append(entity)
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
campaign_results.append(entity)
output_bulk_campaigns([entity])
if isinstance(entity, BulkAdGroup):
adgroup_results.append(entity)
output_bulk_ad_groups([entity])
if isinstance(entity, BulkExpandedTextAd):
output_bulk_expanded_text_ads([entity])
if isinstance(entity, BulkKeyword):
keyword_results.append(entity)
output_bulk_keywords([entity])
# Here is a simple example that updates the keyword bid to use the ad group bid.
update_bulk_keyword=BulkKeyword()
update_bulk_keyword.ad_group_id=adgroup_results[0].ad_group.Id
update_keyword=campaign_service.factory.create('Keyword')
update_keyword.Id=next((keyword_result.keyword.Id for keyword_result in keyword_results if
keyword_result.keyword.Id is not None and keyword_result.ad_group_id==update_bulk_keyword.ad_group_id), None)
# You can set the Bid.Amount property to change the keyword level bid.
update_keyword.Bid=campaign_service.factory.create('Bid')
update_keyword.Bid.Amount=0.46
# The keyword bid will not be updated if the Bid property is not specified or if you create
# an empty Bid.
#update_keyword.Bid=campaign_service.factory.create('Bid')
# The keyword level bid will be deleted ("delete_value" will be written in the bulk upload file), and
# the keyword will effectively inherit the ad group level bid if you explicitly set the Bid property to None.
#update_keyword.Bid=None
# It is important to note that the above behavior differs from the Bid settings that
# are used to update keywords with the Campaign Management servivce.
# When using the Campaign Management service with the Bing Ads Python SDK, if the
# Bid property is not specified or is set explicitly to None, your keyword bid will not be updated.
# For examples of how to use the Campaign Management service for keyword updates, please see KeywordsAds.py.
update_bulk_keyword.keyword=update_keyword
upload_entities=[]
upload_entities.append(update_bulk_keyword)
output_status_message("\nUpdating the keyword bid to use the ad group bid . . .")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
for entity in download_entities:
if isinstance(entity, BulkKeyword):
output_bulk_keywords([entity])
# Here is a simple example that updates the campaign budget.
download_parameters=DownloadParameters(
download_entities=[
'Budgets',
'Campaigns'
],
result_file_directory=FILE_DIRECTORY,
result_file_name=DOWNLOAD_FILE_NAME,
overwrite_result_file=True,
last_sync_time_in_utc=None
)
upload_entities=[]
get_budget_results=[]
get_campaign_results=[]
# Download all campaigns and shared budgets in the account.
download_entities=download_file(bulk_service_manager, download_parameters)
output_status_message("Downloaded all campaigns and shared budgets in the account.\n")
for entity in download_entities:
if isinstance(entity, BulkBudget):
get_budget_results.append(entity)
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
get_campaign_results.append(entity)
output_bulk_campaigns([entity])
# If the campaign has a shared budget you cannot update the Campaign budget amount,
# and you must instead update the amount in the Budget record. If you try to update
# the budget amount of a Campaign that has a shared budget, the service will return
# the CampaignServiceCannotUpdateSharedBudget error code.
for entity in get_budget_results:
if entity.budget.Id > 0:
# Increase budget by 20 %
entity.budget.Amount *= Decimal(1.2)
upload_entities.append(entity)
for entity in get_campaign_results:
if entity.campaign.BudgetId == None or entity.campaign.BudgetId <= 0:
# Increase budget by 20 %
entity.campaign.DailyBudget *= 1.2
upload_entities.append(entity)
if len(upload_entities) > 0:
output_status_message("Changed local campaign budget amounts. Starting upload.\n")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
for entity in download_entities:
if isinstance(entity, BulkBudget):
get_budget_results.append(entity)
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
get_campaign_results.append(entity)
output_bulk_campaigns([entity])
else:
output_status_message("No campaigns or shared budgets in account.\n")
# Delete the campaign, ad group, keywords, and ads that were previously added.
# You should remove this region if you want to view the added entities in the
# Bing Ads web application or another tool.
upload_entities=[]
for budget_result in budget_results:
budget_result.status='Deleted'
upload_entities.append(budget_result)
for campaign_result in campaign_results:
campaign_result.campaign.Status='Deleted'
upload_entities.append(campaign_result)
output_status_message("\nDeleting campaign, budget, ad group, ads, and keywords . . .")
download_entities=write_entities_and_upload_file(bulk_service_manager, upload_entities)
for entity in download_entities:
if isinstance(entity, BulkBudget):
output_bulk_budgets([entity])
if isinstance(entity, BulkCampaign):
output_bulk_campaigns([entity])
output_status_message("Program execution completed")
except WebFault as ex:
output_webfault_errors(ex)
except Exception as ex:
output_status_message(ex)
# Main execution
if __name__ == '__main__':
    # Warn up front: the SOAP service proxies are generated at runtime, which
    # delays the start of main execution.
    print("Python loads the web service proxies at runtime, so you will observe "
          "a performance delay between program launch and main execution...\n")

    # Start with an empty authorization shell carrying only the developer
    # token; account/customer ids and credentials are filled in later by
    # authenticate(). NOTE(review): main() and the helpers read these three
    # objects as module-level globals, so their names must not change.
    authorization_data = AuthorizationData(
        account_id=None,
        customer_id=None,
        developer_token=DEVELOPER_TOKEN,
        authentication=None,
    )

    # Bulk upload/download helper; polls for operation status every 5000 ms.
    bulk_service_manager = BulkServiceManager(
        authorization_data=authorization_data,
        poll_interval_in_milliseconds=5000,
        environment=ENVIRONMENT,
    )

    # Campaign Management proxy (API version 11), used elsewhere in this file
    # to factory-create Keyword/Bid objects.
    campaign_service = ServiceClient(
        service='CampaignManagementService',
        authorization_data=authorization_data,
        environment=ENVIRONMENT,
        version=11,
    )

    # You should authenticate for Bing Ads production services with a Microsoft Account,
    # instead of providing the Bing Ads username and password set.
    authenticate(authorization_data)

    main(authorization_data)
| [
"eur@microsoft.com"
] | eur@microsoft.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.