blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3e03394d28989c705b6f829c76a9c7e4c36a2689 | 1012f61f46ff7aaf37cd3ce0ead64e035ec201dc | /coding-challange/codewars/6kyu/2020-01-19~2020-04-14/does-my-number-look-big-in-this/does-my-number-look-big-in-this.py | ef92ef56a4daa61186ead7f941fbab381a28b528 | [] | no_license | polyglotm/coding-dojo | 89efe22f5a34088e94c9e3a4e25cad510b04172a | 43da9c75e3125f5cb1ac317d275475f1c0ea6727 | refs/heads/develop | 2023-08-17T11:59:30.945061 | 2023-08-16T14:13:45 | 2023-08-16T14:13:45 | 188,733,115 | 2 | 0 | null | 2023-03-04T05:49:21 | 2019-05-26T21:26:25 | JavaScript | UTF-8 | Python | false | false | 597 | py | # does-my-number-look-big-in-this
# https://www.codewars.com/kata/5287e858c6b5a9678200083c
from unittest import TestCase
from functools import reduce
def narcissistic(value):
    """Return True if *value* is a narcissistic number, i.e. it equals the
    sum of its digits each raised to the power of the digit count."""
    digits = str(value)
    length = len(digits)
    # sum() with a generator replaces the reduce(lambda a, b: a + b, ...)
    # idiom: clearer, and no functools import is needed.
    return value == sum(int(char) ** length for char in digits)
# Smoke tests: a bare TestCase instance gives unittest-style assertion
# messages without defining a test class or invoking a test runner.
TestCase().assertEqual(narcissistic(7), True, '7 is narcissistic');
TestCase().assertEqual(narcissistic(371), True, '371 is narcissistic');
TestCase().assertEqual(narcissistic(122), False, '122 is not narcissistic')
TestCase().assertEqual(narcissistic(4887), False, '4887 is not narcissistic')
| [
"polyglot.m@gmail.com"
] | polyglot.m@gmail.com |
a2575af33bb634af6f622eb1520fc75ac98f8c12 | a4586ad2d7c8747c79a0a6fa25a367706f7b5638 | /Chap3/project/api_weather_2.py | 66a2159d6b7e4f82c5ff968372baeee39ff4281a | [] | no_license | AIHackerTest/Bruce-Qiao_Py101-004 | e1ff0272d19b3ff734af2a96fd7fe9de2ef026e7 | 402026e0d2552578de017169ea9a8d17318a8471 | refs/heads/master | 2021-06-25T10:07:19.665410 | 2017-09-12T08:17:58 | 2017-09-12T08:17:58 | 103,240,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | """ 使用心知天气API """
import requests
import json
import re
from const_value import API, KEY, UNIT, LANGUAGE
from flask import Flask, render_template, request
def fetchWeather(location):
    """Query the weather API for *location* and return the decoded JSON
    payload as a Python object."""
    params = {
        'key': KEY,
        'location': location,
        'language': LANGUAGE,
        'unit': UNIT,
    }
    response = requests.get(API, params=params, timeout=5)
    return json.loads(response.content)
def change_date_format(raw_date):
    """Convert an ISO-8601 timestamp ('YYYY-MM-DDTHH:MM:SS...') into the
    display form 'YYYY-MM-DD HH:MM'."""
    pattern = (r"\b(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<date>\d\d)"
               r"T(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)\b")
    match = re.search(pattern, raw_date)
    parts = match.groupdict()
    return '{year}-{month}-{date} {hour}:{minute}'.format(**parts)
def json_to_dict(weather_json):
    """Flatten the API response into the fields the template renders."""
    result = weather_json['results'][0]
    now = result['now']
    return {
        'city': result['location']['name'],
        'weather_condition': now['text'],
        'temperature': now['temperature'],
        'update_time': change_date_format(result['last_update']),
    }
app = Flask(__name__)
# Module-level, in-memory history of successful queries; shared by every
# request of this process and lost on restart.
inquiry_list = []
@app.route("/", methods=['POST', 'GET'])
def main():
    """Render the page and handle the form buttons: 查询 (query a city's
    weather), 历史 (show query history) and 帮助 (show help)."""
    inquiry_outcome = None
    inquiry_history = None
    help_information = None
    error = None
    if request.method == "POST":
        if request.form['action'] == u'查询':
            result = fetchWeather(request.form['location'])
            # The API reports failures via a top-level "status" key.
            if "status" in result.keys():
                error=result['status']
            else:
                inquiry_outcome = json_to_dict(result)
                inquiry_list.append(inquiry_outcome)
        elif request.form['action'] == u'历史':
            inquiry_history = inquiry_list
        else:
            #request.form['action'] == u'帮助':
            help_information = 1
        # The template decides what to show based on which kwargs are set.
        return render_template("api_weather.html",
            inquiry_outcome=inquiry_outcome,
            inquiry_history=inquiry_history,
            help_information=help_information,
            error=error)
    else:
        return render_template("api_weather.html")
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(debug = True)
| [
"xiaowan5219@gmail.com"
] | xiaowan5219@gmail.com |
eff68dae2504df1a5e60f809aa964bca0e998e02 | 06e34e2dface0b87fa785cab7e65422a5f20ba18 | /Solutions/985-Sum-of-Even-Numbers-After-Queries/python.py | 131d4b9326480a44fbd1879ea6c56af2850dc50c | [] | no_license | JerryHu1994/LeetCode-Practice | c9841b0ce70451c19c8a429a3898c05b6233e1d4 | b0ce69985c51a9a794397cd98a996fca0e91d7d1 | refs/heads/master | 2022-02-10T04:42:28.033364 | 2022-01-02T04:44:22 | 2022-01-02T04:44:22 | 117,118,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | class Solution(object):
def sumEvenAfterQueries(self, A, queries):
"""
:type A: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
ans = []
currsum = sum([i for i in A if i%2 == 0])
for val, ind in queries:
if A[ind]%2 == 0: currsum -= A[ind]
A[ind] = A[ind] + val
if A[ind]%2 == 0: currsum += A[ind]
ans.append(currsum)
return ans | [
"hjr01211@gmail.com"
] | hjr01211@gmail.com |
378a2de6fb2b861c7fca12322550f26bd2b5ec40 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part002299.py | 75cd1f983de3a20c28837508a34522dc6bd7b2fc | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher95023(CommutativeMatcher):
    """Auto-generated commutative many-to-one matcher for two patterns
    (annotated '0: x*d' and '1: x*f' below); generated code -- do not
    edit by hand."""
    _instance = None
    # pattern id -> (index, multiset of subpattern ids, wildcard variables)
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.4.0', 1, 1, S(0)), Add)
        ]),
        1: (1, Multiset({1: 1}), [
            (VariableWithCount('i2.4.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily-created process-wide singleton instance.
        if CommutativeMatcher95023._instance is None:
            CommutativeMatcher95023._instance = CommutativeMatcher95023()
        return CommutativeMatcher95023._instance
    @staticmethod
    def get_match_iter(subject):
        """Yield (pattern_index, substitution) pairs that match *subject*.

        Encodes a state machine; popleft/appendleft pairs implement
        backtracking over the subject deque."""
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 95022
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.1.4.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 95024
            if len(subjects) >= 1:
                tmp2 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.2.1.4.1.0', tmp2)
                except ValueError:
                    pass
                else:
                    pass
                    # State 95025
                    if len(subjects) == 0:
                        pass
                        # 0: x*d
                        yield 0, subst2
                subjects.appendleft(tmp2)
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.4.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 97036
            if len(subjects) >= 1:
                tmp5 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.4.1.0', tmp5)
                except ValueError:
                    pass
                else:
                    pass
                    # State 97037
                    if len(subjects) == 0:
                        pass
                        # 1: x*f
                        yield 1, subst2
                subjects.appendleft(tmp5)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp7 = subjects.popleft()
            associative1 = tmp7
            associative_type1 = type(tmp7)
            subjects8 = deque(tmp7._args)
            matcher = CommutativeMatcher95027.get()
            tmp9 = subjects8
            subjects8 = []
            for s in tmp9:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp9, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 95028
                    if len(subjects) == 0:
                        pass
                        # 0: x*d
                        yield 0, subst1
                if pattern_index == 1:
                    pass
                    # State 97038
                    if len(subjects) == 0:
                        pass
                        # 1: x*f
                        yield 1, subst1
            subjects.appendleft(tmp7)
        return
        yield  # keeps this function a generator even when nothing matches
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part002300 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
b472f6f5cc57716d39c06e423808c87c7390c6b7 | a34e3d435f48ef87477d3ae13ca8a43015e5052c | /tifffile_test.py | 07af1815e1e30af537c4481aac2250714b5604aa | [] | no_license | haehn/sandbox | 636069372fc7bb7fd72b5fde302f42b815e8e9b0 | e49a0a30a1811adb73577ff697d81db16ca82808 | refs/heads/master | 2021-01-22T03:39:03.415863 | 2015-02-11T23:16:22 | 2015-02-11T23:16:22 | 26,128,048 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import tifffile as tif
import time
# Measure how long tifffile takes to read the image from disk.
# NOTE(review): time.clock() is CPU time on Unix but wall time on Windows,
# and was removed in Python 3.8 -- prefer time.perf_counter() when porting
# this (Python 2) script.
start_t = time.clock()
i = tif.imread('test.tif')
# Elapsed seconds, then the loaded array's dimensions.
print time.clock() - start_t
print i.shape
| [
"haehn@seas.harvard.edu"
] | haehn@seas.harvard.edu |
7e2567e4a20aa02342cb0de0b8f2bcc6123bc9b2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/318/93006/submittedfiles/principal.py | 0320151c54c0035526eea0587c53dbe5b2639e6f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Read n grades from the user, then report the list and its mean.
n = int(input('Digite a quantidade de notas :'))
notas = []
for i in range(n):
    notas.append(float(input('Digite a nota%d: ' % (i + 1))))
# Sum of nota/n terms, accumulated left to right exactly as before.
media = sum(nota / float(n) for nota in notas)
print(notas)
print(media)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9f5244ca9c999f852f87ed9ccd1c944c0a36fbe3 | 5963c12367490ffc01c9905c028d1d5480078dec | /homeassistant/components/verisure/config_flow.py | 6c2822896e6da67af964a623bce98bdcb9867099 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 6,876 | py | """Config flow for Verisure integration."""
from __future__ import annotations
from typing import Any
from verisure import (
Error as VerisureError,
LoginError as VerisureLoginError,
ResponseError as VerisureResponseError,
Session as Verisure,
)
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from .const import (
CONF_GIID,
CONF_LOCK_CODE_DIGITS,
CONF_LOCK_DEFAULT_CODE,
DEFAULT_LOCK_CODE_DIGITS,
DOMAIN,
LOGGER,
)
class VerisureConfigFlowHandler(ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Verisure."""
    VERSION = 1
    # Populated as the user advances through the flow steps.
    email: str
    entry: ConfigEntry
    # Maps installation giid -> human-readable label for the picker form.
    installations: dict[str, str]
    password: str
    @staticmethod
    @callback
    def async_get_options_flow(config_entry: ConfigEntry) -> VerisureOptionsFlowHandler:
        """Get the options flow for this handler."""
        return VerisureOptionsFlowHandler(config_entry)
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle the initial step."""
        errors: dict[str, str] = {}
        if user_input is not None:
            verisure = Verisure(
                username=user_input[CONF_EMAIL], password=user_input[CONF_PASSWORD]
            )
            try:
                # The verisure client is blocking; log in via the executor.
                await self.hass.async_add_executor_job(verisure.login)
            except VerisureLoginError as ex:
                LOGGER.debug("Could not log in to Verisure, %s", ex)
                errors["base"] = "invalid_auth"
            except (VerisureError, VerisureResponseError) as ex:
                LOGGER.debug("Unexpected response from Verisure, %s", ex)
                errors["base"] = "unknown"
            else:
                self.email = user_input[CONF_EMAIL]
                self.password = user_input[CONF_PASSWORD]
                self.installations = {
                    inst["giid"]: f"{inst['alias']} ({inst['street']})"
                    for inst in verisure.installations
                }
                return await self.async_step_installation()
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_EMAIL): str,
                    vol.Required(CONF_PASSWORD): str,
                }
            ),
            errors=errors,
        )
    async def async_step_installation(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Select Verisure installation to add."""
        # Skip the picker when the account has exactly one installation.
        if len(self.installations) == 1:
            user_input = {CONF_GIID: list(self.installations)[0]}
        if user_input is None:
            return self.async_show_form(
                step_id="installation",
                data_schema=vol.Schema(
                    {vol.Required(CONF_GIID): vol.In(self.installations)}
                ),
            )
        # The giid uniquely identifies an installation; abort when it is
        # already configured.
        await self.async_set_unique_id(user_input[CONF_GIID])
        self._abort_if_unique_id_configured()
        return self.async_create_entry(
            title=self.installations[user_input[CONF_GIID]],
            data={
                CONF_EMAIL: self.email,
                CONF_PASSWORD: self.password,
                CONF_GIID: user_input[CONF_GIID],
            },
        )
    async def async_step_reauth(self, data: dict[str, Any]) -> FlowResult:
        """Handle initiation of re-authentication with Verisure."""
        self.entry = self.hass.config_entries.async_get_entry(self.context["entry_id"])
        return await self.async_step_reauth_confirm()
    async def async_step_reauth_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle re-authentication with Verisure."""
        errors: dict[str, str] = {}
        if user_input is not None:
            verisure = Verisure(
                username=user_input[CONF_EMAIL], password=user_input[CONF_PASSWORD]
            )
            try:
                await self.hass.async_add_executor_job(verisure.login)
            except VerisureLoginError as ex:
                LOGGER.debug("Could not log in to Verisure, %s", ex)
                errors["base"] = "invalid_auth"
            except (VerisureError, VerisureResponseError) as ex:
                LOGGER.debug("Unexpected response from Verisure, %s", ex)
                errors["base"] = "unknown"
            else:
                data = self.entry.data.copy()
                self.hass.config_entries.async_update_entry(
                    self.entry,
                    data={
                        **data,
                        CONF_EMAIL: user_input[CONF_EMAIL],
                        CONF_PASSWORD: user_input[CONF_PASSWORD],
                    },
                )
                # Reload the entry so the new credentials take effect.
                self.hass.async_create_task(
                    self.hass.config_entries.async_reload(self.entry.entry_id)
                )
                return self.async_abort(reason="reauth_successful")
        return self.async_show_form(
            step_id="reauth_confirm",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_EMAIL, default=self.entry.data[CONF_EMAIL]): str,
                    vol.Required(CONF_PASSWORD): str,
                }
            ),
            errors=errors,
        )
class VerisureOptionsFlowHandler(OptionsFlow):
    """Handle Verisure options."""
    def __init__(self, entry: ConfigEntry) -> None:
        """Initialize Verisure options flow."""
        self.entry = entry
    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Manage Verisure options."""
        errors = {}
        if user_input is not None:
            # An empty default code is allowed; otherwise its length must
            # match the configured number of lock-code digits.
            if len(user_input[CONF_LOCK_DEFAULT_CODE]) not in [
                0,
                user_input[CONF_LOCK_CODE_DIGITS],
            ]:
                errors["base"] = "code_format_mismatch"
            else:
                return self.async_create_entry(title="", data=user_input)
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_LOCK_CODE_DIGITS,
                        default=self.entry.options.get(
                            CONF_LOCK_CODE_DIGITS, DEFAULT_LOCK_CODE_DIGITS
                        ),
                    ): int,
                    vol.Optional(
                        CONF_LOCK_DEFAULT_CODE,
                        default=self.entry.options.get(CONF_LOCK_DEFAULT_CODE),
                    ): str,
                }
            ),
            errors=errors,
        )
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
13d692809403a8b9ca30618e1f0d1aa33761a670 | 548c26cc8e68c3116cecaf7e5cd9aadca7608318 | /payments/paybackpg.py | 9cd0a718e255d26e31ca460a5e6df8742267d7db | [] | no_license | Morphnus-IT-Solutions/riba | b69ecebf110b91b699947b904873e9870385e481 | 90ff42dfe9c693265998d3182b0d672667de5123 | refs/heads/master | 2021-01-13T02:18:42.248642 | 2012-09-06T18:20:26 | 2012-09-06T18:20:26 | 4,067,896 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | import urllib, urllib2
import hashlib
from django.utils import simplejson
import logging
from django.conf import settings
log = logging.getLogger('request')
CATALOG_NAME = {
'Future Bazaar': 'futurebazaar',
'Ezone': 'ezoneonline',
}
def create_request(payment_attempt, request):
    """Build the Payback gateway payload for *payment_attempt* and submit
    it to the Khazana server; returns the decoded JSON response."""
    log.info("Entered create_request")
    payload = {
        'action': 'create_new',
        'gateway': 'payback',
        'transactionId': payment_attempt.id,
        'sessionId': payment_attempt.id,
        'catalogName': CATALOG_NAME.get(request.client.client.name),
        # Convert the amount into points at a 4x multiplier.
        'points': "%i" % (4 * payment_attempt.amount),
    }
    return get_response(settings.KHAZANA_SERVER_URL, payload)
def process_response(payment_attempt, rawdata):
    """Log the gateway's raw response and hand it back unchanged."""
    message = 'Payback process response data: %s' % rawdata
    log.info(message)
    return rawdata
def get_response(url, data):
try:
headers = {'Content-Type':'application/json; charset=UTF-8'}
data = simplejson.dumps(data)
req = urllib2.Request(url, data, headers)
proxy_support = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_support)
res = opener.open(req)
res_data = res.read()
log.info('Payment request data: %s' % data)
log.info('Got response for create payback payment request: %s' % res_data)
return simplejson.loads(res_data)
except IOError, e:
log.exception('Error creating payback payment request %s' % repr(e))
| [
"dalal.saumil@gmail.com"
] | dalal.saumil@gmail.com |
6add2f9d7701f8ca136f8bba16cbb839f0137be4 | 0cce9a9d9b9da4a820e9ed5fc674d06f0be9810a | /motorcycles2.py | 4defe2c1445f96a7e9c0c395f7187ccc52b4b112 | [] | no_license | wbroach/python_work | 3f4a85e998805f50b2400e64c5b7cbc31780b245 | 7e1842b317539d61bab0f04d72e71db893c865ff | refs/heads/master | 2020-04-14T22:06:43.164595 | 2019-10-02T01:25:00 | 2019-10-02T01:25:00 | 164,151,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | motorcycles = []
# Populate the demo list, show it, then empty it from the front.
motorcycles.extend(['honda', 'yamaha', 'suzuki'])
print('The original list is as follows:')
print(motorcycles)
print(motorcycles[2])
# Deleting index 0 three times removes every element.
for _ in range(3):
    del motorcycles[0]
print(len(motorcycles))
| [
"someone@someplace.com"
] | someone@someplace.com |
ce33aec59d60accb1c87fea41a2e45aa5d5dde37 | 91824d746654fe12881b4fc3b55c553aae0d22ac | /py/brick-wall.py | df97ef507ec7e7482058e07765b8af5889315529 | [
"Apache-2.0"
] | permissive | ckclark/leetcode | a1a173c67a36a3256b198f853fcd3d15aa5abbb7 | 844c6f18d06dcb397db76436e5f4b8ddcb1beddc | refs/heads/master | 2021-01-15T08:14:43.368516 | 2020-02-14T07:25:05 | 2020-02-14T07:30:10 | 42,386,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from collections import Counter
class Solution(object):
    def leastBricks(self, wall):
        """
        :type wall: List[List[int]]
        :rtype: int
        """
        # Count how many rows have an internal edge at each running width;
        # the best vertical line passes through the most edges.
        edge_counts = Counter()
        total_width = sum(wall[0])
        best = 0
        for row in wall:
            prefix = 0
            for brick in row:
                prefix += brick
                edge_counts[prefix] += 1
                # The wall's right border does not count as a crossable edge.
                if prefix < total_width:
                    best = max(best, edge_counts[prefix])
        return len(wall) - best
| [
"clark.ck@gmail.com"
] | clark.ck@gmail.com |
b6275e78d3a511285b135c3981a04566ab50cb91 | 25fb2f88b9d6e1d01806c0e90f3fb02bc82e1ce6 | /wisdem/postprocessing/wisdem_get.py | 304169be70583a413ee5b496fe6889c90ec169be | [
"Apache-2.0"
] | permissive | johnjasa/WISDEM | 6b607ea8c3ef1aab8065b82e75e53e5c12fa2cd3 | a4571e71cb5b9869c81790f8abb1bb7fba8fdb02 | refs/heads/master | 2022-07-01T16:32:09.003230 | 2021-11-04T16:24:05 | 2021-11-04T16:24:05 | 250,635,279 | 0 | 0 | NOASSERTION | 2020-03-27T20:04:35 | 2020-03-27T20:04:35 | null | UTF-8 | Python | false | false | 2,140 | py | import numpy as np
def is_floating(prob):
    """Return True when the WISDEM problem models a floating platform."""
    return prob.model.options["modeling_options"]["flags"]["floating"]


def _fetch(prob, floating_key, land_key):
    """Return prob[floating_key] for floating models, else prob[land_key].

    Private helper removing the floating/land branch duplicated across
    every getter below.
    """
    return prob[floating_key] if is_floating(prob) else prob[land_key]


def get_tower_diameter(prob):
    """Tower outer-diameter schedule."""
    return _fetch(prob, "floatingse.tower.outer_diameter", "towerse.tower_outer_diameter")


def get_tower_thickness(prob):
    """Tower wall-thickness schedule."""
    return _fetch(prob, "floatingse.tower.wall_thickness", "towerse.tower_wall_thickness")


def get_zpts(prob):
    """Tower section boundary z-coordinates."""
    return _fetch(prob, "floatingse.tower.z_param", "towerse.z_param")


def get_section_height(prob):
    """Per-section heights: successive differences of the z-coordinates."""
    return np.diff(get_zpts(prob))


def get_transition_height(prob):
    """Transition-piece elevation (top transition node when floating)."""
    if is_floating(prob):
        return prob["floating.transition_node"][-1]
    return prob["towerse.transition_piece_height"]


def get_tower_E(prob):
    """Tower material elastic modulus values."""
    return _fetch(prob, "floatingse.tower.E", "towerse.E")


def get_tower_G(prob):
    """Tower material shear modulus values."""
    return _fetch(prob, "floatingse.tower.G", "towerse.G")


def get_tower_rho(prob):
    """Tower material density values."""
    return _fetch(prob, "floatingse.tower.rho", "towerse.rho")


def get_tower_mass(prob):
    """Total tower mass."""
    return _fetch(prob, "floatingse.tower_mass", "towerse.tower_mass")


def get_tower_cost(prob):
    """Total tower cost."""
    return _fetch(prob, "floatingse.tower_cost", "towerse.tower_cost")


def get_structural_mass(prob):
    """Total structural mass."""
    return _fetch(prob, "floatingse.tower.structural_mass", "towerse.structural_mass")


def get_tower_freqs(prob):
    """Tower structural natural frequencies."""
    return _fetch(prob, "floatingse.tower_freqs", "towerse.tower.structural_frequencies")


def get_tower_cm(prob):
    """Tower center of mass."""
    return _fetch(prob, "floatingse.tower_center_of_mass", "towerse.tower_center_of_mass")


def get_tower_cg(prob):
    """Alias of get_tower_cm (center of gravity == center of mass)."""
    return get_tower_cm(prob)
| [
"garrett.barter@nrel.gov"
] | garrett.barter@nrel.gov |
ad39824936072d2fefcba287cf9c9844019ccebc | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2953/60761/236880.py | 85dc0205c02d4042309bbbf445e67c9c0b168125 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | def mintimes(a,b):
    # Base case: when a == 1 the cost is b - 1.
    if(a==1):
        return b-1
    # 10000000 is used as an effectively-infinite sentinel for the a == 0
    # and a == b cases -- presumably "unreachable"; TODO confirm intent.
    elif(a==0):
        return 10000000
    elif(a==b):
        return 10000000
    else:
        # Euclid-style recursion: int(b/a) steps plus the cost of (b % a, a).
        return mintimes(b%a,a)+int(b/a)
# Read n; the answer is the best cost over every candidate divisor up to n/2,
# starting from the trivial bound n - 1.
n = int(input(""))
if n == 1:
    print(0, end="")
else:
    best = n - 1
    divisor = 2
    while divisor <= n / 2:
        best = min(best, mintimes(n % divisor, divisor) + int(n / divisor))
        divisor += 1
    print(best, end="")
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
2369afd1881e2bf778f5cef9c86da552f9890dec | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_167/ch45_2019_06_03_00_38_26_791511.py | 8a1d9f502e38e2832869ccccc55a25ef63d0e533 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def zera_negativos(lista):
    i=0
    # NOTE(review): BUG -- the loop condition is inverted ('>' should be
    # '<'), so the body never executes and the function falls off the end,
    # returning None for every input. The 'return lista' is also placed
    # inside the loop body, which would end the loop on the first pass.
    while i > len (lista):
        i+=1
        if lista[i]<0:
            lista[i]=0
        return lista
| [
"you@example.com"
] | you@example.com |
fa524d0167704fd519a3051b4b1daa397349d730 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_arthritics.py | 9b5ba637596aad5009e67b96be5c854ee2065ff9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#class header
class _ARTHRITICS():
def __init__(self,):
self.name = "ARTHRITICS"
self.definitions = arthritic
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['arthritic']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1a8616a27762f3165dd94949cf15cee25aa6c367 | 349daa7f480c7e61e16e5c2fef53d423b77d86fe | /receiver_line.py | c40156a26a46cc43378492660c35027475be919a | [] | no_license | samhaug/SP_setup | 5e68d81eae5abf4a0b47fe4dadd2730eed855921 | 63be3033abc61fb3e8f260b504857bc8c6ef566c | refs/heads/master | 2021-01-22T18:46:14.244231 | 2017-09-04T23:52:25 | 2017-09-04T23:52:25 | 102,411,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | #!/home/samhaug/anaconda2/bin/python
'''
==============================================================================
File Name : receiver_line.py
Purpose : make line of stations from some source at some azimuth
Creation Date : 20-07-2017
Last Modified : Thu 20 Jul 2017 01:05:04 PM EDT
Created By : Samuel M. Haugland
==============================================================================
'''
import numpy as np
from matplotlib import pyplot as plt
from subprocess import call
from os import listdir
import h5py
import obspy
import seispy
import geopy
from sys import argv
def main():
    """Build a line of synthetic receivers along a 45-degree bearing from
    the source of a reference stream (one receiver per degree of arc),
    plot the geometry, and write a GEMINI station file."""
    st = obspy.read('/home/samhaug/work1/SP_sims/PREM_5s/st_Z.pk')
    stout = obspy.core.stream.Stream()
    source = geopy.Point(st[0].stats.sac['evla'], st[0].stats.sac['evlo'])
    for degree in range(180):
        tr = obspy.core.trace.Trace()
        tr.stats.sac = {}
        # 111.195 km per degree of great-circle arc.
        shift = geopy.distance.VincentyDistance(kilometers=111.195 * degree)
        receiver = shift.destination(point=source, bearing=45)
        tr.stats.sac['evla'] = st[0].stats.sac['evla']
        tr.stats.sac['evlo'] = st[0].stats.sac['evlo']
        tr.stats.sac['stla'] = receiver.latitude
        tr.stats.sac['stlo'] = receiver.longitude
        tr.stats.station = 'FUCK'
        tr.stats.network = 'II'
        stout.append(tr)
    seispy.mapplot.plot(stout)
    seispy.convert.gemini_stations(stout)

main()
| [
"samhaug@umich.edu"
] | samhaug@umich.edu |
d97ce608036c61b0d7f5ff7468932f6c8707e8c7 | 4382d1d3775cf8cfd33cf66c534203f2859d45d4 | /oolearning/evaluators/RegressionEvaluator.py | 4af5fd81469487c57019e20189f0049ca0a20b75 | [
"MIT"
] | permissive | shane-kercheval/oo-learning | 2d5b43a7c515df65b0d903f7b30c74286e609030 | 4cc5f574be258925b2b30fcd90f994b356b97c88 | refs/heads/master | 2022-07-21T22:46:11.640491 | 2022-06-15T02:52:24 | 2022-06-15T02:52:24 | 119,191,623 | 1 | 0 | MIT | 2022-04-06T19:26:33 | 2018-01-27T18:37:48 | Python | UTF-8 | Python | false | false | 5,008 | py | import numpy as np
from matplotlib import pyplot as plt
from oolearning.evaluators.EvaluatorBase import EvaluatorBase
from oolearning.evaluators.MaeScore import MaeScore
from oolearning.evaluators.MseScore import MseScore
from oolearning.evaluators.RSquaredScore import RSquaredScore
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
from statsmodels import api as sm # https://github.com/statsmodels/statsmodels/issues/3814
# noinspection SpellCheckingInspection
class RegressionEvaluator(EvaluatorBase):
    """
    Evaluates models for regression (i.e. numeric outcome) problems.

    Call `evaluate()` with the actual and predicted values first; the
    quality metrics and diagnostic plots are available afterwards.
    """
    def __init__(self):
        # All fields are populated by evaluate().
        self._actual_values = None
        self._predicted_values = None
        self._residuals = None
        self._standard_deviation = None
        self._mean_squared_error = None
        self._mean_absolute_error = None
        self._r_squared = None

    def evaluate(self, actual_values: np.ndarray, predicted_values: np.ndarray):
        """Compute residuals and all quality scores; returns `self` for chaining."""
        assert len(actual_values) == len(predicted_values)
        self._actual_values = actual_values
        self._predicted_values = predicted_values
        # Positive residual -> the model under-predicted the observation.
        self._residuals = actual_values - predicted_values
        self._standard_deviation = np.std(actual_values)
        self._mean_squared_error = MseScore().calculate(actual_values=actual_values,
                                                        predicted_values=predicted_values)
        self._mean_absolute_error = MaeScore().calculate(actual_values=actual_values,
                                                         predicted_values=predicted_values)
        self._r_squared = RSquaredScore().calculate(actual_values=actual_values,
                                                    predicted_values=predicted_values)
        return self

    @property
    def mean_absolute_error(self) -> float:
        """Mean absolute error (MAE)."""
        return self._mean_absolute_error

    @property
    def mean_squared_error(self) -> float:
        """Mean squared error (MSE)."""
        return self._mean_squared_error

    @property
    def root_mean_squared_error(self) -> float:
        """Root mean squared error (RMSE)."""
        return np.sqrt(self.mean_squared_error)

    @property
    def rmse_to_st_dev(self) -> float:
        """RMSE relative to the target's standard deviation (lower is better)."""
        return self.root_mean_squared_error / self._standard_deviation

    @property
    def r_squared(self) -> float:
        """Coefficient of determination (R^2)."""
        return self._r_squared

    @property
    def total_observations(self):
        """Number of observations evaluated."""
        return len(self._actual_values)

    @property
    def all_quality_metrics(self) -> dict:
        """All quality metrics keyed by display name."""
        return {'Mean Absolute Error (MAE)': self.mean_absolute_error,
                'Mean Squared Error (MSE)': self.mean_squared_error,
                'Root Mean Squared Error (RMSE)': self.root_mean_squared_error,
                'RMSE to Standard Deviation of Target': self.rmse_to_st_dev,
                'R Squared': self.r_squared,
                'Total Observations': self.total_observations}

    def plot_residuals_vs_fits(self):
        """Scatter residuals against fitted values with a loess trend; returns the axes."""
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._residuals, self._predicted_values)
        loess_x, loess_y = zip(*loess_points)
        plt.plot(loess_x, loess_y, color='r')
        plt.scatter(x=self._predicted_values, y=self._residuals, s=8, alpha=0.5)
        plt.title('Residuals vs. Fitted Values')
        plt.xlabel('Fitted Values')
        plt.ylabel('Residuals (Actual - Predicted)')
        return plt.gca()

    def plot_predictions_vs_actuals(self):
        """Scatter predictions against actuals with a loess trend and the
        perfect-prediction diagonal; returns the axes."""
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._predicted_values, self._actual_values)
        loess_x, loess_y = zip(*loess_points)
        plt.plot(loess_x, loess_y, color='r', alpha=0.5, label='Loess (Predictions vs Actuals)')
        plt.plot(self._actual_values, self._actual_values, color='b', alpha=0.5, label='Perfect Prediction')
        plt.scatter(x=self._actual_values, y=self._predicted_values, s=8, alpha=0.5)
        plt.title('Predicted Values vs. Actual Values')
        plt.xlabel('Actuals')
        plt.ylabel('Predicted')
        ax = plt.gca()
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels)
        plt.figtext(0.99, 0.01,
                    'Note: observations above blue line mean model is over-predicting; below means under-predicting.',  # noqa
                    horizontalalignment='right')
        return ax

    def plot_residuals_vs_actuals(self):
        """Scatter residuals against actual values with a loess trend; returns the axes."""
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._residuals, self._actual_values)
        loess_x, loess_y = zip(*loess_points)
        plt.plot(loess_x, loess_y, color='r')
        plt.scatter(x=self._actual_values, y=self._residuals, s=8, alpha=0.5)
        plt.title('Residuals vs. Actual Values')
        plt.xlabel('Actual')
        plt.ylabel('Residuals (Actual - Predicted)')
        plt.figtext(0.99, 0.01,
                    'Note: Actual > Predicted => Under-predicting (positive residual); negative residuals mean over-predicting',  # noqa
                    horizontalalignment='right')
        # Consistency fix: return the axes like the other plot_* methods
        # (this method previously returned None implicitly).
        return plt.gca()
"shane.kercheval@gmail.com"
] | shane.kercheval@gmail.com |
9988b94d526a323b486685de8ce175e1100badc7 | 61d499546f75e851d8b55293fe52754d01b66502 | /tcpproxy.py | 64e649d646ad3cd11408ae3fa20b753ce57e0040 | [] | no_license | sqrt-9/python-black | 0f6fd41db14d15ba0cb3438fd49066001ea85c8b | 82f0635e113b11bf2dffb1d85679018bf990e63b | refs/heads/master | 2021-01-18T00:15:50.185617 | 2016-09-20T12:12:11 | 2016-09-20T12:12:11 | 68,708,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | import sys
import socket
import threading
def server_loop(local_host,local_port,remote_host,remote_port,receive_first):
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
server.bind((local_host,local_port))
except:
print 'failed on %S:%d'%(local_host,local_port)
print 'check for other'
sys.exit(0)
print 'listen on %s:%d'%(local_host,local_port)
server.listen(5)
while True:
client_socket,addr = server.accept()
print 'received from%s:%d'%(addr[0],addr[1])
proxy_thread = threading.Thread(target = proxy_handler,args = (client_socket,remote_port,receive_first))
proxy_thread.start()
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
    """Relay data between client_socket and remote_host:remote_port.

    Buffers pass through the request_handler/response_handler hooks so
    traffic can be inspected or modified in flight; hexdump prints each
    buffer.  The loop ends when both directions go quiet.
    """
    remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    remote_socket.connect((remote_host, remote_port))
    if receive_first:
        # Some protocols (e.g. FTP banners) speak first; grab that data.
        remote_buffer = receive_from(remote_socket)
        hexdump(remote_buffer)
        remote_buffer = response_handler(remote_buffer)
        if len(remote_buffer):
            print('send %d bytes to localhost' % len(remote_buffer))
            client_socket.send(remote_buffer)
    while True:
        local_buffer = receive_from(client_socket)
        if len(local_buffer):
            print('received %d bytes from localhost' % len(local_buffer))
            hexdump(local_buffer)
            local_buffer = request_handler(local_buffer)
            remote_socket.send(local_buffer)
            print('sent to remote')
        remote_buffer = receive_from(remote_socket)  # fixed typo: remote_sockeet
        if len(remote_buffer):
            print('received %d bytes from remote' % len(remote_buffer))
            hexdump(remote_buffer)  # fixed stray ':' (was a syntax error)
            remote_buffer = response_handler(remote_buffer)
            client_socket.send(remote_buffer)  # fixed typo: client_soket
            print('sent to localhost')
        if not len(local_buffer) or not len(remote_buffer):
            client_socket.close()
            remote_socket.close()  # fixed: was `remote.socket.close()`
            print('no more data. Closing connections.')  # fixed 'date' -> 'data'
            break
def hexdump(src, length=16):
    """Print a canonical hex/ASCII dump of src (str or bytes).

    The original body was truncated (it assigned the unbound ``join`` method
    and produced no output); this completes it.  Each line shows the offset,
    the hex bytes, and a printable-ASCII rendering.  Returns the list of
    formatted lines (the original implicitly returned None, so callers that
    ignore the return value are unaffected).
    """
    if isinstance(src, str):
        # Work on raw byte values; unmappable characters become '?'.
        src = src.encode('latin-1', 'replace')
    result = []
    for i in range(0, len(src), length):
        chunk = bytearray(src[i:i + length])
        hexa = ' '.join('%02X' % b for b in chunk)
        # Printable ASCII passes through; everything else becomes '.'.
        text = ''.join(chr(b) if 0x20 <= b < 0x7F else '.' for b in chunk)
        result.append('%04X   %-*s   %s' % (i, length * 3, hexa, text))
    print('\n'.join(result))
    return result
def main():
    """Parse argv (localhost localport remotehost remoteport receive_first)
    and start the proxy's accept loop."""
    # Bug fix: the arg-count check compared against an undefined name `s`.
    if len(sys.argv[1:]) != 5:
        print('example:127.0.0.1 9000 10.12.131.1 9000 True')
        sys.exit(0)
    local_host = sys.argv[1]
    local_port = int(sys.argv[2])
    remote_host = sys.argv[3]
    remote_port = int(sys.argv[4])
    receive_first = sys.argv[5]
    if 'True' in receive_first:
        receive_first = True
    else:
        receive_first = False  # fixed typo: Flase
    server_loop(local_host, local_port, remote_host, remote_port, receive_first)
main()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
f071adc4282e6c3cf2b85418e354abb6b8fdc0d4 | 4c8c0f857500b5f4b572f139602e46a6c813f6e3 | /Polymorhphism_and_Magic_methods_exercises/Animal/animals/birds.py | 4597d2754bc68c1d5ff9190aec3e1a14945a7ebd | [] | no_license | svetoslavastoyanova/Python_OOP | 3d21fb0480c088ecad11211c2d9a01139cde031f | 518f73ecc8a39e7085d4b8bf5657a1556da3dcfa | refs/heads/main | 2023-08-04T19:46:58.906739 | 2021-09-18T07:46:02 | 2021-09-18T07:46:02 | 352,304,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from Animal.animals.animal import Bird
from Animal.food import Meat, Vegetable, Fruit, Food
class Owl(Bird):
    """A carnivorous bird: accepts only Meat, gaining 0.25 weight per unit fed."""

    def feed(self, food):
        # Owls reject anything that is not meat with an explanatory message.
        if isinstance(food, Meat):
            self.weight += 0.25 * food.quantity
            self.food_eaten += food.quantity
        else:
            return f"{self.__class__.__name__} does not eat {food.__class__.__name__}!"

    def make_sound(self):
        return "Hoot Hoot"
class Hen(Bird):
    """An omnivorous bird: eats any food, gaining 0.35 weight per unit fed."""

    def feed(self, food):
        # Hens accept every food type, so no isinstance guard is needed.
        gained = 0.35 * food.quantity
        self.weight += gained
        self.food_eaten += food.quantity

    def make_sound(self):
        return "Cluck"
| [
"svetoslava_stoyanova92@abv.bg"
] | svetoslava_stoyanova92@abv.bg |
c6e7240c8e1647d157d1af0e10c600e086b34e15 | e87524319ee0e610c451f1777a5a90883f508fe3 | /top/clearlight/base/runoob/print/percent_oper.py | 1823597198c3561b7a515bdedceca880ca3144ad | [
"Apache-2.0"
] | permissive | ClearlightY/Python_learn | 3a805bbe8883d58d0e60bd73f985b6093b983248 | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | refs/heads/master | 2020-12-11T06:58:17.984184 | 2020-07-30T18:10:20 | 2020-07-30T18:10:20 | 233,794,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | '''
第一个 % 后面的内容为显示的格式说明,6 为显示宽度,3 为小数点位数,f 为浮点数类型
第二个 % 后面为显示的内容来源,输出结果右对齐,2.300 长度为 5,故前面有一空格
'''
# %6.3f: field width 6, 3 decimal places; "2.300" is right-aligned with one leading space.
print("%6.3f" % 2.3)
# 2.300
'''
x 为表示 16 进制,显示宽度为 10,前面有 8 个空格。
'''
# %x formats hexadecimal; '+' forces a sign and 10 is the field width,
# so "+a" is right-aligned with 8 leading spaces.
print("%+10x" % 10)
# +a
'''
%s 字符串 (采用str()的显示)
%r 字符串 (采用repr()的显示)
%c 单个字符
%b 二进制整数
%d 十进制整数
%i 十进制整数
%o 八进制整数
%x 十六进制整数
%e 指数 (基底写为e)
%E 指数 (基底写为E)
%f 浮点数
%F 浮点数,与上相同%g 指数(e)或浮点数 (根据显示长度)
%G 指数(E)或浮点数 (根据显示长度)
%% 字符"%"
'''
# '-' left-justifies within the width-5 field: "-a" followed by trailing spaces.
print("%-5x" % -10)
# -a
pi = 3.1415
# %s renders via str(); %.8f pads the float to 8 decimal places.
print("pi的值是%s" % pi)
print("pi的值是%.8f" % pi)
# pi的值是3.1415
# pi的值是3.14150000
| [
"lxy12531@163.com"
] | lxy12531@163.com |
a33e7ff9a796b765615d6964bf5bf0d3690bd040 | d4d2ed267f63d9b48c74135a124bd2534211145f | /simpleblog/urls.py | fcfb8855430b032a5dba05877c6d41c7836f5f52 | [] | no_license | MerleLiuKun/EasyCase | 0a7a8411f7095f68e4ecd3be6deb7e99808000dd | fde2a88b3e2f31949d3f7cbe9a44704be44974f6 | refs/heads/master | 2021-09-05T00:20:34.622096 | 2017-11-02T03:52:28 | 2017-11-02T03:52:28 | 108,724,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from . import views
# Register every viewset with the default router; DRF derives the standard
# list/detail URL patterns (plus the API root view) automatically.
router = DefaultRouter()

for prefix, viewset in (
    (r'tags', views.TagViewSet),
    (r'categorys', views.CategoryViewSet),
    (r'posts', views.PostViewSet),
    (r'users', views.UserViewSet),
):
    router.register(prefix, viewset)

urlpatterns = router.urls
| [
"merle.liukun@gmail.com"
] | merle.liukun@gmail.com |
f4fa8fe37323df0691cdd4b41e1b498002191f43 | 87cacb90676e5e7d1d8f0e643f1ad6ed9e35acbf | /need to clean/codes/split_train.py | b8cdd4d50e9135672a21ad3fba518dbe29845926 | [] | no_license | vuhoangminh/Kaggle-TalkingData-AdTracking-Fraud-Detection-Challenge | 3b75d4a7c60574a4875c62e8843a01d945d792d3 | 56045f446f1a0c538d91ac65e536edc4b7b5a417 | refs/heads/master | 2020-03-13T12:56:42.309722 | 2018-05-08T10:50:35 | 2018-05-08T10:50:35 | 131,129,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,561 | py | """
Adding improvements inspired from:
Ravi Teja's fe script: https://www.kaggle.com/rteja1113/lightgbm-with-count-features?scriptVersionId=2815638
"""
import pandas as pd
import time
import numpy as np
from sklearn.cross_validation import train_test_split
import lightgbm as lgb
import gc
import pickle
# Local directory holding the TalkingData train.csv.
path = 'E:/kaggle/input/'

# Narrow integer dtypes keep the very large click-log frame memory-friendly.
dtypes = {
        'ip'            : 'uint32',
        'app'           : 'uint16',
        'device'        : 'uint16',
        'os'            : 'uint16',
        'channel'       : 'uint16',
        'is_attributed' : 'uint8',
        'click_id'      : 'uint32'
        }

TRAINSAMPLE = 180000000  # total number of training rows in train.csv
NROWS = 90000000  # rows processed per chunk
# NROWS = 300
num_split = int(TRAINSAMPLE/NROWS)  # number of chunks (2 with the values above)
print (num_split)
def load_write(iSplit):
    """Load chunk `iSplit` of train.csv, add count features, and pickle it.

    Features added: time parts (sec/min/hour/day/wday) and three group-count
    columns (qty, ip_app_count, ip_app_os_count).  The frame is written to
    'train_<iSplit>' via to_pickle.
    """
    skip_rows = iSplit*NROWS
    print('loading train data...')
    if iSplit>0:
        # Bug fix: skiprows=range(1, skip_rows) only skipped skip_rows-1 data
        # rows (line 0 is the header), so each chunk re-read the last row of
        # the previous chunk.  range(1, skip_rows + 1) skips exactly skip_rows
        # data rows while keeping the header.
        train_df = pd.read_csv(path+"train.csv", skiprows=range(1, skip_rows + 1), nrows=NROWS, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
    else:
        train_df = pd.read_csv(path+"train.csv", nrows=NROWS, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'is_attributed'])
    gc.collect()
    print('Extracting new features...')
    # Decompose the click timestamp into small uint8 parts.
    train_df['sec'] = pd.to_datetime(train_df.click_time).dt.second.astype('uint8')
    train_df['min'] = pd.to_datetime(train_df.click_time).dt.minute.astype('uint8')
    train_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')
    train_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')
    train_df['wday'] = pd.to_datetime(train_df.click_time).dt.dayofweek.astype('uint8')
    print(train_df.head())
    gc.collect()
    print('grouping by ip-day-hour combination...')
    # Clicks per ip within each day/hour bucket.
    gp = train_df[['ip','day','hour','channel']].groupby(by=['ip','day','hour'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'qty'})
    train_df = train_df.merge(gp, on=['ip','day','hour'], how='left')
    print(train_df.head())
    del gp
    gc.collect()
    print('group by ip-app combination...')
    gp = train_df[['ip', 'app', 'channel']].groupby(by=['ip', 'app'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_count'})
    train_df = train_df.merge(gp, on=['ip','app'], how='left')
    print(train_df.head())
    del gp
    gc.collect()
    print('group by ip-app-os combination...')
    gp = train_df[['ip','app', 'os', 'channel']].groupby(by=['ip', 'app', 'os'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_os_count'})
    print("merging...")
    train_df = train_df.merge(gp, on=['ip','app', 'os'], how='left')
    print(train_df.head())
    del gp
    gc.collect()
    print("vars and data type: ")
    train_df.info()
    # Shrink the freshly-merged count columns (merge produced int64/float64).
    train_df['qty'] = train_df['qty'].astype('uint16')
    train_df['ip_app_count'] = train_df['ip_app_count'].astype('uint16')
    train_df['ip_app_os_count'] = train_df['ip_app_os_count'].astype('uint16')
    print(train_df.head())
    print("after splitted: ")
    print(train_df.head())
    # ip/click_time/sec were only needed to build the derived features.
    train_df = train_df.drop(['ip', 'click_time', 'sec'], axis=1)
    print(train_df.head())
    print("train size: ", len(train_df))
    save_name = 'train_' + str(iSplit)
    print("save to: ", save_name)
    train_df.to_pickle(save_name)
    del train_df
    gc.collect()
# Process the training file chunk by chunk; each chunk is featurized and
# pickled independently by load_write() so memory use stays bounded.
for iSplit in range(num_split):
# for iSplit in range(5):
    print('Processing split', iSplit+1)
    skip_rows = iSplit*NROWS  # informational only; load_write recomputes it
    print (skip_rows)
    load_write(iSplit)
| [
"minhmanutd@gmail.com"
] | minhmanutd@gmail.com |
af6563a14947d2c2eee15062e80ebef5c653c552 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_skimmed.py | 85b96e7c40e8352d9c6ec45728f4ef38b80df091 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.verbs._skim import _SKIM
#calss header
class _SKIMMED(_SKIM):
    """Auto-generated word entry: the past-tense form of the verb 'skim'."""

    def __init__(self):
        # Inherit the base word's behaviour, then record this form's metadata.
        _SKIM.__init__(self)
        self.name = "SKIMMED"
        self.specie = 'verbs'
        self.basic = "skim"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
304b8cd13235847181617b46fe7f83d8ae6cc83e | 8019624601ea9d3aa5c98ee8aa47c74a20805ca1 | /twnews/tests/test_chinatimes.py | 3abd2e6b9a57b9d4c0407bb29326521861b11483 | [
"MIT"
] | permissive | sevaroy/twnews | 7825b7717386d61bfe2f96439ad57a7f2b99ff9c | 92d468f36a1fd2031bc5fa315ad02d317eebcf1a | refs/heads/master | 2020-04-04T18:03:46.064045 | 2018-11-02T03:31:04 | 2018-11-02T03:31:04 | 156,148,250 | 1 | 0 | MIT | 2018-11-05T02:17:29 | 2018-11-05T02:17:29 | null | UTF-8 | Python | false | false | 2,292 | py | """
中時電子報單元測試
"""
import unittest
from twnews.soup import NewsSoup, pkgdir
#@unittest.skip
class TestChinatimes(unittest.TestCase):
def setUp(self):
self.url = 'https://www.chinatimes.com/realtimenews/20180916001767-260402'
self.dtf = '%Y-%m-%d %H:%M:%S'
def test_01_sample(self):
"""
測試本地樣本解構
* 如果測試 02 失敗,需要用 bin/getnews.sh 重新製作本地樣本
"""
nsoup = NewsSoup(pkgdir + '/samples/chinatimes.html.gz', mobile=False)
self.assertEqual('chinatimes', nsoup.channel)
self.assertIn('悲慟!北市士林年邁母子 住處上吊自殺身亡', nsoup.title())
self.assertEqual('2018-09-16 15:31:00', nsoup.date().strftime(self.dtf))
self.assertEqual('謝明俊', nsoup.author())
self.assertIn('北市士林區葫蘆街一處民宅', nsoup.contents())
def test_02_desktop(self):
"""
測試桌面版網頁解構
* 務必開啟強制更新,確保解構程式能跟進網站最新版本
* 實際新聞內容有可能更新,需要同步單元測試的預期值
"""
nsoup = NewsSoup(self.url, refresh=True, mobile=False)
self.assertEqual('chinatimes', nsoup.channel)
self.assertIn('悲慟!北市士林年邁母子 住處上吊自殺身亡', nsoup.title())
self.assertEqual('2018-09-16 15:31:00', nsoup.date().strftime(self.dtf))
self.assertEqual('謝明俊', nsoup.author())
self.assertIn('北市士林區葫蘆街一處民宅', nsoup.contents())
def test_03_mobile(self):
"""
測試行動版網頁解構
* 務必開啟強制更新,確保解構程式能跟進網站最新版本
* 實際新聞內容有可能更新,需要同步單元測試的預期值
"""
nsoup = NewsSoup(self.url, refresh=True, mobile=True)
self.assertEqual('chinatimes', nsoup.channel)
self.assertIn('悲慟!北市士林年邁母子 住處上吊自殺身亡', nsoup.title())
self.assertEqual('2018-09-16 15:31:00', nsoup.date().strftime(self.dtf))
self.assertEqual('謝明俊', nsoup.author())
self.assertIn('北市士林區葫蘆街一處民宅', nsoup.contents())
| [
"virus.warnning@gmail.com"
] | virus.warnning@gmail.com |
ab041452c1c435c0d19fabc01ece24d0ee8c3b5f | d8574d8ca5c571fd7ed3b67dac00c55df2ac6580 | /data/kitchen/python/kitchen/evaluatorGui.py | 312321bdd631c125ebc01f300b4833c3d8811df8 | [] | no_license | h2r/slu_core | 19de87e55e84dd4dada65b4d7b65857bdae740e8 | e30c0245177e8481397fd4987422e3d35d79ec08 | refs/heads/master | 2021-07-09T18:19:33.424551 | 2020-07-06T18:49:18 | 2020-07-06T18:49:18 | 143,929,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,331 | py | from kitchen import kitchenState, recipeManager, annotatedRecipe, planningLanguage
import pickle_util
class Evaluator():
    """Scores inferred kitchen plans against annotated recipes, both at the
    single-instruction level and end-to-end.  Counters accumulate across
    every recipe evaluated with the same instance."""

    def __init__(self):
        self.score = 0              # unused by the methods below
        self.successCount = 0       # instructions/recipes whose end state matched
        self.totalCount = 0         # everything evaluated
        self.noopSuccessCount = 0   # empty plans correctly matching a noop annotation

    def evaluateInstructions(self, targetRecipe):
        """Infer a plan for each instruction of targetRecipe independently and
        compare its end state to the annotated plan's end state.

        Returns completePath: a list of (plan-language string, is_correct)
        tuples, one per instruction (used mainly by the GUI).
        """
        #Can a single instruction be interpreted as multiple instructions? Does it even matter?
        model_fname = "kitchenModel_1.5.pck"
        rm = recipeManager.RecipeManager(model_fname)
        pl = planningLanguage.PlanningLanguage()
        # Per-recipe counters (tc/sc/nsc) mirror the instance-wide ones.
        tc = 0
        sc = 0
        nsc = 0
        #A list of (planningLanguage, is_correct) tuples. Used mainly for GUI
        completePath = []
        print "Evaluating (instruction-level): " + targetRecipe.name
        for i in range(len(targetRecipe.instructions)):
            self.totalCount += 1
            tc += 1
            instruction = targetRecipe.instructions[i]
            #print "instruction", instruction
            # State of the kitchen just before this instruction executes.
            initialState = targetRecipe.idx_to_start_state(i)
            instructionInferredPlan = rm.find_plan(instruction[0], initialState)
            desiredPlan = pl.compileAnnotation(instruction[1], initialState)
            desiredEndState = desiredPlan[-1][1]
            if len(instructionInferredPlan) == 0:
                #print "Zero length instruction for:", instruction
                # An empty inferred plan is only correct when the annotation
                # itself is a single noop.
                if len(desiredPlan) == 1:
                    if desiredPlan[-1][0].name == "noop":
                        self.noopSuccessCount += 1
                        nsc += 1
                        completePath.append(("| noop()", True))
                    else:
                        completePath.append(("None", False))
                else:
                    completePath.append(("None", False))
            else:
                #print "inferred plan", instructionInferredPlan
                actualEndState = instructionInferredPlan[-1][1][-1][1]
                #print "actualEndState", actualEndState
                #plInferredPath = planningLanguage.decompile(instructionInferredPlan[-1][1])
                # NOTE(review): the inner loop variable shadows the outer
                # index `i`; harmless here but confusing.
                plInferredPath = ""
                for i in instructionInferredPlan:
                    plInferredPath = plInferredPath + " | " + planningLanguage.decompile(i[1])
                if desiredEndState == actualEndState:
                    self.successCount += 1
                    sc += 1
                    print instructionInferredPlan
                    completePath.append((plInferredPath, True))
                else:
                    completePath.append((plInferredPath, False))
                    print "State is not the same for instruction", instruction
                    print "Inferred path was: ", planningLanguage.decompile(instructionInferredPlan[0][1])
##                    print "Desired mixing bowl:", desiredEndState.mixing_bowl
##                    print "Actual mixing bowl:", actualEndState.mixing_bowl
                    print "\n"
        print "\n\nResults for the instruction-level evaluation of :", targetRecipe.name
        print "Total Instructions:", tc, "\nSuccess:", sc
        print "Noop Success:", nsc
        print "Failures:", tc - (sc+nsc), "\n\n"
        return completePath

    def evaluateEndToEnd(self, targetRecipe, useBeam=True):
        """Infer one plan for the whole recipe text and compare its final
        state to the recipe's annotated final state.

        Always returns 0; success/failure is reported via prints and the
        instance counters.
        """
        #A list of (planningLanguage, is_correct) tuples. Used mainly for GUI
        completePath = []
        self.totalCount += 1
        model_fname = "kitchenModel_1.5.pck"
        training_set = pickle_util.load("training.pck")
        rm = recipeManager.RecipeManager(model_fname)
        pl = planningLanguage.PlanningLanguage()
        print "\nEvaluating (end-to-end):", targetRecipe.name
        recipeText = targetRecipe.instruction_text
        initialState = targetRecipe.start_state
        # Beam search is the default; find_plan is the greedy fallback.
        if useBeam:
            inferredPlan = rm.find_beam_plan(recipeText, initialState)
        else:
            inferredPlan = rm.find_plan(recipeText, initialState)
        print "\ninferred", inferredPlan
        actualEndState = inferredPlan[-1][1][-1][1]
        print "\ndesired states", targetRecipe.states
        desiredEndState = targetRecipe.states[-1][-1][1]
        # Human-readable rendering of the inferred plan.
        plInferredPath = ""
        for i in inferredPlan:
            plInferredPath = plInferredPath + " | " + planningLanguage.decompile(i[1])
        print "\nPL inferred:", plInferredPath
        plActual = ""
        for i in targetRecipe.instructions:
            plActual = plActual + " | " + i[1]
        print "\nPL Desired:", plActual, "\n"
        #print desiredEndState
        #print "end state", actualEndState
        if desiredEndState == actualEndState:
            self.successCount += 1
            print "\n\nResults for the End-to-End evaluation for :", targetRecipe.name
            print "Success"
        else:
            print "\nResults for the End-to-End evaluation for :", targetRecipe.name
            print "Failure"
        return 0
def runInstructionEvaluation(runTestSet=True):
    """Run the instruction-level evaluation over the annotated corpus.

    With runTestSet=True, recipes whose is_training_set flag is True are
    skipped (i.e. only the held-out set is scored).  Returns
    (totalCount, successCount, noopSuccessCount).
    """
    training_set = pickle_util.load("training.pck")
    totalRecipes = 0
    arc = annotatedRecipe.Corpus(training_set=training_set)
    recipeCorpus = arc.recipes
    ev = Evaluator()
    for i in recipeCorpus:
        # Skip recipes belonging to the partition we are NOT evaluating.
        if i.is_training_set == runTestSet:
            continue
        totalRecipes += 1
        ev.evaluateInstructions(i)
    print "\n\nOverall results for the entire instruction-level evaluation."
    print "Total Recipes:", totalRecipes
    print "Total Instructions:", ev.totalCount, "\nSuccess:", ev.successCount
    print "Noop Success:", ev.noopSuccessCount
    print "Failures:", ev.totalCount - (ev.successCount+ev.noopSuccessCount)
    right = ev.successCount + ev.noopSuccessCount
    print "%.3f%% (%d/%d)" % (float(right)/ev.totalCount * 100,
                              right, ev.totalCount)
    print "\n\n"
    return (ev.totalCount, ev.successCount, ev.noopSuccessCount)
def runEndToEndEvaluation(runTestSet=True):
    """Run the end-to-end evaluation over the annotated corpus.

    Returns (totalRecipes, successCount).  NOTE(review): evaluateEndToEnd
    always returns 0, so the early-exit branch below ("Failure 1") looks
    unreachable — confirm whether a non-zero return was ever intended.
    """
    training_set = pickle_util.load("training.pck")
    totalRecipes = 0
    arc = annotatedRecipe.Corpus(training_set=training_set)
    recipeCorpus = arc.recipes
    ev = Evaluator()
    for i in recipeCorpus:
        # Skip recipes belonging to the partition we are NOT evaluating.
        if i.is_training_set == runTestSet:
            continue
        totalRecipes += 1
        result = ev.evaluateEndToEnd(i)
        if result != 0:
            print "Failure 1"
            return 1
    print "\n\nOverall results for the entire end-to-end evaluation."
    print "Total Recipes:", totalRecipes
    print "Success:", ev.successCount
    print "Failures:", ev.totalCount - ev.successCount
    right = ev.successCount + ev.noopSuccessCount
    print "%.3f%% (%d/%d)" % (float(right)/ev.totalCount * 100,
                              right, ev.totalCount)
    print "\n\n"
    return (totalRecipes, ev.successCount)
def main(argv):
    """Dispatch on a --evaluate=N command-line flag:
    1 runs the instruction-level evaluation, 2 runs end-to-end."""
    eArg = 0
    print "arg", argv
    for i in argv:
        if "--evaluate=" in i:
            # Strip the flag prefix and parse the mode number.
            j = i.replace("--evaluate=", "")
            eArg = int(j)
            print eArg
    if eArg == 1:
        runInstructionEvaluation()
    elif eArg == 2:
        runEndToEndEvaluation()
    else:
        print "Error with the args"
if __name__=="__main__":
    # Entry point: forward the raw argv list (including the program name).
    import sys
    main(sys.argv)
| [
"stefie10@alum.mit.edu"
] | stefie10@alum.mit.edu |
944b91037e97b649e60f3396991edf79261d738d | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /incepiton-mysql-master/app/auth/form.py | e8c9b27bf30f223f224929065f1813ff4c42f599 | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | from ..models import User
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Email, ValidationError
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember me', default=False)
class RegisterForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
def validate_username(self, field):
if User.query.filter(User.name == field.data).first():
raise ValidationError('Username already in use')
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
503a571f039f0c34b2f4e839996f9bb23f5c1d9c | ba66da3901361854b9bb621586f1e49ad0121ee0 | /正式开班/pymysql/mysql_1.py | acf2710c566de36c2631fe7b39728a471f8d3378 | [] | no_license | luobodage/PythonBasis | c4739920055afbda03774d90151ab183a83583f8 | ea65536e759fec221a70d7647ae86120277d5459 | refs/heads/master | 2023-05-14T15:51:56.213282 | 2021-05-31T00:57:56 | 2021-05-31T00:57:56 | 322,145,745 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,697 | py | # -*- coding: UTF-8 -*-
import pymysql
import pandas as pd
# author: luoboovo
# contact: fuyu16032001@gmail.com
# datetime: 2021/1/29 10:46
# software: PyCharm
# = = =
# = = =
# = = =
# ===========
# = 萝 =
# = 卜 =
# = 神 =
# = 保 =
# = 佑 =
# = 永 =
# = 无 =
# = bug =
# = =
# = =
# =
data_before = 'housing.xls'
def dataCleaning():
    """
    Data cleaning: fill every NaN-bearing column with that column's mean.

    Reads `data_before` (housing.xls) and writes the cleaned frame to
    housing.csv.  to_csv() keeps the default index, so the CSV gains a
    leading index column — readAndWriteData() relies on it as the row ID.
    :return: None (side effect: writes the new csv file)
    """
    df = pd.read_excel(data_before)
    # Per-column NaN counts before cleaning.
    print(df.isnull().sum())
    for column in list(df.columns[df.isnull().sum() > 0]):
        # NOTE(review): assumes every NaN-bearing column is numeric;
        # .mean() would fail on object columns — confirm against housing.xls.
        mean_val = df[column].mean()
        df[column].fillna(mean_val, inplace=True)
    # Should now print all zeros.
    print(df.isnull().sum())
    df.to_csv('housing.csv')
def login_mysql(user, password):
    """
    First-run MySQL login: connect, CREATE the `house` database, and select it.
    Fails if `house` already exists — use reboot_mysql() afterwards.
    :param user: username
    :param password: password
    :return: (cursor, conn) tuple
    """
    conn = pymysql.connect(
        host='localhost',
        port=3306,
        charset='utf8',
        cursorclass=pymysql.cursors.DictCursor,  # rows come back as dicts
        user=user,
        password=password, )
    cursor = conn.cursor()
    cursor.execute('CREATE database house')
    cursor.execute('use house')
    return cursor, conn
def reboot_mysql(user, password):
    """
    Reconnect to MySQL and select the existing `house` database
    (same as login_mysql but without the CREATE step).
    :param user: username
    :param password: password
    :return: (cursor, conn) tuple
    """
    conn = pymysql.connect(
        host='localhost',
        port=3306,
        charset='utf8',
        cursorclass=pymysql.cursors.DictCursor,  # rows come back as dicts
        user=user,
        password=password, )
    cursor = conn.cursor()
    cursor.execute('use house')
    return cursor, conn
def create_database():
    """
    Create the `housing` table in the `house` database.  The schema mirrors
    the cleaned housing.csv, with the CSV's index column as the primary key.
    :return: None
    """
    cursor, conn = reboot_mysql('root', '1334')
    try:
        cursor.execute("""
        CREATE table housing(ID INT PRIMARY KEY,
        longitude float not null ,
        latitude float not null ,
        housing_median_age int not null ,
        total_rooms int not null ,
        total_bedrooms float not null ,
        population int not null ,
        households int not null ,
        median_income float not null,
        median_house_value varchar(10) not null )
        """)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the usual failure is "table already exists".
        print('创建失败')
    cursor.close()
    conn.close()
def readAndWriteData():
    """Load housing.csv line by line and INSERT each row into house.housing.

    NOTE(review): the first CSV line is the header row and is inserted too,
    which should make the INSERT fail — confirm intended input format.
    NOTE(review): SQL is built with an f-string instead of parameterized
    `cursor.execute(sql, params)`; fine for trusted local CSVs only.
    """
    cursor, conn = reboot_mysql('root', '1334')
    with open('housing.csv', 'r') as csv:
        data = csv.read()
        print(data.split('\n'))
        a = data.split('\n')
        print(type(data))
        # len(a) - 1 skips the trailing empty string after the final newline.
        for i in range(len(a) - 1):
            # print(i.split())
            print(a[i])
            b = a[i].split(',')
            print(b)
            print(b[0])
            # b[0] is the CSV index column, used as the table's ID.
            sql = f'INSERT into house.housing VALUES({b[0]},{b[1]},{b[2]},{b[3]},{b[4]},{b[5]},{b[6]},{b[7]},{b[8]},{b[9]}) '
            cursor.execute(sql)
    # for i in data:
    #     a = i.split(',')
    #     print(a)
    #     # print(len(a))
    #     print(type(a[10]))
    #     sql = f'INSERT into house.housing VALUES({str(a[0])},{str(a[1])},{str(a[2])},{str(a[3])},{str(a[4])},{str(a[5])},{a[6]},{a[7]},{str(a[8])},{str(a[9])},{str(a[10])}) '
    # sql = f'INSERT into house.housing VALUES({a}) '
    # cursor.execute(sql)
    conn.commit()
    cursor.close()
    conn.close()
if __name__ == '__main__':
    # One-time setup steps are left commented out; uncomment in order
    # (dataCleaning -> login_mysql -> create_database) on a fresh machine.
    # dataCleaning()
    # login_mysql('root', '1334')
    readAndWriteData()
    # create_database()
| [
"fuyu16032001@gmail.com"
] | fuyu16032001@gmail.com |
83be4867c7f7e8d53df50de5d7ca29ce8d8ccf62 | fdbabbd10c03d20d7b35c00eab682af2d4be6601 | /main.py | 8614a6a798657badf6eb003e3771b0d45bfd5711 | [] | no_license | allenabraham777/cvsm_logic | 44777e8066143d790767029e954b9b52251c9cce | 148e53dcd190d649c8f261e32ad2d5fd18a261c0 | refs/heads/master | 2022-11-14T19:10:27.309242 | 2020-07-06T13:21:46 | 2020-07-06T13:21:46 | 277,547,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | informations = []
suppliers = []
activities = []
# Information Flow
print("Enter the information flow (Type 'end' to Stop) : ")
i = 0
while(True):
print("Enter the component "+str(i + 1)+" detail")
information = input()
if information == "end":
break
informations.append(information)
i = i + 1
print(informations)
# Supplier
print("Enter the Supplier Details (Type 'end' to Stop) : ")
i = 0
while(True):
print("Enter the supplier "+str(i + 1)+" details")
supplier_name = input("Name : ")
if supplier_name == "end":
break
supplier_interval = input("Intervel : ")
supplier = {"Name":supplier_name, "Intervel":supplier_interval}
suppliers.append(supplier)
i = i + 1
print(suppliers)
# Process Flow
print("Enter the activity flow (Type 'end' to Stop) : ")
i = 0
while(True):
act_code = input("Enter activity "+ str(i + 1) +" code (end_act to stop)")
if(act_code == "end_act"):
break
flag = True
activity_status = True
sub_activities = []
while(True):
status = int(input("Enter 1 if the process is a waiting process, 10 to exit else 0"))
if status == 10:
break
if status == 1:
activity_status = False
else:
activity_status = True
sub_act_name = input("Enter the name of activity : ")
lot_size = input("Enter the lot size : ")
cycle_time = input("Cycle Time : ")
if activity_status and flag:
flag = False
equipments = []
while(True):
equipment_name = input("Equipment Name (end_eqp to break): ")
if(equipment_name == "end_eqp"):
break
equipment_cost = input("Capital Cost : ")
equipment_maintenance_cost = input("Maintenence Cost : ")
equipment_total_usage = input("Total Usage per Year : ")
equipment = {
"name": equipment_name,
"cost": equipment_cost,
"maintenence": equipment_maintenance_cost,
"usage": equipment_total_usage
}
equipments.append(equipment)
supervisor_count = input("Number of supervisors : ")
operator_count = input("Number of operators : ")
operator_cost = input("Operator cost : ")
operator_time = input("Operation time : ")
material_cost = input("Material cost : ")
sub_activity = {
"type": "activity",
"name": sub_act_name,
"lot_size": lot_size,
"equipments": equipments,
"supervisor_count": supervisor_count,
"operator_count": operator_count,
"operator_cost": operator_cost,
"operator_time": operator_time,
"material_cost": material_cost,
"cycle_time": cycle_time
}
sub_activities.append(sub_activity)
else:
waiting_time = input("Waiting time before process")
sub_activity = {
"type": "waiting",
"name": sub_act_name,
"lot_size": lot_size,
"cycle_time": cycle_time,
"waiting_time": waiting_time
}
sub_activities.append(sub_activity)
rejection = input("Percentage rejection")
activity = {
"code": act_code,
"sub_activities": sub_activities,
"rejection": rejection
}
activities.append(activity)
print(activities) | [
"replituser@example.com"
] | replituser@example.com |
1dcc7298149c256a1f3ef9c06b425f0fdeef4e84 | 7669454a633042da5696f814768f523c8af18b7a | /chiasim/hashable/SpendBundle.py | ab165d05ba98ce2199aafe48f7affff9ef0d71a4 | [
"Apache-2.0"
] | permissive | Chia-Network/ledger_sim | 46422efccf5993b5d2de8f3b0e6e3fb635115980 | de53d4129e10d883b92988e21bd5e248a8f89813 | refs/heads/main | 2023-01-24T13:03:58.343814 | 2023-01-19T22:05:59 | 2023-01-19T22:05:59 | 203,893,213 | 4 | 9 | null | 2023-01-19T22:06:01 | 2019-08-23T00:20:07 | Python | UTF-8 | Python | false | false | 1,474 | py | from ..atoms import streamable
from .BLSSignature import BLSSignature
from .CoinSolution import CoinSolutionList
@streamable
class SpendBundle:
    """
    A list of coins being spent together with their solution programs, plus a
    single aggregated signature.  This is the object that most closely
    corresponds to a bitcoin transaction (non-interactive signature
    aggregation makes transaction boundaries more flexible than in bitcoin).
    """
    coin_solutions: CoinSolutionList
    aggregated_signature: BLSSignature

    @classmethod
    def aggregate(cls, spend_bundles):
        """Merge several bundles: concatenated solutions, one aggregated signature."""
        all_solutions = []
        signatures = []
        for bundle in spend_bundles:
            all_solutions.extend(bundle.coin_solutions)
            signatures.append(bundle.aggregated_signature)
        return cls(all_solutions, BLSSignature.aggregate(signatures))

    def additions(self):
        """Coins created by running every solution in this bundle."""
        from chiasim.wallet.deltas import additions_for_solution

        created = []
        for coin_solution in self.coin_solutions._items:
            created.extend(
                additions_for_solution(coin_solution.coin.name(), coin_solution.solution)
            )
        return tuple(created)

    def removals(self):
        """Coins consumed (spent) by this bundle."""
        return tuple(coin_solution.coin for coin_solution in self.coin_solutions)

    def fees(self) -> int:
        """Implied fee: total value removed minus total value added."""
        total_in = sum(coin.amount for coin in self.removals())
        total_out = sum(coin.amount for coin in self.additions())
        return total_in - total_out
| [
"him@richardkiss.com"
] | him@richardkiss.com |
b9de0d9977fdf52300400d43ae398ac0ca3bdd53 | a07fd8aca2d69ade2e388054dd2c1c9991232185 | /tests/test_tutorial/test_handling_errors/test_tutorial001.py | 8809c135bd706cbb18e1438335b1a2807035a261 | [
"MIT"
] | permissive | vitalik/fastapi | 76b71bbbade19f12484c73dcbdca426197cc2db6 | 0276f5fd3aafb38dcbb430177a4685aeb58e5c69 | refs/heads/master | 2023-08-01T06:56:06.053824 | 2023-07-25T20:46:02 | 2023-07-25T20:46:02 | 315,668,229 | 1 | 0 | MIT | 2020-11-24T15:07:16 | 2020-11-24T15:07:15 | null | UTF-8 | Python | false | false | 3,298 | py | from fastapi.testclient import TestClient
from docs_src.handling_errors.tutorial001 import app
client = TestClient(app)
def test_get_item():
    """Known item id 'foo' returns 200 with the expected payload."""
    res = client.get("/items/foo")
    assert res.status_code == 200, res.text
    assert {"item": "The Foo Wrestlers"} == res.json()
def test_get_item_not_found():
    """Unknown item id yields a 404 detail body and no custom x-error header."""
    res = client.get("/items/bar")
    assert res.status_code == 404, res.text
    assert res.headers.get("x-error") is None
    assert {"detail": "Item not found"} == res.json()
def test_openapi_schema():
    """The generated OpenAPI document matches this snapshot exactly
    (paths, responses, and the standard validation-error schemas)."""
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/items/{item_id}": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {"application/json": {"schema": {}}},
                        },
                        "422": {
                            "description": "Validation Error",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "$ref": "#/components/schemas/HTTPValidationError"
                                    }
                                }
                            },
                        },
                    },
                    "summary": "Read Item",
                    "operationId": "read_item_items__item_id__get",
                    "parameters": [
                        {
                            "required": True,
                            "schema": {"title": "Item Id", "type": "string"},
                            "name": "item_id",
                            "in": "path",
                        }
                    ],
                }
            }
        },
        # Schemas FastAPI emits for request-validation failures.
        "components": {
            "schemas": {
                "ValidationError": {
                    "title": "ValidationError",
                    "required": ["loc", "msg", "type"],
                    "type": "object",
                    "properties": {
                        "loc": {
                            "title": "Location",
                            "type": "array",
                            "items": {
                                "anyOf": [{"type": "string"}, {"type": "integer"}]
                            },
                        },
                        "msg": {"title": "Message", "type": "string"},
                        "type": {"title": "Error Type", "type": "string"},
                    },
                },
                "HTTPValidationError": {
                    "title": "HTTPValidationError",
                    "type": "object",
                    "properties": {
                        "detail": {
                            "title": "Detail",
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/ValidationError"},
                        }
                    },
                },
            }
        },
    }
| [
"noreply@github.com"
] | vitalik.noreply@github.com |
07d9bd027d220d26f623bd4bfb006fa601fd7172 | 5e626d4138ef02efae149c41e00ea4d04b8239e8 | /chippedyScrapson.py | 891ab89172f238074c359b540e1258a1adfe6e5c | [] | no_license | ganti/chippedyScrapson | f1ecc57622ea2bbc8a5f7f467081523089fa36e2 | 4de30ff9ae8236ff1083668931cc9f000a34e0eb | refs/heads/master | 2020-07-19T02:38:51.137565 | 2019-09-04T16:30:19 | 2019-09-04T16:30:19 | 206,360,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,045 | py | #!/usr/bin/env python3
import os
import sys
from pprint import pprint
import csv
import json, codecs
import time
import random
import requests
from lxml import html
from scrapy import Selector
from bs4 import BeautifulSoup
allURLs = []
def main():
    """Crawl tagesanzeiger.ch three levels deep, write list.txt, then shell
    out to webscreenshot to capture every discovered URL."""
    url = "https://www.tagesanzeiger.ch"
    depth = 3
    makeListTxtFromURL(url, depth=depth)
    call = "python3 ./webscreenshot/webscreenshot.py -i list.txt -v --workers 7"
    #
    # webscreenshot has many params: screensize, browser etc.
    # all arguments of webscreenshot.py are in ./webscreenshot/00_help_webscreenshot.txt
    #
    os.system(call)
def makeListTxtFromURL(url, depth=5):
    """Crawl `url` (same domain only, up to `depth` levels) and write every
    discovered URL to list.txt, one per line, for webscreenshot to consume."""
    global allURLs
    # Scheme + host only, e.g. 'https://www.example.com' (path stripped).
    domain = url.split("://")[0]+"://" + url.split("://")[1].split("/")[0]
    # Truncate both output files from any previous run.
    with open('list.txt', 'w') as file:
        file.write("")
    with open('url_error.txt', 'w') as file:
        file.write("")
    allURLs = getExtractAllLinksFromPage(url, domain, depth)
    # Make sure the start URL itself is included, then de-duplicate.
    allURLs = [url] + allURLs
    allURLs = list(set(allURLs))
    with open('list.txt', 'w') as file:
        for row in allURLs:
            file.write(row +"\n")
def getExtractAllLinksFromPage(url, domain, depth):
    """Recursively collect same-domain links reachable from `url`.

    `depth` bounds the recursion; every discovered link is also merged into
    the global `allURLs` list so already-seen pages are not re-crawled.
    """
    global allURLs
    result = []
    # Only recurse while budget remains and the URL belongs to our domain.
    if depth >= 1 and domain in url:
        content = getPageContentOfURL(url)
        if content != None:
            contentLinks = getLinksFromPageContent(content, domain)
            result = result + contentLinks
            depth = depth -1
            for link in contentLinks:
                #print(str(depth)+" "+link)
                sublinks = getExtractAllLinksFromPage(link, domain, depth)
                for sublink in sublinks:
                    # Only keep links not already recorded globally.
                    if not sublink in allURLs:
                        result.append(sublink)
    result = list(set(result))
    allURLs = allURLs + result
    return result
def getLinksFromPageContent(content, domain):
    """Extract same-domain anchor hrefs from an HTML page.

    Skips javascript:/mailto: links and binary assets, resolves
    protocol-relative ('//host/...') and root-relative ('/path') hrefs
    against `domain`, de-duplicates against the global `allURLs` crawl set,
    and records everything kept in that set.
    """
    global allURLs
    bs = BeautifulSoup(content, features="lxml")
    links = [anchor.get('href') for anchor in bs.findAll('a')]
    ignore_prefixes = ('javascript:', 'mailto:')
    ignore_suffixes = ('.pdf', '.zip', '.png', '.jpg', '.gif')
    result = []
    for link in links:
        if link is None:
            continue
        # startswith/endswith accept a tuple of options directly.
        if link.startswith(ignore_prefixes) or link.endswith(ignore_suffixes):
            continue
        if link.startswith('//'):
            # Bug fix: the scheme was joined without ':' (producing
            # "https//host/..."), which silently dropped every
            # protocol-relative link at the domain check below.
            link = domain.split("://")[0] + ':' + link
        elif link.startswith('/'):
            link = domain + link
        if domain in link and link not in allURLs:
            result.append(link)
    result = list(set(result))
    allURLs = allURLs + result
    return result
def getPageContentOfURL(url, run=3):
    """Fetch *url* and return the raw response body.

    Connection-level failures (``requests.exceptions.RequestException``)
    are retried while attempts remain; *run* counts the attempts left and
    is decremented on each retry.  When the retries are exhausted the
    failing URL is appended to ``url_error.txt`` and ``None`` is returned.
    Non-200 responses are only logged -- their body is still returned.

    :param url: absolute URL to fetch
    :param run: remaining attempts (the call at run == 0 is the last one)
    :return: response body (bytes) or ``None`` on repeated failure
    """
    content = None
    try:
        page = requests.get(url)
        if page.status_code != 200:
            print(str(page.status_code) +" 💩 " + url)
        else:
            print(str(page.status_code) +" ✅ " + url)
        content = page.content
    except requests.exceptions.RequestException:
        # Bug fix: the original retried unconditionally, so a permanently
        # unreachable URL kept recursing with run going negative until a
        # RecursionError.  Only retry while attempts remain.
        if run > 0:
            content = getPageContentOfURL(url, run=(run - 1))
    if content is None and run <= 0:
        # Out of retries on the innermost call: remember the URL so it
        # can be inspected later.
        with open('url_error.txt', 'a') as file:
            file.write(url+"\n")
    return content
if __name__ == "__main__" :
main() | [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
e41596ef4f52bb1f4192b5be0b6eb1505f775730 | fadc5b7a2e3f83ea69e3812800f2e4aa441e36d9 | /Scripts/Analysis/Tandem_Gene_Similarity.py | eaa2254bfa3f418d9f730549ac59f23feeebb267 | [] | no_license | TomJKono/Maize_Tandem_Evolution | 4e63c58614ec56476f4b4116554206ddf3d73b28 | 413b6c8d0d9a257a370060674a0e3936f3d93fc2 | refs/heads/master | 2022-06-30T04:20:19.228107 | 2022-06-15T22:55:06 | 2022-06-15T22:55:06 | 114,657,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,964 | py | #!/usr/bin/env python
"""Script to generate alignments for tandem duplicates. Translates the sequence
to amino acids, aligns them with clustal-omega, then back-translates them. Takes
three arguments:
1) Tandem duplicates CSV
2) Representative transcripts FASTA
3) Output directory
"""
import sys
import tempfile
import os
import itertools
from Bio import SeqIO
from Bio import SeqRecord
from Bio import Seq
from Bio.Align.Applications import ClustalOmegaCommandline
def parse_transcripts(trans):
    """Parse a FASTA file and return a {gene_id: SeqRecord} mapping.

    Record IDs of the form ``GENE_SUFFIX`` are keyed by the portion before
    the first underscore, so callers can look records up by bare gene ID.
    """
    records = SeqIO.to_dict(SeqIO.parse(trans, 'fasta'))
    # Strip the _suffix from every key while keeping the full record value.
    return {full_id.split('_')[0]: rec for full_id, rec in records.items()}
def get_cds(geneid, seqdict):
    """Translate the CDS of *geneid* and return it as an amino-acid record.

    :param geneid: key into *seqdict*
    :param seqdict: mapping of gene ID to nucleotide SeqRecord
    :return: SeqRecord holding the translated protein, id set to *geneid*
    """
    protein = seqdict[geneid].seq.translate()
    # Wrap the bare Seq in a SeqRecord so downstream I/O keeps the gene ID.
    return SeqRecord.SeqRecord(protein, id=geneid, description='')
def align_genes(gene1, gene2):
    """Align two protein SeqRecords with clustal-omega.

    Writes both records to a temporary FASTA, runs clustal-omega on it,
    and returns the still-open temporary *output* file handle (FASTA
    alignment).  The caller is responsible for closing the returned
    handle; the input temp file is closed (and thus deleted) here.
    """
    # Make temp files for clustal in and out.
    # NOTE(review): NamedTemporaryFile re-opened by name works on POSIX
    # but not on Windows -- presumably this only runs on Linux/macOS.
    clust_in = tempfile.NamedTemporaryFile(
        prefix='CO_in_',
        suffix='.fasta',
        mode='w+t')
    clust_out = tempfile.NamedTemporaryFile(
        prefix='CO_out_',
        suffix='.fasta',
        mode='w+t')
    # Write the sequences into the temp file
    SeqIO.write([gene1, gene2], clust_in, 'fasta')
    # Seek to the beginning else the file will appear empty
    clust_in.seek(0)
    # Run the external aligner; force=True overwrites the (empty) out file.
    cline = ClustalOmegaCommandline(
        infile=clust_in.name,
        outfile=clust_out.name,
        seqtype='protein',
        force=True,
        iterations=10,
        distmat_full=True,
        distmat_full_iter=True)
    cline()
    clust_in.close()
    # Return the handle to the output file
    return clust_out
def back_translate(aln_file, seqdict):
    """Back-translate an amino-acid alignment to a codon alignment.

    Each aligned protein is walked residue by residue: a gap becomes
    ``---`` and every other residue is replaced by the corresponding codon
    from the original nucleotide sequence in *seqdict*.

    :param aln_file: handle (as returned by align_genes) to a FASTA
        protein alignment
    :param seqdict: gene ID -> nucleotide SeqRecord of the original CDS
    :return: list of back-translated sequences, one per aligned record.
        NOTE(review): ``nuc[...]`` slices a SeqRecord, so the ``bt``
        accumulator mixes str and SeqRecord slices -- the element type of
        the returned list depends on Biopython's concatenation semantics;
        confirm against write_alignment(), which expects items with ``.id``.
    """
    aln = SeqIO.parse(aln_file.name, 'fasta')
    bt_seq = []
    for prot_seq in aln:
        codon = 0   # index of the next unconsumed codon in the CDS
        bt = ''
        nuc = seqdict[prot_seq.id]
        for aa in prot_seq:
            if aa == '-':
                # Alignment gap: keep the frame with a 3-base gap.
                bt += '---'
            else:
                bt += nuc[codon*3:(codon*3)+3]
                codon += 1
        bt_seq.append(bt)
    return bt_seq
def write_alignment(nuc_aln, outdir):
    """Write the back-translated alignment to ``<outdir>/<id1>-<id2>.fasta``.

    :param nuc_aln: iterable of aligned records; each record's ``id`` is
        expected to look like ``GENE_SUFFIX`` (assumption -- confirm that
        back_translate() yields objects carrying an ``id``)
    :param outdir: directory receiving the FASTA file (``~`` is expanded)
    """
    # Build the output file name from the gene IDs of the aligned pair.
    gids = [s.id.split('_')[0] for s in nuc_aln]
    fname = '-'.join(gids)
    abs_outdir = os.path.abspath(os.path.expanduser(outdir))
    outname = os.path.join(abs_outdir, fname + '.fasta')
    # Use a context manager so the handle is closed even if SeqIO.write
    # raises (the original leaked the handle on error).
    with open(outname, 'w') as handle:
        SeqIO.write(nuc_aln, handle, 'fasta')
    # Progress note on stderr so stdout stays clean.
    sys.stderr.write('Wrote ' + fname + '\n')
    return
def main(tandem, transcripts, outdir):
    """Align every pair of tandem duplicates and write the results.

    Each line of the *tandem* CSV lists the gene IDs of one duplicate
    cluster.  Every pairwise combination in a cluster is translated,
    aligned as proteins, back-translated to codons, and written into
    *outdir*.
    """
    sequences = parse_transcripts(transcripts)
    with open(tandem, 'r') as dup_file:
        for cluster_line in dup_file:
            cluster = cluster_line.strip().split(',')
            for first_id, second_id in itertools.combinations(cluster, 2):
                prot_a = get_cds(first_id, sequences)
                prot_b = get_cds(second_id, sequences)
                alignment = align_genes(prot_a, prot_b)
                codon_aln = back_translate(alignment, sequences)
                write_alignment(codon_aln, outdir)
                # Be good, and close the temp handle align_genes returned.
                alignment.close()
    return
main(sys.argv[1], sys.argv[2], sys.argv[3])
| [
"konox006@umn.edu"
] | konox006@umn.edu |
5532c264f012df678571ce895acd1cc83a14f820 | e909e9bb4b2e54bb64d6bee9cf9fbaf14c584e04 | /malib/algorithm/maddpg/loss.py | 6595de6c38d2c580a92fad36c131c5dd868c5dd8 | [
"MIT"
] | permissive | zhihaolyu/malib | 9cd8fdcdc1c613c11fc1e6f385adac5312474509 | 1c7ca1819325796a6ec604aa1ae8c771708fc50c | refs/heads/main | 2023-05-13T03:41:05.211832 | 2021-06-08T04:35:10 | 2021-06-08T04:35:10 | 374,880,657 | 0 | 0 | MIT | 2021-06-08T04:29:26 | 2021-06-08T04:29:25 | null | UTF-8 | Python | false | false | 4,862 | py | import torch
import gym
from malib.algorithm.common import misc
from malib.algorithm.ddpg.loss import DDPGLoss
from malib.backend.datapool.offline_dataset_server import Episode
from malib.algorithm.common.model import get_model
class MADDPGLoss(DDPGLoss):
    """MADDPG loss: DDPG with a centralized critic over all agents.

    The critic consumes the concatenated observations and action
    distributions of every agent; the actor is optimized per-agent.
    ``self.policy``, ``self.optimizers``, ``self.main_id`` and
    ``self.agents`` are presumably provided by the DDPGLoss base class --
    confirm against malib.algorithm.ddpg.loss.
    """

    def __init__(self):
        super(MADDPGLoss, self).__init__()
        self.cnt = 0
        # Default hyper-parameters; overridden by the config passed to reset().
        self._params = {
            "tau": 0.01,
            "grad_norm_clipping": 0.5,
            "actor_lr": 1e-2,
            "critic_lr": 1e-2,
            "optimizer": "Adam",
        }

    def _set_centralized_critic(self):
        """Swap the per-agent critic for one over the global state space."""
        global_state_space = self.policy.custom_config["global_state_space"]
        # Drop the decentralized critics registered by the base policy.
        self.policy.deregister_state("critic")
        self.policy.deregister_state("target_critic")
        model_cls = get_model(self.policy.model_config["critic"])
        # Critic output is a single scalar value, hence Discrete(1).
        self.policy.set_critic(model_cls(global_state_space, gym.spaces.Discrete(1)))
        self.policy.target_critic = model_cls(
            global_state_space, gym.spaces.Discrete(1)
        )
        self.policy.update_target()

    def reset(self, policy, config):
        """Replace critic with a centralized critic"""
        self._params.update(config)
        if policy is not self.policy:
            # NOTE(review): assigns self._policy while the rest of the class
            # reads self.policy -- presumably a property on the base class.
            self._policy = policy
            self._set_centralized_critic()
            self.setup_optimizers()

    def step(self):
        """Polyak-average the target networks after an optimization step."""
        self.policy.soft_update(tau=self._params["tau"])
        return None

    def __call__(self, agent_batch):
        """Compute and apply one critic + actor update for ``self.main_id``.

        :param agent_batch: mapping agent_id -> batch dict (Episode keys)
        :return: dict of scalar training statistics
        """
        FloatTensor = (
            torch.cuda.FloatTensor
            if self.policy.custom_config["use_cuda"]
            else torch.FloatTensor
        )
        cast_to_tensor = lambda x: FloatTensor(x.copy())
        cliprange = self._params["grad_norm_clipping"]
        # print(all_agent_batch[agent_id])
        rewards = cast_to_tensor(agent_batch[self.main_id][Episode.REWARDS]).view(-1, 1)
        dones = cast_to_tensor(agent_batch[self.main_id][Episode.DONES]).view(-1, 1)
        cur_obs = cast_to_tensor(agent_batch[self.main_id][Episode.CUR_OBS])
        gamma = self.policy.custom_config["gamma"]
        # Build the centralized critic inputs: concatenation of every
        # agent's (next) observations and (next/target) actions.
        target_vf_in_list_obs = []
        target_vf_in_list_act = []
        vf_in_list_obs = []
        vf_in_list_act = []
        # set target state
        for aid in self.agents:
            batch = agent_batch[aid]
            target_vf_in_list_obs.append(cast_to_tensor(batch[Episode.NEXT_OBS]))
            # "next_act_by_target" is presumably already a tensor produced
            # by the target actor -- confirm upstream.
            target_vf_in_list_act.append(batch["next_act_by_target"])
            vf_in_list_obs.append(cast_to_tensor(batch[Episode.CUR_OBS]))
            vf_in_list_act.append(cast_to_tensor(batch[Episode.ACTION_DIST]))
        target_vf_state = torch.cat(
            [*target_vf_in_list_obs, *target_vf_in_list_act], dim=1
        )
        vf_state = torch.cat([*vf_in_list_obs, *vf_in_list_act], dim=1)
        # ============================== Critic optimization ================================
        # One-step TD target with the centralized target critic.
        target_value = rewards + gamma * (1.0 - dones) * self.policy.target_critic(
            target_vf_state
        )
        eval_value = self.policy.critic(vf_state)
        assert eval_value.shape == target_value.shape, (
            eval_value.shape,
            target_value.shape,
        )
        value_loss = torch.nn.MSELoss()(eval_value, target_value.detach())
        self.optimizers["critic"].zero_grad()
        value_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.policy.critic.parameters(), cliprange)
        self.optimizers["critic"].step()
        # ==================================================================================
        # ================================ Actor optimization ==============================
        # Replace only the main agent's actions with differentiable ones
        # from the current actor; other agents' actions stay fixed.
        main_idx = None
        for i, aid in enumerate(self.agents):
            # replace with tensor
            if aid == self.main_id:
                vf_in_list_act[i] = self.policy.compute_actions(cur_obs)
                main_idx = i
                break
        vf_state = torch.cat([*vf_in_list_obs, *vf_in_list_act], dim=1)
        policy_loss = -self.policy.critic(vf_state).mean()  # need add regularization?
        # Small action-magnitude regularizer on the main agent's actions.
        policy_loss += (vf_in_list_act[main_idx] ** 2).mean() * 1e-3
        self.optimizers["actor"].zero_grad()
        policy_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.policy.actor.parameters(), cliprange)
        self.optimizers["actor"].step()
        # ==================================================================================
        loss_names = [
            "policy_loss",
            "value_loss",
            "target_value_est",
            "value_est",
        ]
        stats_list = [
            policy_loss.detach().numpy(),
            value_loss.detach().numpy(),
            target_value.mean().detach().numpy(),
            eval_value.mean().detach().numpy(),
        ]
        return dict(zip(loss_names, stats_list))
| [
"kornbergfresnel@outlook.com"
] | kornbergfresnel@outlook.com |
4d4a4b30e934813ff025d0254db353158afb96f2 | 25872e1ba4f86cbbf77d0130f341b21e5dd9e692 | /SingleNumberIi.py | 3a629bea501e870c6d47a412d50885842d0bef96 | [] | no_license | zongxinwu92/leetcode | dc3d209e14532b9b01cfce6d4cf6a4c2d7ced7de | e1aa45a1ee4edaf72447b771ada835ad73e7f508 | refs/heads/master | 2021-06-10T21:46:23.937268 | 2017-01-09T09:58:49 | 2017-01-09T09:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | '''
Created on 1.12.2017
@author: Jesse
''''''
Given an array of integers, every element appears three times except for one, which appears exactly once. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
"
'''
| [
"darrencheng0817@gmail.com"
] | darrencheng0817@gmail.com |
1fd67a737285b069a76fd9bef0c7396315d5e1bb | 0104add04cd6da515e2ccb2c27e44bc6693f9bcf | /Yurii_Khomych/l_6_software_engineering/behavioral/memento.py | 1a1a01f4eb315fe84397e2f083b58aafe802ff5c | [] | no_license | YuriiKhomych/ITEA-advanced | c96c3cf9b279caf62fefcd41faf543cee7534626 | 90bc47733c07b5b866aa3a14aa12a169f5df289c | refs/heads/master | 2022-12-09T20:38:23.607426 | 2019-12-22T17:30:59 | 2019-12-22T17:30:59 | 209,354,034 | 0 | 9 | null | 2022-12-08T03:04:04 | 2019-09-18T16:23:12 | Python | UTF-8 | Python | false | false | 1,118 | py |
class Memento:
    """Immutable snapshot of an originator's state (Memento pattern)."""

    def __init__(self, state):
        # The captured state is private; it is only exposed read-only.
        self._state = state

    def get_state(self):
        """Return the state captured at construction time."""
        return self._state
class Caretaker:
    """Holds a single memento on behalf of the originator."""

    def __init__(self):
        self._memento = None

    def get_memento(self):
        """Return the stored memento, or None if nothing was saved yet."""
        return self._memento

    def set_memento(self, memento):
        """Replace the stored memento with *memento*."""
        self._memento = memento
class Originator:
    """Object whose state can be saved to and restored from mementos."""

    def __init__(self):
        self._state = None

    def set_state(self, state):
        """Overwrite the current state."""
        self._state = state

    def get_state(self):
        """Return the current state."""
        return self._state

    def save_state(self):
        """Capture the current state in a fresh Memento."""
        return Memento(self._state)

    def restore_state(self, memento):
        """Reset the state from a previously saved memento."""
        self._state = memento.get_state()
# Demo: save the originator's state, change it, then roll it back.
originator = Originator()
caretaker = Caretaker()
originator.set_state('on')
print('Originator state:', originator.get_state())  # Originator state: on
caretaker.set_memento(originator.save_state())
originator.set_state('off')
print('Originator change state:', originator.get_state())  # Originator change state: off
originator.restore_state(caretaker.get_memento())
print('Originator restore state:', originator.get_state())  # Originator restore state: on
| [
"yuriykhomich@gmail.com"
] | yuriykhomich@gmail.com |
779652da6a5a24ad321543242382a5833a3019d0 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/completion/typedParameterStringPath/a.after.py | 0ad95ae34e998bde802ce420572b05ffe171fd2c | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 648 | py | from typing import Any, overload, Union
from os import PathLike
def baz(akjlkgjdfsakglkd: PathLike) -> None:
pass
baz("foo")
def bar(akjlkgjdfsakglkd: Union[str, PathLike]) -> None:
pass
bar("foobar.txt")
@overload
def foo(akjlkgjdfsakglkd: str) -> None:
pass
@overload
def foo(akjlkgjdfsakglkd: PathLike) -> None:
pass
def foo(akjlkgjdfsakglkd):
pass
foo("foobar.txt")
def qux(akjlkgjdfsakglkd: Union[str, Any]) -> None:
pass
qux("foo")
@overload
def quux(akjlkgjdfsakglkd: Any) -> None:
pass
@overload
def quux(akjlkgjdfsakglkd: str) -> None:
pass
def quux(akjlkgjdfsakglkd):
pass
quux("foo") | [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
74534eb5988526a54d67aa7eef218c14628636d4 | ef7eabdd5f9573050ef11d8c68055ab6cdb5da44 | /topCoder/srms/500s/srm582/div2/semi_perfect_square.py | 66ebf650bdca212ea2858d5755eeba0b7624349b | [
"WTFPL"
] | permissive | gauravsingh58/algo | cdbf68e28019ba7c3e4832e373d32c71902c9c0d | 397859a53429e7a585e5f6964ad24146c6261326 | refs/heads/master | 2022-12-28T01:08:32.333111 | 2020-09-30T19:37:53 | 2020-09-30T19:37:53 | 300,037,652 | 1 | 1 | WTFPL | 2020-10-15T09:26:32 | 2020-09-30T19:29:29 | Java | UTF-8 | Python | false | false | 244 | py | class SemiPerfectSquare:
def check(self, N):
l, u = int(N**.33), int(N**.5)
for b in xrange(l, u+1):
for a in xrange(1, b):
if a * b * b == N:
return 'Yes'
return 'No'
| [
"elmas.ferhat@gmail.com"
] | elmas.ferhat@gmail.com |
02fe0020965f0e68f5076b7516d6a72a049849a9 | 10fbe5526e5f0b8588b65f70f088cd86b6e9afbe | /tyfslutb/migrations/0015_auto_20150218_1627.py | 97838971133cd35a5966606ccc5466e072829c6d | [] | no_license | MarkusH/django-migrations-benchmark | eb4b2312bb30a5a5d2abf25e95eca8f714162056 | e2bd24755389668b34b87d254ec8ac63725dc56e | refs/heads/master | 2016-09-05T15:36:45.250134 | 2015-03-31T23:44:28 | 2015-03-31T23:44:28 | 31,168,231 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: create ``Bmzhg`` and add two fields."""

    dependencies = [
        ('ygnakzgjxu', '0008_auto_20150218_1626'),
        ('tyfslutb', '0014_auto_20150218_1626'),
    ]

    operations = [
        # New model with a single integer payload column (plus implicit pk).
        migrations.CreateModel(
            name='Bmzhg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nlcvjjxtmx', models.IntegerField(default=0)),
            ],
        ),
        # Optional one-to-one link from Qcwbo to ygnakzgjxu.Xdwhlpqgw;
        # related_name='+' disables the reverse accessor.
        migrations.AddField(
            model_name='qcwbo',
            name='helryvwow',
            field=models.OneToOneField(null=True, related_name='+', to='ygnakzgjxu.Xdwhlpqgw'),
        ),
        # New non-null char column with an empty-string default.
        migrations.AddField(
            model_name='ynbpgqn',
            name='dxemnqzz',
            field=models.CharField(default='', max_length=208),
        ),
    ]
| [
"info@markusholtermann.eu"
] | info@markusholtermann.eu |
886d06ed7a374a174b7fe278b07f3f68764a4b7f | d046fd4ac8e52ed8054199765893f7e1a71302f2 | /master/bt5/slapos_cloud/SkinTemplateItem/portal_skins/slapos_cloud/Instance_tryToGarbageCollectNonAllocatedRootTree.py | 905e534dc8829ee1bea07b4d4019168e67e6e6ac | [] | no_license | jakop345/slapos.core | 7538418056be6541e9ee7a70d2d6b694e03daafc | 410dfb506b7ec17745365d573e7401f217b74ed4 | refs/heads/master | 2020-06-18T21:43:39.012812 | 2016-11-24T11:06:49 | 2016-11-24T11:06:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | from zExceptions import Unauthorized
if REQUEST is not None:
raise Unauthorized
instance = context
portal = context.getPortalObject()
if instance.getValidationState() != 'validated' \
or instance.getSlapState() not in ('start_requested', 'stop_requested') \
or instance.getAggregateValue(portal_type='Computer Partition') is not None:
return
latest_comment = portal.portal_workflow.getInfoFor(instance, 'comment', wf_id='edit_workflow')
if latest_comment != 'Allocation failed: no free Computer Partition':
# No nothing if allocation alarm didn't run on it
return
latest_edit_time = portal.portal_workflow.getInfoFor(instance, 'time', wf_id='edit_workflow')
if (int(DateTime()) - int(latest_edit_time)) < 259200:
# Allow 3 days gap betweeb latest allocation try and deletion
return
# Only destroy if the instance is the only one in the tree
hosting_subscription = instance.getSpecialiseValue("Hosting Subscription")
if (hosting_subscription.getPredecessor() != instance.getRelativeUrl()):
return
if (len(hosting_subscription.getPredecessorList()) != 1):
return
instance_list = portal.portal_catalog(
portal_type=["Software Instance", "Slave Instance"],
default_specialise_uid=hosting_subscription.getUid(),
limit=2)
if len(instance_list) != 1:
return
# OK, destroy hosting subscription
hosting_subscription.requestDestroy(
software_release=hosting_subscription.getUrlString(),
software_title=hosting_subscription.getTitle(),
software_type=hosting_subscription.getSourceReference(),
instance_xml=hosting_subscription.getTextContent(),
sla_xml=hosting_subscription.getSlaXml(),
shared=hosting_subscription.isRootSlave(),
state='destroyed',
comment="Garbage collect %s not allocated for more than 3 days" % instance.getRelativeUrl(),
)
hosting_subscription.archive()
| [
"alain.takoudjou@nexedi.com"
] | alain.takoudjou@nexedi.com |
479972621f87be75415d882e77a8e70f347f8405 | 60696fa455101fbd2bef5efc19910d2fc856c324 | /libqi-python/qi/__init__.py | 2140178b74a899ab8af5849eac331e9023a047a3 | [] | no_license | yumilceh/libqi-python | 263baca432f118f15057c8a2b43e0321bb5609e1 | 900643316c272fcdb9f5de76111125fb05927108 | refs/heads/master | 2020-04-10T22:52:11.210697 | 2018-12-11T13:50:30 | 2018-12-11T13:50:30 | 161,334,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,444 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" QiMessaging Python bindings """
from __future__ import absolute_import
import os
import sys
import ctypes
import platform
import traceback
PATH_LIBQI = os.path.dirname(os.path.realpath(__file__))
# Add LibQi Python Folder to the Path
sys.path.append(PATH_LIBQI)
# Set Path and Load Dependancies for the Platform
if "aldebaran" in platform.platform():
path_robot_lib = os.path.join(PATH_LIBQI, "robot")
sys.path.append(path_robot_lib)
current_lib_path = os.environ.get("LD_LIBRARY_PATH", "")
if current_lib_path:
current_lib_path += ":"
current_lib_path += ":" + path_robot_lib
os.environ["LD_LIBRARY_PATH"] = current_lib_path
robot_dependencies = [
"libc.so.6",
"libstdc++.so.6",
"ld-linux.so.2",
"librt.so.1",
"libm.so.6",
"libdl.so.2",
"libgcc_s.so.1",
"libssl.so.1.0.0",
"libpthread.so.0",
"libsystemd.so.0",
"libcrypto.so.1.0.0",
"libpython2.7.so.1.0",
"libboost_chrono.so.1.59.0",
"libboost_date_time.so.1.59.0",
"libboost_filesystem.so.1.59.0",
"libboost_locale.so.1.59.0",
"libboost_program_options.so.1.59.0",
"libboost_python.so.1.59.0",
"libboost_regex.so.1.59.0",
"libboost_system.so.1.59.0",
"libboost_thread.so.1.59.0",
"libqi.so",
"libqipython.so",
]
for dependency in robot_dependencies:
library_path = os.path.join(PATH_LIBQI, "robot", dependency)
try:
ctypes.cdll.LoadLibrary(library_path)
except:
print("Unable to load %s\n%s" % (library_path, traceback.format_exc()))
elif sys.platform.startswith("linux"):
path_linux_lib = os.path.join(PATH_LIBQI, "linux")
sys.path.append(path_linux_lib)
current_lib_path = os.environ.get("LD_LIBRARY_PATH", "")
if current_lib_path:
current_lib_path += ":"
current_lib_path += ":" + path_linux_lib
os.environ["LD_LIBRARY_PATH"] = current_lib_path
linux_dependencies = [
"libicudata.so",
"libicuuc.so",
"libicui18n.so",
"libcrypto.so",
"libssl.so",
"libpython2.7.so",
"libboost_system.so",
"libboost_thread.so",
"libboost_python.so",
"libboost_chrono.so",
"libboost_program_options.so",
"libboost_filesystem.so",
"libboost_regex.so",
"libboost_locale.so",
"libqi.so",
"libqipython.so",
]
for dependency in linux_dependencies:
library_path = os.path.join(PATH_LIBQI, "linux", dependency)
try:
ctypes.cdll.LoadLibrary(library_path)
except:
print("Unable to load %s\n%s" % (library_path, traceback.format_exc()))
elif sys.platform.startswith("darwin"):
path_mac_lib = os.path.join(PATH_LIBQI, "mac")
path_mac_qi = os.path.join(PATH_LIBQI, "mac", "python2.7", "site-packages")
sys.path.append(path_mac_lib)
sys.path.append(path_mac_qi)
current_lib_path = os.environ.get("DYLD_LIBRARY_PATH", "")
if current_lib_path:
current_lib_path += ":"
current_lib_path += ":" + path_mac_lib
os.environ["DYLD_LIBRARY_PATH"] = current_lib_path
mac_dependencies = [
"libcrypto.1.0.0.dylib",
"libssl.1.0.0.dylib",
"libboost_system.dylib",
"libboost_python.dylib",
"libboost_date_time.dylib",
"libboost_chrono.dylib",
"libboost_filesystem.dylib",
"libboost_regex.dylib",
"libboost_program_options.dylib",
"libboost_locale.dylib",
"libboost_thread.dylib",
"libqi.dylib",
"libqipython.dylib",
"python2.7/site-packages/_qi.so",
]
for dependency in mac_dependencies:
library_path = os.path.join(PATH_LIBQI, "mac", dependency)
try:
ctypes.cdll.LoadLibrary(library_path)
except:
print("Unable to load %s\n%s" % (library_path, traceback.format_exc()))
elif sys.platform.startswith("win"):
sys.path.append(os.path.join(PATH_LIBQI, "win"))
ctypes.windll.kernel32.SetDllDirectoryA(os.path.join(PATH_LIBQI, "win"))
# Import LibQi Functionnalities
from _qi import Application as _Application
from _qi import ApplicationSession as _ApplicationSession
from _qi import (FutureState, FutureTimeout,
Future, futureBarrier, Promise,
Property, Session, Signal,
async, PeriodicTask)
from _qi import (clockNow, steadyClockNow, systemClockNow)
from _qi import (module, listModules)
from . import path
from ._type import (Void, Bool, Int8, UInt8, Int16, UInt16,
Int32, UInt32, Int64, UInt64,
Float, Double, String, List,
Map, Struct, Object, Dynamic,
Buffer, AnyArguments, typeof, _isinstance)
from ._binder import bind, nobind, singleThreaded, multiThreaded
from .logging import fatal, error, warning, info, verbose, Logger
from .logging import getLogger, logFatal, logError, logWarning, logInfo, logVerbose, logDebug # Deprecated
from .translator import defaultTranslator, tr, Translator
from .version import version
# Set the Version Number
__version__ = version
def PromiseNoop(*args, **kwargs):
    """Do nothing, whatever arguments are given.

    .. deprecated:: 2.5
    """
    return None
# Rename isinstance here. (isinstance should not be used in this file)
isinstance = _isinstance
_app = None
# We want to stop all thread before python start destroying module and the like.
# (this avoid callback calling python while it's destroying)
def _stopApplication():
    """Stop and drop the global Application singleton (atexit hook).

    Stopping before interpreter teardown prevents native event-loop
    callbacks from calling back into Python while modules are being
    destroyed.
    """
    global _app
    if _app is not None:
        _app.stop()
        # Explicitly drop the old binding, then reset the sentinel so a
        # later check sees "no application".
        del _app
        _app = None
# Application is a singleton, it should live till the end
# of the program because it owns eventloops
def Application(args=None, raw=False, autoExit=True, url=None):
    """Create and return the process-wide Application singleton.

    The Application owns the native event loops, so exactly one may exist
    per process; a second call raises.

    :param args: command-line arguments (defaults to ``sys.argv``)
    :param raw: build a plain ``_Application`` instead of an
        ``_ApplicationSession``
    :param autoExit: forwarded to ``_ApplicationSession``
    :param url: session URL; defaults to ``tcp://127.0.0.1:9559``
    :raises Exception: if the application was already initialized
    """
    global _app
    if _app is None:
        if args is None:
            args = sys.argv
        if url is None:
            url = "tcp://127.0.0.1:9559"
        if not args:
            # Give the native layer a non-empty argv.
            args = ['python']
        elif args[0] == '':
            # NOTE(review): mutates the caller's list in place (often
            # sys.argv itself).
            args[0] = 'python'
        if raw:
            _app = _Application(args)
        else:
            _app = _ApplicationSession(args, autoExit, url)
    else:
        raise Exception("Application was already initialized")
    return _app
ApplicationSession = Application
__all__ = [
"FutureState",
"FutureTimeout",
"Future",
"futureBarrier",
"Promise",
"PromiseNoop",
"Property",
"Session",
"Signal",
"createObject",
"registerObjectFactory",
"async",
"Void", "Bool", "Int8", "UInt8", "Int16", "UInt16", "Int32", "UInt32", "Int64", "UInt64",
"Float", "Double", "String", "List", "Map", "Struct", "Object", "Dynamic", "Buffer", "AnyArguments",
"typeof", "isinstance",
"bind", "nobind", "singleThreaded", "multiThreaded",
"fatal", "error", "warning", "info", "verbose",
"getLogger", "logFatal", "logError", "logWarning", "logInfo", "logVerbose", "logDebug", # Deprecated
"Logger", "defaultTranslator", "tr", "Translator",
"module", "listModules",
"clockNow", "steadyClockNow", "systemClockNow"
]
# Register _stopApplication as a function to be executed at termination
import atexit
atexit.register(_stopApplication)
del atexit
| [
"you@example.com"
] | you@example.com |
5ed922dea4cd1d1bbea38444f453f27061c5c7c7 | b4537dfc431cba7ff40e0692ab6c223394ae4d69 | /151-replace-adjacent-colors.py | 4c56543292a015d62c66c55ae5fe43c2df89044e | [] | no_license | ericgarig/daily-coding-problem | fdc04f5bf823933100686c4129575f5ef3746676 | d3e1a6ab102c7af1eea4ab6b1282e4d44e5b80ba | refs/heads/master | 2020-03-31T09:59:11.767162 | 2019-12-20T21:44:43 | 2019-12-20T21:44:43 | 152,117,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | """
Daily Coding Problem - 2019-03-08.
Given a 2-D matrix representing an image, a location of a pixel in the
screen and a color C, replace the color of the given pixel and all
adjacent same colored pixels with C.
For example, given the following matrix, and location pixel of (2, 2),
and 'G' for green:
B B W
W W W
W W W
B B B
Becomes
B B G
G G G
G G G
B B B
"""
def solve(arr, pos, color):
    """Flood-fill: recolor the pixel at *pos* and every 4-connected
    same-colored pixel with *color*.

    The matrix is modified in place and also returned.

    Fixes two defects of the original implementation: the right-hand
    neighbor was never explored (the downward check was duplicated in the
    helper) and negative indices silently wrapped to the opposite edge of
    the matrix.  Visited membership now uses a set, so the fill runs in
    O(cells) instead of O(cells**2).

    :param arr: 2-D list of color values (rows may have different lengths)
    :param pos: (row, col) of the starting pixel
    :param color: replacement color
    :return: the same matrix object, recolored
    """
    start = (pos[0], pos[1])
    target = arr[start[0]][start[1]]
    seen = {start}
    frontier = [start]
    while frontier:
        r, c = frontier.pop()
        for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
            # Explicit bounds check: never rely on negative-index wraparound.
            if 0 <= nr < len(arr) and 0 <= nc < len(arr[nr]):
                if (nr, nc) not in seen and arr[nr][nc] == target:
                    seen.add((nr, nc))
                    frontier.append((nr, nc))
    for r, c in seen:
        arr[r][c] = color
    return arr
def get_neighbors(arr, pos, color):
    """Return the 4-connected neighbors of *pos* whose value equals *color*.

    Fixes the original implementation, which checked the cell below twice
    and never looked at the cell to the right, and which let negative
    indices wrap around to the opposite edge of the matrix via Python's
    negative indexing.

    :param arr: 2-D list of color values
    :param pos: (row, col) position whose neighbors are inspected
    :param color: color to match
    :return: list of (row, col) tuples, order: down, up, right, left
    """
    row, col = pos[0], pos[1]
    neighbors = []
    for r, c in ((row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1)):
        # Bounds-check both axes explicitly instead of catching IndexError,
        # so a -1 index can never silently wrap to the far edge.
        if 0 <= r < len(arr) and 0 <= c < len(arr[r]) and arr[r][c] == color:
            neighbors.append((r, c))
    return neighbors
input_matrix = [
["B", "B", "W"],
["W", "W", "W"],
["W", "W", "W"],
["B", "B", "B"],
]
output_matrx = [
["B", "B", "G"],
["G", "G", "G"],
["G", "G", "G"],
["B", "B", "B"],
]
assert (solve(input_matrix, (2, 2), "G")) == output_matrx
| [
"erik.shagdar@gmail.com"
] | erik.shagdar@gmail.com |
c8345d46b7e6acca3cd3f2de57a0bf85e4e79016 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/engines/__init__.py | 36719ae61c18553d8e31520cae7b70625028094c | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 989 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .evaluator import EnsembleEvaluator, Evaluator, SupervisedEvaluator
from .multi_gpu_supervised_trainer import create_multigpu_supervised_evaluator, create_multigpu_supervised_trainer
from .trainer import GanTrainer, SupervisedTrainer, Trainer
from .utils import (
GanKeys,
IterationEvents,
default_make_latent,
default_prepare_batch,
engine_apply_transform,
get_devices_spec,
)
| [
"noreply@github.com"
] | gagandaroach.noreply@github.com |
e54cd6e2a2e416870412ed33580505381d15801f | 9db281fbed35bb8384eeacaa81d1a32a9dcc5cca | /class-28/demo/full-stack-snacks/snacks/migrations/0001_initial.py | ad9b33da216b354507f1c25488783863e143cca1 | [] | no_license | corey-marchand/seattle-python-401d14 | aab3f48c82229f1958989ce8318de60b9abbe4e2 | ae9ffebc9e5250cb5ec1760fd7764da0d3ad4e4c | refs/heads/master | 2022-11-15T16:09:37.248530 | 2020-07-09T19:10:49 | 2020-07-09T19:10:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # Generated by Django 3.0.7 on 2020-06-17 06:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Snack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('purchaser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"jb.tellez@gmail.com"
] | jb.tellez@gmail.com |
a0a4fee1ad51b2720d16c8a31097e86016f07fa8 | d0eb2004a98fa79e280be2535f337604e96ccece | /dict_oper.py | 484f5f458b76e280da0ad82a2809bd9ab5fb4d66 | [] | no_license | Parya1112009/python | f33a9ccfbc858886f8d065eee3f29bca430e5688 | 2550f5c54ff8310977806bab3b39adb8ce252496 | refs/heads/master | 2022-11-22T04:15:16.822655 | 2022-11-16T03:56:26 | 2022-11-16T03:56:26 | 88,561,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | prices = {"mango": 24,"apple": 25,"banana": 20}
product = input("enter the product you want to buy : ")
price = prices.get(product)
if price:
print (f"the price of {product} is {price}")
else:
print("this product is not available today SORRY!!!")
del prices["apple"]
print (prices)
prices["apple"] = 40
print (prices)
prices.pop("mango")
print(f"pop will remove the value of key mango,the final dictionary is\n {prices}")
prices["mango"] = 49
prices["apple"] = 49
prices.popitem()
print(f"popitem will remove the value of most recent key value pair, the final dictionary is\n {prices}")
prices.clear()
print(f"clear will empty the dictionary , the final dictionary is \n{prices}")
| [
"noreply@github.com"
] | Parya1112009.noreply@github.com |
de956c8607638b1cb558fc81b01ce67bbdff7bb9 | 3ee1bb0d0acfa5c412b37365a4564f0df1c093fb | /python_import/p72_main.py | 65026687f67b56914ee89cf2150c49f736c6eb29 | [] | no_license | moileehyeji/Study | 3a20bf0d74e1faec7a2a5981c1c7e7861c08c073 | 188843c6415a4c546fdf6648400d072359d1a22b | refs/heads/main | 2023-04-18T02:30:15.810749 | 2021-05-04T08:43:53 | 2021-05-04T08:43:53 | 324,901,835 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | import p71_byunsu as p71
# p71.aaa = 3 #print(p71.aaa) 3출력
print(p71.aaa)
print(p71.square(10))
# 2
# 1024
print('===================================')
from p71_byunsu import aaa, square
print(aaa) #p71의 aaa 메모리
# 2
aaa = 3
print(aaa) #p72의 aaa 메모리
print(square(10)) #p71의 square함수 메모리
# 3
# 1024
# p71의 aaa(=2)변수, square함수가 메모리에 할당
# p72의 aaa(=3)변수가 다른 메모리에 할당 | [
"noreply@github.com"
] | moileehyeji.noreply@github.com |
2c6ec495b46bf3f29802da6521109de0872fd8fb | e770533cc7d8517134d6f9159f5f9e52747c7153 | /python/11pywinauto/test2.py | 52aaf498cb5e6b5a8be30e39d1a472c9edaf21d3 | [] | no_license | code1990/bootPython | 5d878f7fac8aaa09a2b9e4a6d50a3c0f86c6dea5 | e5debd59b07a2c713f3e692aa4f44a9d2e5baeae | refs/heads/master | 2022-07-27T04:31:00.292692 | 2020-08-07T07:07:15 | 2020-08-07T07:07:23 | 206,805,170 | 0 | 0 | null | 2020-10-13T15:51:34 | 2019-09-06T13:56:39 | Python | UTF-8 | Python | false | false | 5,200 | py | import pywinauto
# GUI automation: drives the 360 Secure Browser with pyautogui/pywinauto to
# open LinkedIn, search for a company, and page through results.
# NOTE(review): all pyautogui.moveTo() coordinates are hard-coded for one
# specific screen resolution / window layout -- they will break elsewhere.
import pyautogui
from pywinauto.keyboard import SendKeys
from pywinauto.mouse import *
# from pywinauto.keyboard import *
import time
# 1. Launch the 360 browser executable.
browser_path = 'D:\\360\\360se6\\Application\\360se.exe'
user_page = 373
app = pywinauto.Application().start(browser_path)
# 2. Attach to the 360 browser main window by its window class name.
mainWindow = app.window(class_name=r'360se6_Frame')
time.sleep(10)  # wait for the browser to finish starting up
print("配置https://www.linkedin.com/feed/为360主页")
# print("鼠标点击领跑插件>>>>>")
# pyautogui.moveTo(935, 44)
# pyautogui.click()
# time.sleep(12)
# print("鼠标点击关闭360主页>>>>>")
# pyautogui.moveTo(279, 12)
# pyautogui.click()
time.sleep(3)
# print("鼠标移动到屏幕中间>>>>>")
# pyautogui.moveTo(300, 200)
# time.sleep(10)
# currentMouseX, currentMouseY = pyautogui.position()
# print(currentMouseX)
# print(currentMouseY)
print("鼠标点击关闭领英帮助界面>>>>>")
pyautogui.moveTo(1211, 286)  # close the LinkedIn help overlay
pyautogui.click()
time.sleep(3)
print("点击人脉菜单>>>>>")
pyautogui.moveTo(1167, 158)  # click the "My Network" menu entry
pyautogui.click()
# time.sleep(3)
print("移动到input输入框,点击输入框获取输入状态>>>>>")
pyautogui.moveTo(930, 187)  # focus the search input box
pyautogui.click()
# time.sleep(3)
print("请务必保持英文输入法状态>>>>>")
print("模拟键盘输入文字>>>>>")
# Type the search query (requires an English IME to be active).
pyautogui.typewrite(message='bolts inc',interval=0.5)
pyautogui.press('enter')
time.sleep(10)  # wait for the search results to load
# 鼠标左击一次
# pyautogui.click()
# time.sleep(3)
# currentMouseX, currentMouseY = pyautogui.position()
# print(currentMouseX)
# print(currentMouseY)
# print("鼠标点击搜索按钮>>>>>")
# pyautogui.moveTo(1230, 185)
# pyautogui.click()
# time.sleep(3)
print("鼠标点击多选选择框>>>>>")
pyautogui.moveTo(935, 227)  # click the multi-select checkbox in the result list
pyautogui.click()
# time.sleep(3)
# print("鼠标点击添加按钮>>>>>")
# pyautogui.moveTo(1060, 229)
# pyautogui.click()
# # time.sleep(3)
# print("鼠标移动到文本输入框>>>>>")
# pyautogui.moveTo(932, 274)
# pyautogui.click()
# print("全选删除文本输入框>>>>>")
# pyautogui.hotkey('ctrl', 'a')
# pyautogui.hotkey('ctrl', 'x')
# pyautogui.click()
# time.sleep(3)
# print("鼠标点击发送按钮>>>>>")
# pyautogui.moveTo(1220, 422)
# pyautogui.click()
# time.sleep(10*28)
print("鼠标点击下一页>>>>>")
pyautogui.moveTo(1231, 227)  # click the "next page" control
pyautogui.click()
time.sleep(20)
# Log the current cursor position (useful when re-calibrating coordinates).
currentMouseX, currentMouseY = pyautogui.position()
print(currentMouseX)
print(currentMouseY)
pyautogui.alert(text='This is an alert box.', title='Test')
# app.kill()
# #4.点击新弹出窗体的确定按钮
# out_note=u'关于记事本'
# button_name_ok='确定'
# app[out_note][button_name_ok].click()
# #5.查看一个窗体含有的控件,子窗体,菜单
# print(app[title_notepad].print_control_identifiers())
# #-------------------无标题记事本的含有的控件,子窗体,菜单-----------------
# # Control Identifiers:
# #
# # Notepad - '无标题 - 记事本' (L8, T439, R892, B815)
# # ['无标题 - 记事本Notepad', 'Notepad', '无标题 - 记事本']
# # child_window(title="无标题 - 记事本", class_name="Notepad")
# # |
# # | Edit - '' (L16, T490, R884, B807)
# # | ['无标题 - 记事本Edit', 'Edit']
# # | child_window(class_name="Edit")
# # |
# # | StatusBar - '' (L16, T785, R884, B807)
# # | ['StatusBar', '无标题 - 记事本StatusBar', 'StatusBar 第 1 行,第 1 列']
# # | child_window(class_name="msctls_statusbar32")
# # None
#
# #6.在记事本中输入一些文本
# #[tips-> ctrl+点击鼠标左键快速查看被调用函数]
# app.title_notepad.Edit.type_keys('pywinauto works!\n',with_spaces=True,with_newlines=True)
# app.title_notepad.Edit.type_keys('hello word !\n',with_spaces=True,with_newlines=True)
# #7.选择编辑菜单->编辑时间/日期
# # app[title_notepad].menu_select('编辑->时间/日期(&d)')
# #8.连接已运行程序
# #如连接微信 借助spy++找到运行程序的handle
# app1=pywinauto.Application(backend='uia').connect(handle=0x00320830)
# #9.查看运行窗口窗体名称
# print(app1.window())
# print(app1['Dialog'].print_control_identifiers())
# # Dialog - '微信' (L968, T269, R1678, B903)
# # ['微信Dialog', 'Dialog', '微信']
# # child_window(title="微信", control_type="Window")
# # |
# # | Pane - 'ChatContactMenu' (L-10000, T-10000, R-9999, B-9999)
# # | ['ChatContactMenu', 'ChatContactMenuPane', 'Pane', 'Pane0', 'Pane1']
# # | child_window(title="ChatContactMenu", control_type="Pane")
# # | |
# # | | Pane - '' (L-10019, T-10019, R-9980, B-9980)
# # | | ['', 'Pane2', '0', '1']
# # |
# # | Pane - '' (L948, T249, R1698, B923)
# # | ['2', 'Pane3']
# # None
# #10.通过路径去打开一个已有程序
# #11.鼠标控制
# x=0
# y=0
# for i in range(20):
# step_x = i*8
# step_y = i*5
# move(coords=(step_x,step_y ))
# time.sleep(1)
#
# #12.键盘控制
# #键盘对应的ascii http://www.baike.com/wiki/ASCII
# #发送键盘指令,打开命令行,输入一条命令for /l %i in (1,1,100) do tree
# SendKeys('{VK_LWIN}')
# SendKeys('cmd')
# SendKeys('{VK_RETURN}')
# time.sleep(3)
# SendKeys('for /L +5i in +9 1,1,100+0 do tree {VK_RETURN}',with_spaces=True) | [
"s1332177151@sina.com"
] | s1332177151@sina.com |
024edc7ca11aa1d922a9f0786b76767268c596b9 | 76d43c6ee84b8c6cc90dd61a097fa57ecb85a17e | /term2/Computational_Neuroscience/Coursework2/Question4.py | 8ee3bdfe41c1c991856ddac64bea8fac8526d6b0 | [] | no_license | junghyun4425/UoB_Projects | 071708d4150117de1650b22d836fad4ac8fbf559 | 49f5eac9697c709da21b519619a28303bd83e728 | refs/heads/master | 2021-06-27T05:44:21.988846 | 2021-06-19T03:17:06 | 2021-06-19T03:17:06 | 231,607,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,399 | py | from numpy import *
import numpy as np
import matplotlib.pyplot as plt
def load_data(filename, T):
    """Read one value per line from *filename*, converting each with *T*.

    Parameters
    ----------
    filename : str
        Path to a text file holding one value per line.
    T : callable
        Conversion applied to each stripped line (e.g. ``float`` or ``int``).

    Returns
    -------
    list
        The converted values, in file order.
    """
    # Context manager guarantees the file handle is closed promptly
    # (the original opened the file without ever closing it).
    with open(filename, 'r') as f:
        return [T(line.strip()) for line in f]
def cal_STA(stim, data_array, width, sample_rate, interval, nec_adj):
    """Spike-triggered average of the stimulus, triggered on spike *pairs*.

    A spike at bin t is used as a trigger only if another spike occurs
    exactly ``interval`` ms later.  With ``nec_adj == 1`` the pair must
    additionally contain no spike in between ("necessarily adjacent");
    any other value allows intervening spikes.

    stim        -- stimulus value per time bin
    data_array  -- spike train (nonzero = spike), same sampling as stim
    width       -- STA window length in ms
    sample_rate -- ms per time bin
    interval    -- required separation of the spike pair, in ms
    nec_adj     -- 1 -> adjacent pairs only; otherwise not necessarily
    Returns a numpy array of length width/sample_rate (one value per lag).
    """
    interval /= sample_rate  # convert ms -> bins (rebinds the local only)
    time_bin = int(width / sample_rate)  # number of lag bins in the window
    sta = np.zeros(time_bin)
    spike_tmp = np.nonzero(data_array)[0]  # bin indices of all spikes
    spike_times=[]
    # necessarily adjacent case
    if nec_adj == 1:
        for i in range(0, len(spike_tmp)):
            index_s = spike_tmp[i] + interval
            # Keep the spike only if a partner spike sits exactly `interval`
            # bins later ...
            if data_array[int(index_s)] != 0:
                # ... and no spike lies strictly between the two.
                check_s = data_array[int(spike_tmp[i]) + 1:int(spike_tmp[i] + interval)]
                if sum(check_s) == 0:
                    spike_times.append(spike_tmp[i])
    # not necessarily adjacent case
    else:
        for i in range(0, len(spike_tmp)):
            index_s = spike_tmp[i] + interval
            if data_array[int(index_s)] != 0:
                spike_times.append(spike_tmp[i])
    num = len(spike_times)
    # Average the stimulus `tau` bins before each trigger spike.
    for tau in range(0, time_bin):
        dist = 0
        windows = []
        for i in range(0, num):
            # Spikes earlier than `tau` have no full window; they are counted
            # out of the divisor below.
            if spike_times[i] < tau:
                dist += 1
            # NOTE(review): when spike_times[i] < tau the index is negative,
            # so stim wraps around from the end, yet the value is still
            # appended to `windows` while being excluded from the divisor
            # (num - dist) -- verify this is intended.
            windows.append(stim[spike_times[i] - tau])
        sta[tau] = sum(windows) / (num - dist)
    return sta
# Load the stimulus and the binary spike train (same sampling, 2 ms bins).
stimulus=load_data("stim.dat",float)
spikes=load_data("rho.dat",int)
sample_rate = 2
width = 100
# Spike-pair separations (ms) to compare.
interval = [2, 10, 20, 50]
sta_adj = []
sta_not_adj = []
# Compute the STA for each interval, with and without the adjacency constraint.
for i in range(0, 4):
    sta_adj.append(cal_STA(stimulus, spikes, width, sample_rate, interval[i], 1))
    sta_not_adj.append(cal_STA(stimulus, spikes, width, sample_rate, interval[i], 0))
time = np.arange(0, width / sample_rate)
# Plot the "necessarily adjacent" STAs.
plt.figure()
plt.plot(time, sta_adj[0], label='2ms')
plt.plot(time, sta_adj[1], label='10ms')
plt.plot(time, sta_adj[2], label='20ms')
plt.plot(time, sta_adj[3], label='50ms')
plt.legend()
plt.xlabel('Time (ms)')
plt.ylabel('Stimulus')
plt.title('STA (spikes are necessarily adjacent)')
plt.savefig('adjacent.png')
# Plot the "not necessarily adjacent" STAs.
plt.figure()
plt.plot(time, sta_not_adj[0], label='2ms')
plt.plot(time, sta_not_adj[1], label='10ms')
plt.plot(time, sta_not_adj[2], label='20ms')
plt.plot(time, sta_not_adj[3], label='50ms')
plt.legend()
plt.xlabel('Time (ms)')
plt.ylabel('Stimulus')
plt.title('STA (spikes are not necessarily adjacent)')
plt.savefig('notnecessarilyadjacent.png')
plt.show()
| [
"junghyun153@naver.com"
] | junghyun153@naver.com |
8711969cce6874a941f33df8118fe018a65c20b2 | 723ea3f47a45fe756c4a77809eb2a4d6b98bc733 | /crackfun/cc150/Dijkstra’s shortest path algorithm.py | 7f3ed39306673ee737dc04aa9a712ef9cbf9f7fc | [] | no_license | JoyiS/Leetcode | a625e7191bcb80d246328121669a37ac81e30343 | 5510ef424135783f6dc40d3f5e85c4c42677c211 | refs/heads/master | 2021-10-21T05:41:00.706086 | 2019-03-03T06:29:14 | 2019-03-03T06:29:14 | 110,296,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | '''
Given a graph and a source vertex in graph, find shortest paths from source to all vertices in the given graph.
Time Complexity O((|V|+|E|)log|V|) geeks say O(E+VLogV) (with the use of Fibonacci heap
Space Complexity O(|V|)
下面写的代码中值得注意的是:
1) Graph 用二维matrix来实现,graph[u][v] 表示 u 到 v 的距离。如果 uv 不通, 那么 graph[u][v] = 0
2) dijkstra 的输入是 src 和 des 两个vertex。需要keep一个visited的set来表示这个vertex有没有被访问过。需要一个dist[]来记录每一个vertex的distance.
3) 在dijkstra中每一次循环的第一步是找一个当前没有visited的vertex这个vertex的distance是最小的。然后去更新这个vertex的每一个neighbor的dist [].
'''
# Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
class Graph():
    """Dijkstra's single-source shortest path on an adjacency-matrix graph.

    ``self.graph[u][v]`` is the edge weight from u to v; 0 means "no edge".
    """

    def __init__(self, vertices):
        # Vertex count and an all-zero (edge-less) V x V weight matrix.
        self.V = vertices
        self.graph = [[0 for column in range(vertices)]
                      for row in range(vertices)]

    def minDistance(self, dist, visited):
        """Return ``(vertex, distance)`` of the closest unvisited vertex.

        Returns ``(None, inf)`` when no unvisited vertex is reachable.
        (The original left ``min_index`` unassigned in that case and raised
        UnboundLocalError on disconnected graphs.)
        """
        mindis = float('inf')
        min_index = None
        for v in range(self.V):
            if not visited[v] and dist[v] < mindis:
                mindis = dist[v]
                min_index = v
        return min_index, mindis

    def dijkstra(self, src, des):
        """Run Dijkstra from ``src``; return the visit order ending at ``des``.

        Prints the shortest distance to ``des`` and returns the list of
        vertices in the order they were settled.  Returns None when ``des``
        is unreachable from ``src``.
        """
        dist = [float('inf')] * self.V
        dist[src] = 0
        visited = [False] * self.V
        output = []
        for _ in range(self.V):
            u, mindis = self.minDistance(dist, visited)
            if u is None:  # all reachable vertices settled: des unreachable
                return None
            output += [u]
            if u == des:
                print('Min distance is : ' + str(mindis))
                return output
            visited[u] = True
            # Relax all outgoing edges of u (0 entries mean "no edge").
            for v in range(self.V):
                if self.graph[u][v] > 0 and not visited[v] and dist[v] > dist[u] + self.graph[u][v]:
                    dist[v] = dist[u] + self.graph[u][v]
# Driver program: 9-vertex example graph (entries are weights; 0 = no edge).
g = Graph(9)
g.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0],
           [4, 0, 8, 0, 0, 0, 0, 11, 0],
           [0, 8, 0, 7, 0, 4, 0, 0, 2],
           [0, 0, 7, 0, 9, 14, 0, 0, 0],
           [0, 0, 0, 9, 0, 10, 0, 0, 0],
           [0, 0, 4, 14, 10, 0, 2, 0, 0],
           [0, 0, 0, 0, 0, 2, 0, 1, 6],
           [8, 11, 0, 0, 0, 0, 1, 0, 7],
           [0, 0, 2, 0, 0, 0, 6, 7, 0]
           ]
g.dijkstra(0,8) | [
"california.sjy@gmail.com"
] | california.sjy@gmail.com |
5d9833dcdd35e411ee6f3e223d2a9feb7cdcdf14 | 553af49f5937ac8fdf47e826315ea5a761b56d0d | /lecture_07/wall_04.py | 694163031ac2190cef911550de1877e4e0eb2a85 | [] | no_license | compas-ITA20/ITA20 | 6ef232b6376de8a8b6391394f6b2f6e37f5d2068 | 244ddbfdc9480f4ea8dda7f983f600f4793594d8 | refs/heads/main | 2023-01-24T20:30:12.360193 | 2020-12-09T10:47:44 | 2020-12-09T10:47:44 | 300,848,724 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | py | from compas.geometry import Bezier
from compas.geometry import Point, Polyline, Vector
from compas.geometry import offset_polyline
from compas.geometry import intersection_line_segment_xy
from compas.utilities import linspace, pairwise
from compas_plotters import GeometryPlotter
def intersection_line_polyline(line, polyline):
    """Return the first intersection of *line* with a segment of *polyline*.

    Walks the polyline's consecutive point pairs and returns the first
    XY-plane intersection as a Point; returns None when nothing intersects.
    """
    for seg in pairwise(polyline.points):
        hit = intersection_line_segment_xy(line, seg)
        if hit:
            return Point(*hit)
# Build a cubic Bezier wall axis and offset it 0.15 to each side.
controlpoints = [Point(0, 0, 0), Point(4, 2.5, 0), Point(6, -2.5, 0), Point(10, 0, 0)]
controlpoly = Polyline(controlpoints)
curve = Bezier(controlpoints)
poly = Polyline(curve.locus())
poly1 = Polyline(offset_polyline(poly, +0.15))
poly2 = Polyline(offset_polyline(poly, -0.15))
# Sample the curve and construct an in-plane normal line at every sample.
points = [curve.point(t) for t in linspace(0, 1, 20)]
tangents = [curve.tangent(t) for t in linspace(0, 1, 20)]
normals = [Vector(0, 0, 1).cross(t) for t in tangents]
lines = [[point, point + normal] for point, normal in zip(points, normals)]
# Intersect each interior normal (endpoints excluded) with both offsets.
points1 = [intersection_line_polyline(line, poly1) for line in lines[1:-1]]
points2 = [intersection_line_polyline(line, poly2) for line in lines[1:-1]]
# ==============================================================================
# Visualization
# ==============================================================================
plotter = GeometryPlotter(figsize=(16, 9))
plotter.add(controlpoly, linestyle='dotted', linewidth=1.0, color=(0.5, 0.5, 0.5))
for point in controlpoints:
    plotter.add(point, edgecolor=(1.0, 0.0, 0.0))
for point in points:
    plotter.add(point, size=2)
for point in points1:
    plotter.add(point, size=2)
for point in points2:
    plotter.add(point, size=2)
plotter.add(poly, color=(0.4, 0.4, 0.4))
plotter.add(poly1, color=(0.0, 0.0, 0.0))
plotter.add(poly2, color=(0.0, 0.0, 0.0))
plotter.zoom_extents()
plotter.show()
| [
"vanmelet@ethz.ch"
] | vanmelet@ethz.ch |
e498ed42df11a9002be442314e0dfb847530b219 | 2aba3c043ce4ef934adce0f65bd589268ec443c5 | /atcoder/Indeed_now_qual_A/B.py | 0b953deb7658d51f31f98245cbd081e2b99bb72c | [] | no_license | kambehmw/algorithm_python | 4f66593b77039d90515d1fcbecacdab8c811b92f | 17222399dcc92fd8f908e5774a9883e2e89c486e | refs/heads/master | 2020-06-02T12:44:11.322356 | 2020-05-18T13:22:05 | 2020-05-18T13:22:05 | 191,157,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | from collections import Counter
N = int(input())
S = [input() for _ in range(N)]
T = "indeednow"
counter = Counter(T)
for s in S:
if len(s) != len(T):
print("NO")
continue
counter2 = Counter(s)
flag = True
for k, v in counter2.items():
if k not in counter:
flag = False
break
if counter[k] != v:
flag = False
break
if flag:
print("YES")
else:
print("NO") | [
"kanbe.hmw@gmail.com"
] | kanbe.hmw@gmail.com |
5198c8f0ff598ce68a93865125ae5b40802f99a2 | 068070cc53bb033f6bf5cf2fe7660e6231ff1ae9 | /command.py | 798097d60fa12ca800a16cc8e4d788a7d2ebb179 | [
"MIT"
] | permissive | Answeror/yacron | afbed6ac6e0ba02a8d2fc4d67b5f7d0e512dfd82 | 45060bbaeffc2f06583b5c38607608e8770f4d51 | refs/heads/master | 2016-09-06T11:43:11.559755 | 2013-09-09T16:04:12 | 2013-09-09T16:04:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,287 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: command
:synopsis: Some command used to build pyqt.
.. moduleauthor:: Answeror <answeror@gmail.com>
These code was borrowed from `https://bitbucket.org/jbmohler/pyhacc/src/b0ad3a0b1e58/setup.py`_.
"""
import os
import distutils
from distutils.core import Command
from cx_Freeze import build
def needsupdate(src, targ):
    """Return True when *targ* is missing or older than *src* (mtime-based)."""
    if not os.path.exists(targ):
        return True
    return os.path.getmtime(src) > os.path.getmtime(targ)
class PySideUiBuild:
    """Compile Qt Designer artifacts with the PySide command-line tools."""

    def qrc(self, qrc_file, py_file):
        """Compile a .qrc resource file to a Python module via pyside-rcc."""
        import subprocess
        import PySide
        pyside_path = os.path.dirname(PySide.__file__)
        rcc_path = os.path.join(pyside_path, 'pyside-rcc')
        rccprocess = subprocess.Popen([rcc_path, qrc_file, '-py3', '-o', py_file])
        rccprocess.wait()

    def uic(self, ui_file, py_file):
        """Compile a .ui file to a Python module via pyside-uic."""
        import subprocess
        import PySide
        pyside_path = os.path.dirname(PySide.__file__)
        # BUG FIX: the original joined against the undefined name 'pyqt_path'
        # (NameError at runtime); the tool lives next to the PySide package.
        uic_path = os.path.join(pyside_path, 'pyside-uic')
        uicprocess = subprocess.Popen([uic_path, ui_file, '-o', py_file])
        uicprocess.wait()
class PyQt4UiBuild:
    """Compile Qt Designer artifacts with the PyQt4 tools."""

    def qrc(self, qrc_file, py_file):
        """Compile a .qrc resource file to a Python module via pyrcc4."""
        import subprocess
        import PyQt4
        pyqt_path = os.path.dirname(PyQt4.__file__)
        # BUG FIX: the original joined against the undefined name
        # 'pyside_path' (NameError); pyrcc4 lives next to the PyQt4 package.
        pyrcc4_path = os.path.join(pyqt_path, 'pyrcc4')
        rccprocess = subprocess.Popen([pyrcc4_path, qrc_file, '-py3', '-o', py_file])
        rccprocess.wait()

    def uic(self, ui_file, py_file):
        """Compile a .ui file to a Python module with PyQt4's uic module."""
        from PyQt4 import uic
        # Context manager ensures the output file is closed even on error
        # (the original could leak the handle if compileUi raised).
        with open(py_file, 'w') as fp:
            uic.compileUi(ui_file, fp)
class QtUiBuild(Command, PySideUiBuild):
    """distutils command: build Python modules from Qt Designer .ui/.qrc files."""
    description = "build Python modules from Qt Designer .ui files"
    user_options = []
    # Files to process; populated at module level after the class definitions.
    ui_files = []
    qrc_files = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def compile_ui(self, ui_file, py_file):
        # Skip the compile when the target is up to date w.r.t. the source.
        if not needsupdate(ui_file, py_file):
            return
        print("compiling %s -> %s" % (ui_file, py_file))
        try:
            self.uic(ui_file, py_file)
        except Exception as e:
            raise distutils.errors.DistutilsExecError('Unable to compile user interface %s' % str(e))
        return
    def compile_qrc(self, qrc_file, py_file):
        # Skip the compile when the target is up to date w.r.t. the source.
        if not needsupdate(qrc_file, py_file):
            return
        print("compiling %s -> %s" % (qrc_file, py_file))
        try:
            self.qrc(qrc_file, py_file)
        except Exception as e:
            raise distutils.errors.DistutilsExecError('Unable to compile resource file %s' % str(e))
        return
    def run(self):
        # Output naming: foo.ui -> ui_foo.py, foo.qrc -> foo_rc.py (same dir).
        for f in self.ui_files:
            dir, basename = os.path.split(f)
            self.compile_ui(f, os.path.join(dir, "ui_" + basename.replace(".ui", ".py")))
        for f in self.qrc_files:
            dir, basename = os.path.split(f)
            self.compile_qrc(f, os.path.join(dir, basename.replace(".qrc", "_rc.py")))
# Collect every .qrc file under the 'yacron' package; no .ui files are built.
QtUiBuild.ui_files = []
QtUiBuild.qrc_files = [os.path.join(dir, f) \
    for dir in ['yacron'] \
    for f in os.listdir(dir) if f.endswith('.qrc')]
class Build(build):
    # Run the UI build step before the regular cx_Freeze build steps.
    sub_commands = [('build_ui', None)] + build.sub_commands
# Command mapping intended for setup(cmdclass=cmds).
cmds = {
    'build': Build,
    'build_ui': QtUiBuild,
}
| [
"answeror@gmail.com"
] | answeror@gmail.com |
a94afbba47b18f02c8f95fe6db0082ed58da616c | 721f75630501927efb9f4bb9527d9ad5173714ea | /nbviewer/tests/base.py | 7f3458ff48d24a5e5bc7783e1d376db1e6cfc0e7 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | charsmith/nbviewer | 3bf8faa3b0806888cb14125a6e4a1ce414763c46 | f2356c5c82f3c3d95f2201bf1ac6f106cdfd5426 | refs/heads/master | 2021-01-16T20:49:40.496538 | 2018-01-10T20:57:46 | 2018-01-10T20:57:46 | 20,975,398 | 0 | 0 | BSD-3-Clause | 2018-01-10T20:57:46 | 2014-06-18T19:36:05 | Python | UTF-8 | Python | false | false | 2,399 | py | """Base class for nbviewer tests.
Derived from IPython.html notebook test case in 2.0
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import os
import sys
import time
import requests
from contextlib import contextmanager
from subprocess import Popen, PIPE
from unittest import TestCase
from nbviewer.utils import url_path_join
class NBViewerTestCase(TestCase):
    """A base class for tests that need a running nbviewer server."""
    # TCP port the test server is started on (and polled at).
    port = 12341
    @classmethod
    def wait_until_alive(cls):
        """Wait for the server to be alive"""
        # NOTE(review): busy-polls forever -- if the server never comes up,
        # this hangs the test run (no timeout).
        while True:
            try:
                requests.get(cls.url())
            except Exception:
                time.sleep(.1)
            else:
                break
    @classmethod
    def wait_until_dead(cls):
        """Wait for the server to stop getting requests after shutdown"""
        # Inverse of wait_until_alive: loop until a request finally fails.
        while True:
            try:
                requests.get(cls.url())
            except Exception:
                break
            else:
                time.sleep(.1)
    @classmethod
    def get_server_cmd(cls):
        # Launch nbviewer as a module using the same interpreter as the tests.
        return [
            sys.executable, '-m', 'nbviewer',
            '--port=%d' % cls.port,
            # '--logging=debug',
        ]
    @classmethod
    def setup_class(cls):
        # Start the server with stdout/stderr discarded, then block until it
        # answers HTTP requests.
        server_cmd = cls.get_server_cmd()
        devnull = open(os.devnull, 'w')
        cls.server = Popen(server_cmd,
            stdout=devnull,
            stderr=devnull,
        )
        cls.wait_until_alive()
    @classmethod
    def teardown_class(cls):
        # Terminate the server and wait until it stops accepting requests.
        cls.server.terminate()
        cls.wait_until_dead()
    @classmethod
    def url(cls, *parts):
        # Base URL of the test server, joined with any extra path parts.
        return url_path_join('http://localhost:%i' % cls.port, *parts)
@contextmanager
def assert_http_error(status, msg=None):
    """Context manager asserting that the body raises ``requests.HTTPError``.

    The response status code must equal *status*; if *msg* is given it must
    also appear in the exception text.  Fails when no HTTPError is raised.
    """
    try:
        yield
    except requests.HTTPError as e:
        real_status = e.response.status_code
        # BUG FIX: the format arguments were swapped in the original, so the
        # failure message read "Expected <actual>, got <expected>".
        assert real_status == status, \
            "Expected status %d, got %d" % (status, real_status)
        if msg:
            assert msg in str(e), e
    else:
        assert False, "Expected HTTP error status"
| [
"benjaminrk@gmail.com"
] | benjaminrk@gmail.com |
243492441759892aacad82cd08e364136522c70a | 391a95ef338ac01956346438b597e2d23e13d708 | /libpysat/transform/multiply_vector.py | 895be654990c40dbf5538d356ae45946e758a498 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | wateryi/PyHAT | 34fecf822d673f35009c1d7172757b5bc5223341 | e7977d945ba1e928bf3163ed7992796fb833527d | refs/heads/master | 2020-06-18T20:18:30.090262 | 2020-05-01T01:12:50 | 2020-05-01T01:12:50 | 196,433,305 | 0 | 0 | NOASSERTION | 2019-07-11T16:52:45 | 2019-07-11T16:52:45 | null | UTF-8 | Python | false | false | 505 | py | #this function multiplies all the spectra in a data frame by a vector.
import numpy as np
import pandas as pd
def multiply_vector(df, vectorfile):
df_spectra = df['wvl']
# TODO: check to make sure wavelengths match before multiplying
vector = np.array(pd.read_csv(vectorfile, sep=',', header=None))[:, 1]
if df_spectra.shape[1] == vector.shape[0]:
df['wvl'] = df_spectra.multiply(vector, axis=1)
else:
print('Vector is not the same size as the spectra!')
return df | [
"rbanderson@usgs.gov"
] | rbanderson@usgs.gov |
af808a5caa79930f8276ae03bb82eb0cf792acb0 | 5b1ff6054c4f60e4ae7315db9f20a334bc0b7634 | /Push2/observable_property_alias.py | 2e3d81d968f12d6a54dbfadafc7979283138bf09 | [] | no_license | maratbakirov/AbletonLive9_RemoteScripts | 2869122174634c75405a965401aa97a2dae924a1 | 4a1517c206353409542e8276ebab7f36f9bbd4ef | refs/heads/master | 2021-06-05T14:38:27.959025 | 2021-05-09T11:42:10 | 2021-05-09T11:42:10 | 13,348,327 | 3 | 4 | null | 2016-10-16T13:51:11 | 2013-10-05T16:27:04 | Python | UTF-8 | Python | false | false | 1,448 | py | #Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push2/observable_property_alias.py
from __future__ import absolute_import, print_function
from ableton.v2.base import SlotManager, Slot
class ObservablePropertyAlias(SlotManager):
def __init__(self, alias_host, property_host = None, property_name = '', alias_name = None, getter = None, *a, **k):
super(ObservablePropertyAlias, self).__init__(*a, **k)
self._alias_host = alias_host
self._alias_name = alias_name or property_name
self._property_host = property_host
self._property_name = property_name
self._property_slot = None
self._setup_alias(getter)
def _get_property_host(self):
return self._property_host
def _set_property_host(self, host):
self._property_host = host
self._property_slot.subject = host
property_host = property(_get_property_host, _set_property_host)
def _setup_alias(self, getter):
aliased_prop = property(getter or self._get_property)
setattr(self._alias_host.__class__, self._alias_name, aliased_prop)
notifier = getattr(self._alias_host, 'notify_' + self._alias_name)
self._property_slot = self.register_slot(Slot(self.property_host, notifier, self._property_name))
def _get_property(self, _):
return getattr(self.property_host, self._property_name, None) | [
"julien@julienbayle.net"
] | julien@julienbayle.net |
dc024a4ba381f04fe26481171fa4aed643ffad8b | ee86ad4b38f6ba13f195246f14224ba781f933cc | /09_start/비트연산예제1.py | 52bacb459e2633987a88ed983f9c64c703ebd1e8 | [] | no_license | yejikk/Algorithm | aed7adf00c1e32d21b735b3b34dc6cb75049f164 | 531f43305b3a23c824c9e153151b7280c1dc2535 | refs/heads/master | 2020-04-17T06:17:28.961656 | 2019-11-16T08:02:49 | 2019-11-16T08:02:49 | 166,318,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | def Bbit_print(i):
output = ''
for j in range(7, -1, -1):
if i & (1 << j):
output += '1'
else:
output += '0'
# output += '1' if i & (1 << j) else '0'
print(output)
for i in range(-5, 6):
print('{} = '.format(i), end='')
Bbit_print(i) | [
"dpwl7484@gmail.com"
] | dpwl7484@gmail.com |
9672d446fc6d70d32c8ae69520ee2a435e1d1943 | 4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5 | /suning/api/selfmarket/OrderReturnAddRequest.py | d55710e0b5f5c80e89f665c34fb0b6e8f10f5a29 | [] | no_license | shijingyu/sunningAPI | 241f33b0660dc84635ce39688fed499f5c57a5da | 4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5 | refs/heads/master | 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | # -*- coding: utf-8 -*-
'''
Created on 2015-12-28
@author: suning
'''
from suning.api.abstract import AbstractApi
class OrderReturnAddRequest(AbstractApi):
    """Request wrapper for the ``suning.fourps.orderreturn.add`` API
    (creates an order-return in the self-operated marketplace).
    """
    def __init__(self):
        AbstractApi.__init__(self)
        # Request parameters; all default to None until set by the caller.
        self.outOrderId = None
        self.oldOrderId = None
        self.orderSource = None
        self.expectStartTime = None
        self.expectEndTime = None
        self.remark = None
        self.senderZipCode = None
        self.senderProvince = None
        self.senderCity = None
        self.senderArea = None
        self.senderTown = None
        self.senderAddress = None
        self.senderName = None
        self.senderMobile = None
        self.senderPhone = None
        self.takeFlag = None
        self.orderFlag = None
        self.orderProductList = None
        # These parameters are mandatory and must not be empty.
        required = ('outOrderId', 'oldOrderId', 'orderSource',
                    'expectStartTime', 'expectEndTime', 'senderProvince',
                    'senderCity', 'senderName')
        self.setParamRule({name: {'allow_empty': False} for name in required})
    def getApiBizName(self):
        """Name of the business operation wrapped by this request."""
        return 'addOrderReturn'
    def getApiMethod(self):
        """Dotted API method identifier used on the wire."""
        return 'suning.fourps.orderreturn.add'
| [
"945090896@qq.com"
] | 945090896@qq.com |
782c942b2fc75fcf6b9a2197fdadadf5d66b9070 | 378d848acbcb495c164af9766c80ea8817d4b510 | /字符串/重排回文.py | 2b1e9b0545ff3a5c1808a080385f2da35947fe3a | [] | no_license | youkaede77/Data-Structure-and-Algorithm | fa0fb70e6016f8d0290310d356a8b1830867eed6 | 068fa82f29dc8ad41f00055cc76aeb6b8c24410d | refs/heads/master | 2022-04-09T06:52:18.448641 | 2020-02-24T16:15:47 | 2020-02-24T16:15:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # 对一个字符串进行重排,是否存在回文
# 如:aabccdd —— acdbdca 是
# 如: abc 不是
# 思路:
# 如果s长度为偶数,则每个字母必须出现偶数次
# 如果长度为奇数,只有一个字母出现奇数次
from collections import Counter
def is_permucate_palindrome(s):
    """Return True if some permutation of *s* is a palindrome.

    A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times.  (For even length that forces zero odd
    counts, for odd length exactly one -- the length parity makes both
    cases collapse into this single rule, so the original's duplicated
    even/odd branches and mid-loop early returns are unnecessary.)

    Examples: 'aabccdd' -> True ('acdbdca'), 'abc' -> False, '' -> True.
    """
    odd_counts = sum(1 for c in Counter(s).values() if c % 2)
    return odd_counts <= 1
print(is_permucate_palindrome('abcac')) | [
"weidafeng.edu@gmail.com"
] | weidafeng.edu@gmail.com |
f8bcbca4ba2bd7e3422b4fb0acf48f8fde4128ae | 5730110af5e4f0abe538ed7825ddd62c79bc3704 | /pacu/pacu/core/svc/vstim/clock/tcp/__init__.py | e5209a96fc0aebe9b6cdcd813c6e40f09bc677ce | [] | no_license | jzeitoun/pacu-v2 | bdbb81def96a2d87171ca20b89c878b2f66975e7 | 0ccb254a658263b4fe8c80ea623f860cb7dc1428 | refs/heads/master | 2021-06-03T18:50:50.890399 | 2020-04-27T16:31:59 | 2020-04-27T16:31:59 | 110,889,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from pacu.core.svc.vstim.clock.tcp.oneway import OnewayTCPClock
from pacu.core.svc.vstim.clock.tcp.twoway import TwowayTCPClock
| [
"jzeitoun@uci.edu"
] | jzeitoun@uci.edu |
2ef0ef2379ebf4494a924278b069c3642cd630f3 | de428c011b56db862f05ec0ceab17b85f83f94b1 | /pythongame/game_data/abilities/ability_fireball.py | 34724c8299dfd5c68821f72061ffdb217bf7c69f | [] | no_license | risooonho/python-2d-game | c6d1fceaf09c72a6f7573230a4a899bf79164b7f | 24b02646ed56f9017069b243b774e0ee46951aea | refs/heads/master | 2021-05-17T06:02:13.538699 | 2020-02-15T23:59:54 | 2020-02-15T23:59:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,794 | py | import random
from pythongame.core.ability_effects import register_ability_effect, AbilityWasUsedSuccessfully, AbilityResult
from pythongame.core.buff_effects import get_buff_effect, AbstractBuffEffect, register_buff_effect
from pythongame.core.common import Sprite, ProjectileType, AbilityType, Millis, \
Direction, SoundId, BuffType, PeriodicTimer, HeroUpgradeId
from pythongame.core.damage_interactions import deal_player_damage_to_enemy, DamageType
from pythongame.core.game_data import register_ability_data, AbilityData, UiIconSprite, \
register_ui_icon_sprite_path, register_entity_sprite_map, ABILITIES
from pythongame.core.game_state import GameState, WorldEntity, Projectile, NonPlayerCharacter
from pythongame.core.hero_upgrades import register_hero_upgrade_effect
from pythongame.core.math import get_position_from_center_position, translate_in_direction
from pythongame.core.projectile_controllers import create_projectile_controller, AbstractProjectileController, \
register_projectile_controller
from pythongame.core.sound_player import play_sound
from pythongame.core.view.image_loading import SpriteSheet
from pythongame.core.visual_effects import VisualCircle, VisualParticleSystem
# Note: Projectile size must be smaller than hero entity size (otherwise you get a collision when shooting next to wall)
FIREBALL_MANA_COST = 4  # base mana cost of the ability
FIREBALL_UPGRADED_MANA_COST = 3  # cost after the mana-cost hero upgrade
PROJECTILE_SIZE = (28, 28)
MIN_DMG = 3  # magic damage is rolled uniformly in [MIN_DMG, MAX_DMG)
MAX_DMG = 4
FIREBALL_TALENT_BURN_DURATION = Millis(2500)  # length of the burn debuff
FIREBALL_TALENT_BURN_INTERVAL = Millis(500)  # one damage tick per interval
# The burn deals 1 damage per tick, so the total equals the tick count.
FIREBALL_TALENT_BURN_TOTAL_DAMAGE = int(round(FIREBALL_TALENT_BURN_DURATION / FIREBALL_TALENT_BURN_INTERVAL))
BUFF_TYPE = BuffType.BURNT_BY_FIREBALL
def _create_visual_splash(effect_position, game_state):
    """Spawn the fireball impact visuals (expanding circle + particle burst)
    centered on *effect_position*.
    """
    splash_circle = VisualCircle((250, 100, 50), effect_position, 22, 45, Millis(100), 0)
    game_state.visual_effects.append(splash_circle)
    particles = VisualParticleSystem(
        num_particles=10,
        position=effect_position,
        colors=[(250, 100, 100), (250, 50, 100), (250, 100, 50)],
        alpha=100,
        duration_interval=(Millis(50), Millis(200)))
    game_state.visual_effects.append(particles)
class ProjectileController(AbstractProjectileController):
    """Controls the fireball projectile: what happens when it hits an enemy
    or a wall.
    """
    def __init__(self):
        # 1500 is passed to the base controller; presumably the projectile's
        # maximum lifetime in ms -- TODO confirm against
        # AbstractProjectileController.
        super().__init__(1500)
    def apply_enemy_collision(self, npc: NonPlayerCharacter, game_state: GameState, projectile: Projectile):
        # Roll uniform magic damage in [MIN_DMG, MAX_DMG).
        damage_amount: float = MIN_DMG + random.random() * (MAX_DMG - MIN_DMG)
        deal_player_damage_to_enemy(game_state, npc, damage_amount, DamageType.MAGIC)
        _create_visual_splash(npc.world_entity.get_center_position(), game_state)
        # With the burn talent, also apply the damage-over-time debuff.
        has_burn_upgrade = game_state.player_state.has_upgrade(HeroUpgradeId.ABILITY_FIREBALL_BURN)
        if has_burn_upgrade:
            npc.gain_buff_effect(get_buff_effect(BUFF_TYPE), FIREBALL_TALENT_BURN_DURATION)
        play_sound(SoundId.ABILITY_FIREBALL_HIT)
        projectile.has_collided_and_should_be_removed = True
    def apply_wall_collision(self, game_state: GameState, projectile: Projectile):
        # Walls simply consume the projectile with the same splash and sound.
        _create_visual_splash(projectile.world_entity.get_center_position(), game_state)
        play_sound(SoundId.ABILITY_FIREBALL_HIT)
        projectile.has_collided_and_should_be_removed = True
class BurntByFireball(AbstractBuffEffect):
    """Damage-over-time debuff applied by the fireball burn talent:
    1 magic damage per tick, with a small red flash on each tick.
    """
    def __init__(self):
        self.timer = PeriodicTimer(FIREBALL_TALENT_BURN_INTERVAL)
    def apply_middle_effect(self, game_state: GameState, buffed_entity: WorldEntity, buffed_npc: NonPlayerCharacter,
                            time_passed: Millis):
        # Tick once per FIREBALL_TALENT_BURN_INTERVAL of accumulated time.
        if self.timer.update_and_check_if_ready(time_passed):
            deal_player_damage_to_enemy(game_state, buffed_npc, 1, DamageType.MAGIC)
            game_state.visual_effects.append(
                VisualCircle((180, 50, 50), buffed_npc.world_entity.get_center_position(), 10, 20, Millis(50), 0,
                             buffed_entity))
    def get_buff_type(self):
        return BUFF_TYPE
def _apply_ability(game_state: GameState) -> AbilityResult:
    """Cast the fireball: spawn a projectile in front of the player, show a
    muzzle flash, and (without the light-footed upgrade) briefly slow the
    player with the recovery buff.  Always succeeds.
    """
    player_entity = game_state.player_entity
    # Spawn the projectile slightly in front of the player, in the direction
    # the player is facing.
    distance_from_player = 35
    projectile_pos = translate_in_direction(
        get_position_from_center_position(player_entity.get_center_position(), PROJECTILE_SIZE),
        player_entity.direction,
        distance_from_player)
    projectile_speed = 0.3
    entity = WorldEntity(projectile_pos, PROJECTILE_SIZE, Sprite.PROJECTILE_PLAYER_FIREBALL, player_entity.direction,
                         projectile_speed)
    projectile = Projectile(entity, create_projectile_controller(ProjectileType.PLAYER_FIREBALL))
    game_state.projectile_entities.append(projectile)
    # Muzzle-flash circle at the projectile's center.
    effect_position = (projectile_pos[0] + PROJECTILE_SIZE[0] // 2,
                       projectile_pos[1] + PROJECTILE_SIZE[1] // 2)
    game_state.visual_effects.append(VisualCircle((250, 150, 50), effect_position, 15, 5, Millis(300), 0))
    # The light-footed upgrade removes the short post-cast recovery.
    has_lightfooted_upgrade = game_state.player_state.has_upgrade(HeroUpgradeId.MAGE_LIGHT_FOOTED)
    if not has_lightfooted_upgrade:
        game_state.player_state.gain_buff_effect(get_buff_effect(BuffType.RECOVERING_AFTER_ABILITY), Millis(300))
    return AbilityWasUsedSuccessfully()
def _upgrade_fireball_mana_cost(_game_state: GameState):
    # Hero upgrade hook: lowers the ability's mana cost (4 -> 3).
    ABILITIES[AbilityType.FIREBALL].mana_cost = FIREBALL_UPGRADED_MANA_COST
def register_fireball_ability():
    """Register everything the fireball ability needs with the game's
    registries: effect, UI data/icon, projectile controller, sprite map,
    burn buff, and the mana-cost hero-upgrade hook.
    """
    register_ability_effect(AbilityType.FIREBALL, _apply_ability)
    description = "Shoot a fireball, dealing " + str(MIN_DMG) + "-" + str(MAX_DMG) + \
                  " magic damage to the first enemy that it hits."
    ability_data = AbilityData("Fireball", UiIconSprite.ABILITY_FIREBALL, FIREBALL_MANA_COST, Millis(500), description,
                               SoundId.ABILITY_FIREBALL)
    register_ability_data(AbilityType.FIREBALL, ability_data)
    register_ui_icon_sprite_path(UiIconSprite.ABILITY_FIREBALL, "resources/graphics/icon_fireball.png")
    register_projectile_controller(ProjectileType.PLAYER_FIREBALL, ProjectileController)
    # Sprite sheet layout: 8 animation frames per direction, one row pair
    # per direction (rows 0/2/4/6 for LEFT/UP/RIGHT/DOWN).
    sprite_sheet = SpriteSheet("resources/graphics/projectile_player_fireball.png")
    original_sprite_size = (64, 64)
    indices_by_dir = {
        Direction.LEFT: [(x, 0) for x in range(8)],
        Direction.UP: [(x, 2) for x in range(8)],
        Direction.RIGHT: [(x, 4) for x in range(8)],
        Direction.DOWN: [(x, 6) for x in range(8)]
    }
    scaled_sprite_size = (48, 48)
    register_entity_sprite_map(Sprite.PROJECTILE_PLAYER_FIREBALL, sprite_sheet, original_sprite_size,
                               scaled_sprite_size, indices_by_dir, (-9, -9))
    register_buff_effect(BUFF_TYPE, BurntByFireball)
    register_hero_upgrade_effect(HeroUpgradeId.ABILITY_FIREBALL_MANA_COST, _upgrade_fireball_mana_cost)
| [
"jonte.murray@gmail.com"
] | jonte.murray@gmail.com |
82530bb5148f663c6037aa9b33d89c402bc72c63 | ee66a0e854e196300f7d88bdb2bb2e88530a2571 | /code/data.py | dff0715c34d181d708993ec98ff4082054978bc0 | [
"MIT"
] | permissive | RuthAngus/kinematics-and-rotation | 343532d70ba79466f559a9abea7c525ce2d28b47 | 7cad283612bc70ca9d12c79978561b938f527198 | refs/heads/master | 2022-07-31T20:14:37.090658 | 2020-05-19T12:16:50 | 2020-05-19T12:16:50 | 187,388,579 | 2 | 2 | MIT | 2020-04-23T15:10:45 | 2019-05-18T17:47:54 | Jupyter Notebook | UTF-8 | Python | false | false | 6,758 | py | # Converting Exploring data into a script.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import fits
import astropy.utils as au
from astropy.coordinates import SkyCoord
from dustmaps.bayestar import BayestarQuery
import astropy.units as units
from tools import getDust
from stardate.lhf import age_model
from calc_velocities import calc_vb, calc_vz, calc_vl
import astropy.units as u
from astropy.coordinates import ICRS
from astropy.coordinates import Galactic
from astropy.table import Table
from pyia import GaiaData
import astropy.coordinates as coord
from photometric_teff import bprp_to_teff
# Global matplotlib style: large fonts and LaTeX text rendering for
# publication-quality figures.  Applied process-wide via rcParams.
plotpar = {'axes.labelsize': 30,
           'font.size': 30,
           'legend.fontsize': 15,
           'xtick.labelsize': 30,
           'ytick.labelsize': 30,
           'text.usetex': True}
plt.rcParams.update(plotpar)
# Crossmatch the McQuillan rotation-period table with the Kepler-Gaia DR2
# 1-arcsec crossmatch catalog, keyed on the Kepler ID ("kepid").
print("Load McQuillan data")
mc = pd.read_csv("../Table_1_Periodic.txt")
print("Loading Gaia catalog")
with fits.open("../kepler_dr2_1arcsec.fits") as data:
    gaia = pd.DataFrame(data[1].data, dtype="float64")
gaia_mc = pd.merge(mc, gaia, on="kepid", how="left")
print(len(gaia_mc), "stars")
# S/N cuts: keep stars with parallax S/N > 10, a positive and finite
# parallax, and astrometric excess noise below 5 (astrometric quality cut).
sn = gaia_mc.parallax.values/gaia_mc.parallax_error.values
m = (sn > 10)
m &= (gaia_mc.parallax.values > 0) * np.isfinite(gaia_mc.parallax.values)
m &= gaia_mc.astrometric_excess_noise.values < 5
print(len(gaia_mc.iloc[m]), "stars after S/N cuts")
# Jason's wide binary cuts
# m &= gaia_mc.astrometric_excess_noise.values > 0
# m &= gaia_mc.astrometric_excess_noise_sig.values > 6
# Jason's short-period binary cuts
# m &= radial_velocity_error < 4
# print(len(gaia_mc.iloc[m]), "stars after Jason's binary cuts")
# assert 0
gaia_mc = gaia_mc.iloc[m]
print("Loading Dustmaps")
# Bayestar2019 3D dust map; max_samples=2 keeps the query memory-light.
bayestar = BayestarQuery(max_samples=2, version='bayestar2019')
# Query the 3D dust map at each star's sky position and distance; the
# 16th/50th/84th percentiles give E(B-V) with lower/upper uncertainties.
print("Calculating Ebv")
coords = SkyCoord(gaia_mc.ra.values*units.deg, gaia_mc.dec.values*units.deg,
                  distance=gaia_mc.r_est.values*units.pc)
ebv, flags = bayestar(coords, mode='percentile', pct=[16., 50., 84.],
                      return_flags=True)
# Calculate Av.  2.742 converts E(B-V) to A_V -- presumably the R_V=3.1
# extinction coefficient; TODO confirm against the dustmaps documentation.
Av_bayestar = 2.742 * ebv
print(np.shape(Av_bayestar), "shape")
Av = Av_bayestar[:, 1]
Av_errm = Av - Av_bayestar[:, 0]
Av_errp = Av_bayestar[:, 2] - Av
# Symmetrised 1-sigma uncertainty from the two percentile half-widths.
Av_std = .5*(Av_errm + Av_errp)
# Catch places where the extinction uncertainty is zero and default to an
# uncertainty of .05
m = Av_std == 0
Av_std[m] = .05
gaia_mc["ebv"] = ebv[:, 1]  # The median ebv value.
gaia_mc["Av"] = Av
gaia_mc["Av_errp"] = Av_errp
gaia_mc["Av_errm"] = Av_errm
gaia_mc["Av_std"] = Av_std
# Calculate dereddened photometry: subtract the per-band extinctions
# (G, BP, RP) returned by getDust from the observed Gaia magnitudes.
AG, Abp, Arp = getDust(gaia_mc.phot_g_mean_mag.values,
                       gaia_mc.phot_bp_mean_mag.values,
                       gaia_mc.phot_rp_mean_mag.values, gaia_mc.ebv.values)
gaia_mc["bp_dered"] = gaia_mc.phot_bp_mean_mag.values - Abp
gaia_mc["rp_dered"] = gaia_mc.phot_rp_mean_mag.values - Arp
# Dereddened colour and G magnitude used throughout the rest of the script.
gaia_mc["bprp_dered"] = gaia_mc["bp_dered"] - gaia_mc["rp_dered"]
gaia_mc["G_dered"] = gaia_mc.phot_g_mean_mag.values - AG
# Calculate absolute magnitude via the distance modulus.
def mM(m, D):
    """Return the absolute magnitude of a source with apparent magnitude
    *m* at distance *D* (parsecs): M = m - (5*log10(D) - 5)."""
    distance_modulus = 5*np.log10(D) - 5
    return m - distance_modulus
# Absolute dereddened G magnitude from the Bailer-Jones distance estimate.
abs_G = mM(gaia_mc.G_dered.values, gaia_mc.r_est)
gaia_mc["abs_G"] = abs_G
# Remove NaNs
m2 = np.isfinite(gaia_mc.abs_G.values)
gaia_mc = gaia_mc.iloc[m2]
# Remove binaries: fit a degree-6 polynomial to the main sequence in
# colour-magnitude space via the normal equations (rows of AT are x^6..x^0,
# so w is ordered high-to-low as np.polyval expects).
x = gaia_mc.bp_dered - gaia_mc.rp_dered
y = gaia_mc.abs_G
AT = np.vstack((x**6, x**5, x**4, x**3, x**2, x, np.ones_like(x)))
ATA = np.dot(AT, AT.T)
w = np.linalg.solve(ATA, np.dot(AT, y))
# Colour window [minb, maxb]; `extra` is the brightness offset (mag) above
# the fitted sequence used to flag photometric-binary candidates.
minb, maxb, extra = 0, 2.2, .27
xs = np.linspace(minb, maxb, 1000)  # NOTE(review): xs appears unused here.
subcut = 4.
# m selects likely binaries/evolved stars: inside the colour window AND
# (brighter than the sequence by `extra`, or brighter than subcut overall).
m = (minb < x) * (x < maxb)
m &= (y < np.polyval(w, x) - extra) + (subcut > y)
# flag == 1 marks stars that PASS the cut (treated as singles downstream).
flag = np.zeros(len(gaia_mc))
flag[~m] = np.ones(len(flag[~m]))
gaia_mc["flag"] = flag
# Diagnostic CMD plot: all stars vs. the retained (flag == 1) subset.
test = gaia_mc.iloc[gaia_mc.flag.values == 1]
plt.plot(gaia_mc.bp_dered - gaia_mc.rp_dered, gaia_mc.abs_G, ".", alpha=.1)
plt.plot(test.bp_dered - test.rp_dered, test.abs_G, ".", alpha=.1)
plt.ylim(10, 1)
plt.savefig("test")
# Calculate photometric Teff from the dereddened BP-RP colour.
teffs = bprp_to_teff(gaia_mc.bp_dered - gaia_mc.rp_dered)
gaia_mc["color_teffs"] = teffs
print("Calculating gyro ages")
# Gyrochronal log10(age) per star from rotation period + colour.
# NOTE(review): uses the OBSERVED (not dereddened) BP-RP here -- confirm
# whether that is intended.
logages = []
for i, p in enumerate(gaia_mc.Prot.values):
    logages.append(age_model(np.log10(p), gaia_mc.phot_bp_mean_mag.values[i] -
                             gaia_mc.phot_rp_mean_mag.values[i]))
gaia_mc["log_age"] = np.array(logages)
# Convert log10(age [yr]) to linear age in Gyr.
gaia_mc["age"] = (10**np.array(logages))*1e-9
# CMD coloured by gyro age, restricted to the flag == 1 ("single") sample.
plt.figure(figsize=(16, 9), dpi=200)
singles = gaia_mc.flag.values == 1
plt.scatter(gaia_mc.bprp_dered.values[singles], gaia_mc.abs_G.values[singles],
            c=gaia_mc.age.values[singles], vmin=0, vmax=5, s=50, alpha=.2,
            cmap="viridis", rasterized=True, edgecolor="none")
plt.xlabel("$\mathrm{G_{BP}-G_{RP}~[dex]}$")
plt.ylabel("$\mathrm{G~[dex]}$")
plt.colorbar(label="$\mathrm{Gyrochronal~age~[Gyr]}$")
plt.ylim(11, 5.5)
plt.xlim(.8, 2.7);
plt.savefig("age_gradient.pdf")
# Velocity in galactic latitude (vb) from Monte-Carlo samples: medians,
# standard deviations and 16/84th-percentile asymmetric errors.
print("Calculating vb")
pmb_samples, vb_samples = calc_vb(gaia_mc)
pmb, vb = np.median(pmb_samples, axis=1), np.median(vb_samples, axis=1)
pmb_err, vb_err = np.std(pmb_samples, axis=1), np.std(vb_samples, axis=1)
vb_errp = np.percentile(vb_samples, 84, axis=1) - vb
vb_errm = vb - np.percentile(vb_samples, 16, axis=1)
gaia_mc["vb"] = vb
gaia_mc["vb_err"] = vb_err
# print("Calculating vl")
# vl_samples = calc_vl(gaia_mc)
# vl, vl_err = np.median(vl_samples, axis=1), np.std(vl_samples, axis=1)
# vl_errp = np.percentile(vl_samples, 84, axis=1) - vl
# vl_errm = vl - np.percentile(vl_samples, 16, axis=1)
# gaia_mc["vl"] = vl
# gaia_mc["vl_err"] = vl_err
# Calculate galactic latitude/longitude (b, l) from ICRS ra/dec.
icrs = ICRS(ra=gaia_mc.ra.values*u.degree,
            dec=gaia_mc.dec.values*u.degree)
lb = icrs.transform_to(Galactic)
# NOTE(review): lb.b / lb.l are already angles in degrees; multiplying by
# u.degree again gives deg**2 units.  .value still yields the numeric
# degrees below, but the extra factor looks unintended -- confirm.
b = lb.b*u.degree
l = lb.l*u.degree
gaia_mc["b"] = b.value
gaia_mc["l"] = l.value
print("Calculating VZ")
# Stars without a radial velocity get NaN vz.  NOTE(review): missing RVs
# are detected as exactly 0.00 here; Gaia usually encodes missing RVs as
# NaN -- confirm this sentinel matches the catalog actually loaded.
mrv = gaia_mc.radial_velocity.values != 0.00
vz, vz_err = calc_vz(gaia_mc)
vz[~mrv] = np.ones(len(vz[~mrv]))*np.nan
vz_err[~mrv] = np.ones(len(vz_err[~mrv]))*np.nan
gaia_mc["vz"] = vz
gaia_mc["vz_err"] = vz_err
# Calculate tangential velocities v_ra and v_dec: proper motion times
# distance, converted to km/s with dimensionless_angles().
d = gaia_mc.r_est.values*u.pc
vra = (gaia_mc.pmra.values*u.mas/u.yr * d).to(u.km/u.s,
                                              u.dimensionless_angles())
vdec = (gaia_mc.pmdec.values*u.mas/u.yr * d).to(u.km/u.s,
                                                u.dimensionless_angles())
# Same quantity in galactic coordinates: velocity in latitude (v_b).
c = coord.SkyCoord(ra=gaia_mc.ra.values*u.deg, dec=gaia_mc.dec.values*u.deg,
                   distance=d, pm_ra_cosdec=gaia_mc.pmra.values*u.mas/u.yr,
                   pm_dec=gaia_mc.pmdec.values*u.mas/u.yr)
gal = c.galactic
v_b = (gal.pm_b * gal.distance).to(u.km/u.s, u.dimensionless_angles())
gaia_mc["v_ra"] = vra.value
gaia_mc["v_dec"] = vdec.value
gaia_mc["v_b"] = v_b
print("Saving file")
gaia_mc.to_csv("gaia_mc5.csv")
| [
"ruth.angus@astro.ox.ac.uk"
] | ruth.angus@astro.ox.ac.uk |
1ad6a6fe4e509cff53c845770777678f0b1d73f9 | 25b4fc4a54faf0f4217f3661477fa8f26cd60164 | /Orders/migrations/0004_rename_city_order_town_city.py | f8f82d71f368704c408a0e1eeaf7fb9050510edf | [] | no_license | AshtiNematian/Book_Store_Nematian_ | 6f601f69f0a25522ac351e4ad963f17011254289 | b83ea7319dbead2be5812e2d001c58e7d906fff9 | refs/heads/master | 2023-07-21T03:56:48.386869 | 2021-09-03T17:03:17 | 2021-09-03T17:04:24 | 402,333,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # Generated by Django 3.2.6 on 2021-08-26 09:20
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the Order.city field to Order.town_city."""
    # Must run after the previous Orders migration in the chain.
    dependencies = [
        ('Orders', '0003_auto_20210826_1349'),
    ]
    # Pure rename: preserves existing row data, only the column name changes.
    operations = [
        migrations.RenameField(
            model_name='order',
            old_name='city',
            new_name='town_city',
        ),
    ]
| [
"you@example.com"
] | you@example.com |
404fb6aeb98af6e4b45d47570ca4d57c33888f8c | acbfcd794191dc4919c6ab18a41dab128c9546ad | /setup.py | 122029909f03ba261a77e0373140abbf19ae7cc5 | [
"MIT"
] | permissive | nigma/django-request-id | 9b9562a7658d842557416c1cdf51c8dd6f5fa2ea | 0850e04e91b616b9aa8443959edf4a73a62289e4 | refs/heads/master | 2021-05-24T04:21:32.339021 | 2016-10-23T15:27:48 | 2016-10-23T15:27:48 | 16,383,094 | 28 | 13 | MIT | 2021-01-11T19:42:34 | 2014-01-30T16:36:59 | Python | UTF-8 | Python | false | false | 1,854 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = "1.0.0"
# Convenience release shortcut: `python setup.py publish` builds and uploads
# the sdist/wheel, reminds the maintainer to tag, then exits before setup()
# runs.  NOTE(review): `setup.py upload` is deprecated in favour of twine.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist bdist_wheel upload")
    print("You probably want to also tag the version now:")
    print(" git tag -a %s -m 'version %s'" % (version, version))
    print(" git push --tags")
    sys.exit()
# Read the long-description sources with context managers so the file
# handles are closed promptly (the bare open(...).read() idiom leaks the
# handle until garbage collection).
with open("README.rst") as readme_file:
    readme = readme_file.read()
with open("HISTORY.rst") as history_file:
    history = history_file.read().replace(".. :changelog:", "")
setup(
name="django-request-id",
version=version,
description="""Augment each request with unique id for logging purposes""",
license="MIT",
author="Filip Wasilewski",
author_email="en@ig.ma",
url="https://github.com/nigma/django-request-id",
long_description=readme + "\n\n" + history,
packages=[
"request_id",
],
include_package_data=True,
install_requires=[
"django",
],
zip_safe=True,
keywords="django request-id",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 1.8",
"Framework :: Django :: 1.9",
"Framework :: Django :: 1.10",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries :: Python Modules"
],
)
| [
"en@ig.ma"
] | en@ig.ma |
886ec42f03ae959f306ac16499444fbbe5fbec73 | 270363be5ea94d33469fe4271eccb343357d4fa6 | /linalg/optim/conjugate_gradient.py | 49f095c2ebeba493d88fd167c9316edb9ddffc69 | [] | no_license | tkelestemur/learn-linalg | c487389e9802b0223232bcb8c9ec0003cc7df091 | a6e04e903e5c9e00801b56a228c56fd8b8ba8c71 | refs/heads/master | 2023-03-19T05:53:34.407780 | 2021-01-02T13:54:40 | 2021-01-02T14:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | import numpy as np
from .base import IterativeSolver
class ConjugateGradient(IterativeSolver):
    """A conjugate gradient (CG) solver.
    Unlike gradient descent, CG is guaranteed to converge in at most `n` steps.
    """
    def __init__(self, max_iters, tol=1e-6):
        # max_iters: hard cap on CG iterations; tol: relative residual
        # reduction required between consecutive iterations to stop early.
        self.max_iters = max_iters
        self.tol = tol
    def _solve(self, A, b):
        """Iteratively solve A x = b.

        Assumes A is symmetric positive definite (the standard CG
        requirement).  NOTE(review): the starting point is random, so
        results are run-dependent; the stopping test compares successive
        residual norms rather than the residual against ||b||.
        """
        n = A.shape[0]
        x = np.random.randn(n)  # Randomly initialize an estimate of x.
        r = b - A @ x
        v = np.array(r, copy=True)
        beta = 0
        for i in range(self.max_iters):
            v = r + beta*v  # Search direction.
            alpha = r@r / (v.T @ A @ v)  # Line search.
            x = x + alpha*v  # Update estimate.
            r_old = r  # Save the old residual.
            r = r - alpha*A@v  # Update the residual.
            if (r@r) < self.tol*(r_old@r_old):
                print("Converged in {} iterations.".format(i))
                break
            beta = (r@r) / (r_old@r_old)  # Direction step.
        return x
| [
"kevinarmandzakka@gmail.com"
] | kevinarmandzakka@gmail.com |
bf34a1e3f50cd8ba8fade62d4a408ed0f8953a02 | 36dfe29989de2005f5ad8f61b7575e16bc7f46ed | /Blue/BFS/JumpingNumber.py | 5a79cc610740b1fee81ec5e97ffb8142d69c7881 | [] | no_license | chrislevn/Coding-Challenges | ca17e26476a721ca855fef6e2e1587148758dde5 | dee52baaf5d15046a901b174e8b319deaa1c6107 | refs/heads/master | 2022-03-26T00:05:09.817752 | 2022-02-03T17:37:53 | 2022-02-03T17:37:53 | 242,436,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | # Check if the value is a jumping number or not
def checkJumping(value):
    """Return True when the digit sequence of *value* changes direction.

    The trend (rising or falling) is fixed by the first two digits; the
    number counts as "jumping" if any later adjacent pair of digits moves
    the opposite way.  A number whose first two digits are equal is never
    considered jumping.  *value* must have at least two digits.
    """
    digits = [int(ch) for ch in str(value)]
    # Establish the trend from the leading pair of digits.
    lead_step = digits[1] - digits[0]
    if lead_step == 0:
        return False
    rising = lead_step > 0
    # Scan every remaining adjacent pair for a move against the trend.
    # (Equal adjacent digits never break the trend.)
    for prev, cur in zip(digits[1:], digits[2:]):
        step = cur - prev
        if rising and step < 0:
            return True
        if not rising and step > 0:
            return True
    return False
# Sum the first 500 "jumping" numbers, scanning upward from 100.
count = 100
# create a realCounter variable to check if the sequence has total 500 numbers
realCounter = 0
# Result value
total = 0
# Create a sample test array to check if the sequence is a list of jumpy numbers or not
# testArr = []
# Run the loop from 100, the loop stops when the sequence has total 500 numbers.
while realCounter < 500:
    if checkJumping(count):
        # testArr.append(count)
        realCounter += 1
        total += count
    count += 1
# print(testArr)
print(total)
"locvicvn1234@gmail.com"
] | locvicvn1234@gmail.com |
03c0b569271add7aed620e1f3b32dea58117f35c | ed15e441d4cd7a54d989610b8070a5d14bfda4c8 | /1805/python高级/5/8-重写.py | e395db85cdba412a97661b2bab7bab65a2cf3a0a | [] | no_license | jmh9876/p1804_jmh | 24593af521749913b65685e21ffc37281c43998f | a52a6366c21ad7598e71d8e82aeee746ecee7c6b | refs/heads/master | 2020-03-15T23:30:02.769818 | 2018-08-02T09:10:20 | 2018-08-02T09:10:20 | 132,395,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | class Animal():
def wark(self):
print("叫")
class Dog(Animal):
def wark(self):
print("汪汪叫")
class xiaotianquan(Dog):
def wark(self):
print("狂叫")
d = Dog()
d.wark()
xtq = xiaotianquan()
xtq.wark()
| [
"2210744940@qq.com"
] | 2210744940@qq.com |
82efc495c463906297ad2423d973ea7de9e5139b | 8c861caac797ecc24d9e907989af3d8b4493a2b4 | /gwt_pt/oanda/order/get.py | 4851450cd58e7821403824d52b0a59a2c9fa6020 | [] | no_license | eggyolktech/gwtPT | b607242ce5bbc13854f05eff69bf98ca6de2d218 | 23d60e54966fcd2ba85a1038ff807921d6b70028 | refs/heads/master | 2021-09-14T21:03:00.347549 | 2018-05-19T16:12:43 | 2018-05-19T16:12:43 | 112,349,960 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | #!/usr/bin/env python
import argparse
import gwt_pt.oanda.common.config
import gwt_pt.oanda.common.view
def main():
    """
    Get the details of an Order in an Account
    """
    # Build the CLI: the shared v20 config argument plus the order id.
    parser = argparse.ArgumentParser()
    #
    # Add the command line argument to parse to the v20 config
    #
    gwt_pt.oanda.common.config.add_argument(parser)
    parser.add_argument(
        "orderid",
        help=(
            "The ID of the Order to get. If prepended "
            "with an '@', this will be interpreted as a client Order ID"
        )
    )
    args = parser.parse_args()
    #
    # Create the api context based on the contents of the
    # v20 config file
    #
    api = args.config.create_context()
    #
    # Submit the (read-only) request to fetch the Order's details
    #
    response = api.order.get(
        args.config.active_account,
        args.orderid
    )
    print("Response: {} ({})".format(response.status, response.reason))
    print("")
    # Extract the Order object from the body, expecting HTTP status 200.
    order = response.get("order", 200)
    print(order)
if __name__ == "__main__":
    main()
| [
"eggyolktech@gmail.com"
] | eggyolktech@gmail.com |
a078ceb80fb08d947461e11dc935026a821823d3 | 8881a4927d893e1e755c0488f76ba7941b379f26 | /just_django/dashboard/notepad/views.py | 645436b0609ecc27be5307948ddc99273f019660 | [] | no_license | SatishNitk/Django | 6bb839fcf2bc7d70413e3d56ac98124a7a96a5de | d9260c032322a34410d783c39a8f13e8f63b8be4 | refs/heads/master | 2020-05-24T23:01:35.767388 | 2019-07-06T13:56:50 | 2019-07-06T13:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | from django.shortcuts import render,redirect, get_object_or_404
from notepad.models import *
from notepad.forms import *
def create_view(request):
    """Create a new note owned by the requesting user.

    On a valid POST the note is saved and the user is redirected home;
    otherwise the (possibly bound) form is re-rendered.
    """
    form = NoteModelForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        # Stamp ownership before saving; the form has no user field itself.
        form.instance.user = request.user
        form.save()
        return redirect('/')
    context = {
        'form':form
    }
    return render(request,"notepad/create.html",context)
def list_view(request):
    """Render every note in the database on the list page."""
    context = {'object_list': Note.objects.all()}
    return render(request, "notepad/list.html", context)
def delete_view(request, note_id):
    """Delete the note with ``note_id`` when it exists and the requesting
    user owns it; otherwise silently do nothing.

    Always redirects to the note list afterwards, matching the original
    behaviour.  Uses a single ``.first()`` lookup instead of the original
    ``.exists()`` plus two separate ``[0]`` indexings, which issued three
    queries for one delete.
    """
    note = Note.objects.filter(pk=note_id).first()
    # Ownership check: only the note's owner may delete it.
    if note is not None and request.user == note.user:
        note.delete()
    return redirect("/notes/list")
def update_view(request, note_id):
    """Edit an existing note; responds 404 when the note does not exist.

    NOTE(review): there is no check that request.user owns the note, so
    any authenticated user can edit (and take ownership of) any note --
    confirm whether this is intended.
    """
    unique_note = get_object_or_404(Note,id=note_id)
    form = NoteModelForm(request.POST or None, request.FILES or None, instance=unique_note) # due to instance form will be pre populated with data
    if form.is_valid():
        # Reassigns ownership to whoever submits the edit.
        form.instance.user = request.user
        form.save()
        return redirect('/')
    context = {
        'form':form
    }
    return render(request,"notepad/create.html",context)
| [
"satishkrgu95@gmail.com"
] | satishkrgu95@gmail.com |
5134d00deabcf5b4c674b1a99748d87fc878a810 | 4918c2450e4f5c74246257e6f77e4443db8594d7 | /src/alchemtest/tests/test_namd.py | 2249eb1cc5fa485d2e96ccd25518334dca809fa5 | [
"BSD-3-Clause"
] | permissive | ttjoseph/alchemtest-broken | 58ab5470cdaf112911a7b08048056a71597147a9 | d106cafcf02570d170f4ac78a11fb07517b5de11 | refs/heads/master | 2023-07-05T19:20:47.586928 | 2021-08-01T22:10:18 | 2021-08-01T22:10:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | '''Tests for all the NAMD datasets'''
import pytest
from alchemtest.namd import load_tyr2ala, load_idws
from . import BaseDatasetTest
class TestNAMD(BaseDatasetTest):
    """Runs the shared BaseDatasetTest checks against each NAMD dataset."""
    # Each param tuple is (loader, expected top-level keys, per-key counts);
    # the exact interpretation is defined by BaseDatasetTest -- TODO confirm.
    @pytest.fixture(scope="class",
                    params = [(load_tyr2ala, ('forward', 'backward'), (1, 1)),
                              (load_idws, ('forward', ), (2,)),
                              ])
    def dataset(self, request):
        # Delegate to the base class, which unpacks the param tuple.
        return super(TestNAMD, self).dataset(request)
| [
"orbeckst@gmail.com"
] | orbeckst@gmail.com |
ca667e57f2843a81d8bdb44aa0f217287f0f2a5a | 212ea42ae6425e4a5e4563167d391e8ffe7e090b | /click_app_template_repository_demo/cli.py | 1bdd73daaec7a8a7c7223b2fbfb8613e4ad30338 | [
"Apache-2.0"
] | permissive | simonw/click-app-template-repository-demo-prev | 80563221203a7c6a909be50d5bdc9952dd927700 | 058f5b6fb2e4dee1ab80f7479b390c84c8deb984 | refs/heads/main | 2023-07-05T06:58:28.109577 | 2021-08-30T01:06:11 | 2021-08-30T01:06:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | import click
# Top-level command group; --version support comes from click.version_option().
# NOTE: the function docstrings double as the CLI help text shown to users,
# so they are deliberately left untouched.
@click.group()
@click.version_option()
def cli():
    "Demo of simonw/click-app-template-repository"
# Subcommand invoked as `cli command EXAMPLE [-o OPTION]`.
@cli.command(name="command")
@click.argument(
    "example"
)
@click.option(
    "-o",
    "--option",
    help="An example option",
)
def first_command(example, option):
    "Command description goes here"
    click.echo("Here is some output")
| [
"actions@github.com"
] | actions@github.com |
b6f5ac176c14ef3851f5eb729f9346af4e42b399 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/173a9e3eb37826e2524f5dd66aab24fcf203068b-<_get_outer_edges>-bug.py | 6b77e21432d3ffaf2f38b31d8a9f3d2dbeb18926 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | def _get_outer_edges(a, range):
'\n Determine the outer bin edges to use, from either the data or the range\n argument\n '
if (range is not None):
(first_edge, last_edge) = range
elif (a.size == 0):
(first_edge, last_edge) = (0, 1)
else:
(first_edge, last_edge) = (a.min(), a.max())
if (first_edge > last_edge):
raise ValueError('max must be larger than min in range parameter.')
if (not (np.isfinite(first_edge) and np.isfinite(last_edge))):
raise ValueError('range parameter must be finite.')
if (first_edge == last_edge):
first_edge = (first_edge - 0.5)
last_edge = (last_edge + 0.5)
return (first_edge, last_edge) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
d5623029647237a1c70c28295148fe8a911cf82c | 7297f40a65a84c5acd38cfa2822c8366d2f49b45 | /auth_demo/settings.py | 5383ac9ac0f2ca330924f6a1429a9ac16919727b | [
"Apache-2.0"
] | permissive | aleducode/shopify-django-custom | 4041b224a52995718aeb15ea5b5bedf7083cf8b7 | d6199fbb42d944baa3ee36119e5e47c008cad1fd | refs/heads/master | 2020-12-21T22:45:35.629700 | 2020-01-27T20:57:09 | 2020-01-27T20:57:09 | 236,589,887 | 0 | 0 | Apache-2.0 | 2020-01-27T20:57:11 | 2020-01-27T20:40:31 | Python | UTF-8 | Python | false | false | 6,698 | py | # Django settings for auth_demo project.
import os
import environ
import dotenv
from django.conf import settings as global_settings
env = environ.Env()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load environment variables from a .env file so that they can be read later in settings via os.environ.get().
# See https://github.com/theskumar/python-dotenv.
PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
# PostgreSQL connection; all credentials are taken from the environment via
# django-environ, so each POSTGRES_* variable must be set (e.g. in .env).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': env('POSTGRES_DB'),
        'USER': env('POSTGRES_USER'),
        'PASSWORD': env('POSTGRES_PASSWORD'),
        'HOST': env('POSTGRES_HOST'),
        'PORT': env('POSTGRES_PORT'),
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/auth_app/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS=(
os.path.join(BASE_DIR,'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY: this key is hardcoded and committed to source control; for any
# real deployment it should be rotated and loaded from the environment,
# like the database credentials above.
SECRET_KEY = 'zbopgazej3!+%#8r226!%d*o-7we-*vap7=^mdh30-1*r95nb('
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'shopify_auth.context_processors.shopify_auth'
],
},
},
]
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
ROOT_URLCONF = 'auth_demo.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'auth_demo.wsgi.application'
# Start off with the default context processors.
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'shopify_auth',
'auth_app',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Use the Shopify Auth authentication backend.
AUTHENTICATION_BACKENDS = (
'shopify_auth.backends.ShopUserBackend',
)
# Use the Shopify Auth user model.
AUTH_USER_MODEL = 'auth_app.AuthAppShopUser'
# Set a default login redirect location.
LOGIN_REDIRECT_URL = 'auth_app.views.home'
# Add Shopify Auth configuration.
#
# NOTE(review): the original comment here claimed the API key and secret
# are read from environment variables, but they are hardcoded below.
# SECURITY: these credentials are committed to source control -- rotate
# them and load them from the environment (e.g. via django-environ or
# python-dotenv) instead.
SHOPIFY_APP_NAME = 'auth_app'
SHOPIFY_APP_API_KEY = 'f1d5f6e0179fd8ab27a96d69267cf3b8'
SHOPIFY_APP_API_SECRET = '1d08b63979cc8fe3de5f35e285e0ffa2'
SHOPIFY_APP_API_SCOPE = ['read_products', 'read_orders']
SHOPIFY_APP_IS_EMBEDDED = True
SHOPIFY_APP_DEV_MODE = False
# Set secure proxy header to allow proper detection of secure URLs behind a proxy.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| [
"ducode@outlook.com"
] | ducode@outlook.com |
6f5575a487b01c27d32de08574f1e27559e0a69a | d9eef8dd3489682c8db41f2311e3058d1f369780 | /.history/abel-network-files/mcmc_alg_implementation_own_20180627095012.py | a9c18eebfc9fc6c0d2176b210806e6bc43b539e7 | [] | no_license | McKenzie-Lamb/Gerrymandering | 93fe4a49fe39a0b307ed341e46ba8620ea1225be | b7a7c4129d6b0fcd760ba8952de51eafa701eac3 | refs/heads/master | 2021-01-25T06:06:43.824339 | 2018-10-16T14:27:01 | 2018-10-16T14:27:01 | 93,526,515 | 0 | 0 | null | 2018-07-12T19:07:35 | 2017-06-06T14:17:47 | Python | UTF-8 | Python | false | false | 2,109 | py | # Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import graph_tool.all as gt
from pathlib import Path
# Paths
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")  # NOTE(review): unused here.
# Loading the previous created Graph and creating the prop maps:
# district_no holds each vertex's district id, color/ring_color hold RGBA
# fill and outline colours for drawing.
graph = gt.load_graph(str(data_folder / "tmp_graph.gt"))
district_no = graph.new_vertex_property("int")
color = graph.new_vertex_property("vector<double>")
ring_color = graph.new_vertex_property("vector<double>")
# Assigning the district to each vertex as a property map
def get_districts_data(graph, color):
    """Partition the graph into 2 blocks ("districts") and aggregate each
    district's per-tract data dict.

    Side effects: writes the module-level `district_no` property map and
    the `color` fill map (yellow for block 1, cyan otherwise).
    """
    districts_data = {}
    # Stochastic block model partition into exactly 2 blocks.
    districts = gt.minimize_blockmodel_dl(graph, 2,2)
    blocks = districts.get_blocks()
    for i in graph.vertices():
        district_no[graph.vertex(i)] = blocks[i]
        color[graph.vertex(i)] = (255, 255, 0, 1) if blocks[i] == 1 else (0, 255, 255, 1)
        if district_no[graph.vertex(i)] in districts_data.keys():
            # Accumulate every field of this tract into the district totals.
            for j in districts_data[blocks[i]].keys():
                districts_data[blocks[i]][j] += graph.vp.data[i][j]
        else:
            # NOTE(review): this stores graph.vp.data[i] itself; if that is
            # a mutable mapping, the += above mutates the graph's stored
            # data for the first tract of each district -- confirm.
            districts_data[blocks[i]] = graph.vp.data[i]
    return districts_data
# Assign each district's ring color from its aggregate 2014 House vote.
def adjust_color(districts_data, ring_color):
    """Outline every vertex blue when its district's Democratic votes
    (CONDEM14) exceed the Republican votes (CONREP14), red otherwise."""
    for district, totals in districts_data.items():
        if totals['CONDEM14'] > totals['CONREP14']:
            district_color = (0, 0, 255, 1)
        else:
            district_color = (255, 0, 0, 1)
        print(district_color)
        # Apply the chosen colour to every vertex in this district.
        for vertex in gt.find_vertex(graph, district_no, district):
            ring_color[graph.vertex(vertex)] = district_color
# Partition, colour, and render the graph to a PNG with vertex indices shown.
adjust_color(get_districts_data(graph, color), ring_color)
gt.graph_draw(graph, bg_color=(255, 255, 255, 1), vertex_fill_color=ring_color, vertex_color=color, pos=graph.vp.pos,
              vertex_text=graph.vertex_index, output='abel-network-files/tmp.png')
| [
"gonzaleza@ripon.edu"
] | gonzaleza@ripon.edu |
62e39e9ef55d6356ad92505d98340e913905cb6d | 3c4df269135bacb78b5e56892789bc5bd315ce5b | /tests/chainer_tests/dataset_tests/tabular_tests/test_tabular_dataset.py | 228b4b630f25971a3e7a07b3e143414828870b18 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dl4fugaku/chainer | 64affb7786644c96e267f5ae47c60f0af66e34de | 34655eff5986522eae56f47fc82a8cc2b78e1617 | refs/heads/master | 2020-06-18T04:02:08.194893 | 2019-07-10T06:00:59 | 2019-07-10T06:00:59 | 196,157,185 | 2 | 0 | MIT | 2019-07-10T07:43:29 | 2019-07-10T07:43:29 | null | UTF-8 | Python | false | false | 2,442 | py | import unittest
import numpy as np
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
# Run every test once per combination of dataset mode and storage type.
@testing.parameterize(*testing.product({
    'mode': [tuple, dict, None],
    'return_array': [True, False],
}))
class TestTabularDataset(unittest.TestCase):
    """Checks fetch/get_example/iteration behaviour of a tabular dataset."""
    def test_fetch(self):
        # fetch() with no arguments must request everything: both the row
        # indices and the key indices passed through to the dataset are None.
        def callback(indices, key_indices):
            self.assertIsNone(indices)
            self.assertIsNone(key_indices)
        dataset = dummy_dataset.DummyDataset(
            mode=self.mode, return_array=self.return_array, callback=callback)
        output = dataset.fetch()
        # The expected layout of the result depends on the dataset mode.
        if self.mode is tuple:
            expected = tuple(dataset.data)
        elif self.mode is dict:
            expected = dict(zip(('a', 'b', 'c'), dataset.data))
        elif self.mode is None:
            expected = dataset.data[0]
        np.testing.assert_equal(output, expected)
        # Normalise to an iterable of columns to check the storage type.
        if self.mode is dict:
            output = output.values()
        elif self.mode is None:
            output = output,
        for out in output:
            if self.return_array:
                self.assertIsInstance(out, np.ndarray)
            else:
                self.assertIsInstance(out, list)
    def test_get_example(self):
        # get_example(3) must translate into a fetch of row [3] only.
        def callback(indices, key_indices):
            self.assertEqual(indices, [3])
            self.assertIsNone(key_indices)
        dataset = dummy_dataset.DummyDataset(
            mode=self.mode, return_array=self.return_array, callback=callback)
        if self.mode is tuple:
            expected = tuple(dataset.data[:, 3])
        elif self.mode is dict:
            expected = dict(zip(('a', 'b', 'c'), dataset.data[:, 3]))
        elif self.mode is None:
            expected = dataset.data[0, 3]
        self.assertEqual(dataset.get_example(3), expected)
    def test_iter(self):
        # Iterating the dataset must yield its 10 rows in order, then stop.
        dataset = dummy_dataset.DummyDataset(
            mode=self.mode, return_array=self.return_array)
        it = iter(dataset)
        for i in range(10):
            if self.mode is tuple:
                expected = tuple(dataset.data[:, i])
            elif self.mode is dict:
                expected = dict(zip(('a', 'b', 'c'), dataset.data[:, i]))
            elif self.mode is None:
                expected = dataset.data[0, i]
            self.assertEqual(next(it), expected)
        with self.assertRaises(StopIteration):
            next(it)
testing.run_module(__name__, __file__)
| [
"Hakuyume@users.noreply.github.com"
] | Hakuyume@users.noreply.github.com |
b28d6aa720de968226e1d6e9d08a1ba1ab0dc952 | 2a28a94fc8eb08961e76c61ab73889135153502b | /asposecellscloud/requests/get_worksheet_shape_request.py | ad7111df252c027654d13a68a5479b3fda851a03 | [
"MIT"
] | permissive | aspose-cells-cloud/aspose-cells-cloud-python | 45fc7e686b442302a29a8223e7dbddb71950438c | 270d70ce7f8f3f2ecd9370b1dacfc4789293097e | refs/heads/master | 2023-09-04T01:29:44.242037 | 2023-08-23T13:13:30 | 2023-08-23T13:13:30 | 123,092,364 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,324 | py | # coding: utf-8
"""
<copyright company="Aspose" file="GetWorksheetShapeRequest.cs">
Copyright (c) 2023 Aspose.Cells Cloud
</copyright>
<summary>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
</summary>
"""
import json
from six import iteritems
from asposecellscloud import *
from asposecellscloud.models import *
from asposecellscloud.requests import *
from six.moves.urllib.parse import quote
class GetWorksheetShapeRequest(object):
    """Request object for the Aspose.Cells Cloud "get worksheet shape" call.

    Holds the workbook name, worksheet name and shape index (all required),
    plus the optional folder and storage name, and knows how to turn itself
    into the request-description dict consumed by the API client's HTTP layer.
    Code appears to be generated; keep edits consistent with sibling request
    classes.
    """
    def __init__(self , name ,sheet_name ,shapeindex ,folder =None ,storage_name =None ):
        # name: workbook file name; sheet_name: worksheet containing the
        # shape; shapeindex: index of the shape on that worksheet.
        self.name = name
        self.sheet_name = sheet_name
        self.shapeindex = shapeindex
        # Optional cloud-storage location of the workbook.
        self.folder = folder
        self.storage_name = storage_name
    def create_http_request(self, api_client):
        """Validate required fields and build the HTTP request description.

        Returns a dict (method, path, query/header/form params, ...) that
        ``api_client`` executes. Raises ValueError when a required parameter
        is missing.
        """
        # verify the required parameter 'name' is set
        if self.name is None:
            raise ValueError("Missing the required parameter `name` when calling `get_worksheet_shape`")
        # verify the required parameter 'sheet_name' is set
        if self.sheet_name is None:
            raise ValueError("Missing the required parameter `sheet_name` when calling `get_worksheet_shape`")
        # verify the required parameter 'shapeindex' is set
        if self.shapeindex is None:
            raise ValueError("Missing the required parameter `shapeindex` when calling `get_worksheet_shape`")
        collection_formats = {}
        path_params = {}
        if self.name is not None:
            path_params['name'] = self.name
        if self.sheet_name is not None:
            path_params['sheetName'] = self.sheet_name
        if self.shapeindex is not None:
            path_params['shapeindex'] = self.shapeindex
        query_params = []
        if self.folder is not None:
            query_params.append(('folder',self.folder ))
        if self.storage_name is not None:
            query_params.append(('storageName',self.storage_name ))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        resource_path = "/cells/{name}/worksheets/{sheetName}/shapes/{shapeindex}"
        # path parameters
        if path_params:
            path_params = api_client.sanitize_for_serialization(path_params)
            path_params = api_client.parameters_to_tuples(path_params, collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace('{%s}' % k, quote(str(v), safe='/'))
        return {
            "method": "GET",
            "path":resource_path,
            "query_params": query_params,
            "header_params": header_params,
            "form_params": form_params,
            "files":local_var_files,
            "auth_settings":auth_settings,
            "body": body_params,
            "collection_formats": collection_formats,
            "response_type": 'ShapeResponse'
        }
| [
"roy.wang@aspose.com"
] | roy.wang@aspose.com |
f012ee7eb3d7e55e579d51802426fbc3b902d52b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5631989306621952_0/Python/farizazmi/A.py | ed241a8ad3d77775b45e57d65fc2d39acced49b0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | for tc in range(1, input() + 1):
s = raw_input()
res = s[0]
for c in s[1:]:
res = (c + res) if (c >= res[0]) else res + c
print "Case #" + str(tc) +": " +str(res) | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
e9454cb8b2ce93c128e3e190ddca723d58101c9d | 9b50b3a7dda2711c5665909f6801249de53e70f6 | /0x08-python-more_classes/7-rectangle.py | 054052e23ccdae50a8c09cd1654bab028818244d | [] | no_license | nikolasribeiro/holbertonschool-higher_level_programming | 3119e5442887f06da104dc8aa93df371f92b9f2b | 7dcdf081d8a57ea1f5f6f9830555f73bf2ae6993 | refs/heads/main | 2023-04-21T05:22:03.617609 | 2021-05-05T11:38:51 | 2021-05-05T11:38:51 | 319,198,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | #!/usr/bin/python3
""" defining rectangle based on 6-rectangle """
class Rectangle:
    """A rectangle defined by a non-negative integer width and height.

    Class attributes:
        number_of_instances (int): count of currently-live instances
            (incremented on creation, decremented in __del__).
        print_symbol: symbol used to draw the rectangle in __str__.
    """

    number_of_instances = 0
    print_symbol = "#"

    def __init__(self, width=0, height=0):
        """Initialize a rectangle.

        Args:
            width (int): horizontal size, must be >= 0.
            height (int): vertical size, must be >= 0.

        Raises:
            TypeError: if width or height is not an integer.
            ValueError: if width or height is negative.
        """
        # Route through the property setters so validation lives in one place
        # (width is validated first, then height, matching message order).
        self.width = width
        self.height = height
        type(self).number_of_instances += 1

    @property
    def width(self):
        """int: horizontal size of the rectangle."""
        return self.__width

    @width.setter
    def width(self, value):
        if not isinstance(value, int):
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """int: vertical size of the rectangle."""
        return self.__height

    @height.setter
    def height(self, value):
        if not isinstance(value, int):
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Return the area (0 if either dimension is 0)."""
        return self.__height * self.__width

    def perimeter(self):
        """Return the perimeter, or 0 if either dimension is 0."""
        # Bug fix: the original compared with `is 0` (identity, not equality),
        # which only works by accident of CPython small-int interning and is a
        # SyntaxWarning on Python 3.8+.
        if self.__width == 0 or self.__height == 0:
            return 0
        return (self.__height * 2) + (self.__width * 2)

    def __str__(self):
        """Return the rectangle drawn with print_symbol, '' if degenerate."""
        if self.__width == 0 or self.__height == 0:
            return ""
        row = str(self.print_symbol) * self.__width
        return "\n".join([row] * self.__height)

    def __repr__(self):
        """Return an eval-able representation, e.g. Rectangle(2, 4)."""
        return "Rectangle({:d}, {:d})".format(self.__width, self.__height)

    def __del__(self):
        """Print a farewell message and decrement the live-instance count."""
        print("Bye rectangle...")
        type(self).number_of_instances -= 1
| [
"nikolasribeiro2@outlook.com"
] | nikolasribeiro2@outlook.com |
794af4c80e6438d9bca37d38bfc0a8c39c1a78ed | d7016f69993570a1c55974582cda899ff70907ec | /sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2019_11_01/_subscription_client.py | d0a6b9a75effd9721990b295820f4ddd79f83d86 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 4,526 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import SubscriptionClientConfiguration
from .operations import Operations, SubscriptionClientOperationsMixin, SubscriptionsOperations, TenantsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class SubscriptionClient(SubscriptionClientOperationsMixin):  # pylint: disable=client-accepts-api-version-keyword
    """All resource groups and resources exist within subscriptions. These operation enable you get
    information about your subscriptions and tenants. A tenant is a dedicated instance of Azure
    Active Directory (Azure AD) for your organization.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.resource.subscriptions.v2019_11_01.operations.Operations
    :ivar subscriptions: SubscriptionsOperations operations
    :vartype subscriptions:
     azure.mgmt.resource.subscriptions.v2019_11_01.operations.SubscriptionsOperations
    :ivar tenants: TenantsOperations operations
    :vartype tenants: azure.mgmt.resource.subscriptions.v2019_11_01.operations.TenantsOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2019-11-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self, credential: "TokenCredential", base_url: str = "https://management.azure.com", **kwargs: Any
    ) -> None:
        self._config = SubscriptionClientConfiguration(credential=credential, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers over every generated model class in _models.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Operation groups share the one pipeline client and (de)serializers.
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.subscriptions = SubscriptionsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.tenants = TenantsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Copy so the caller's request object is not mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    def close(self) -> None:
        """Close the underlying pipeline client and its transport."""
        self._client.close()

    def __enter__(self) -> "SubscriptionClient":
        # Context-manager support delegates to the pipeline client.
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details) -> None:
        self._client.__exit__(*exc_details)
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
166cdbbc1b7be4aedad563e778df023b40d613c5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_misappropriated.py | c37b06a6f096fe5ec79442af9b84df72df3d68c5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py |
from xai.brain.wordbase.verbs._misappropriate import _MISAPPROPRIATE
#calss header
class _MISAPPROPRIATED(_MISAPPROPRIATE):
    """Word entry for "misappropriated", derived from the base verb entry."""

    def __init__(self):
        """Initialise the base-verb state, then set this entry's own fields."""
        _MISAPPROPRIATE.__init__(self)
        self.name = "MISAPPROPRIATED"
        self.specie = 'verbs'
        self.basic = "misappropriate"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
66a6c2e6b8e1264e6f44f286d041502183cecaf5 | dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0 | /src/main/scala/Sudoku.py | 4c45af801b7459a5a0bee3ceef9b890e6c2cbb3e | [] | no_license | joestalker1/leetcode | 8a5cdda17abd33c3eef859732f75d7bec77a9d0e | ae392ddbc7eb56cb814b9e9715043c98a89a6314 | refs/heads/master | 2023-04-13T22:09:54.407864 | 2023-04-09T19:22:54 | 2023-04-09T19:22:54 | 131,803,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,895 | py | # A Backtracking program in Python to solve Sudoku problem
# A Utility Function to print the Grid
def print_grid(arr):
    """Print the 9x9 grid one cell per line, with a blank gap after each row.

    The trailing comma after print() is kept from the original (it wraps the
    call in a one-tuple, which does not change what is printed on Python 3).
    """
    for r in range(9):
        for c in range(9):
            print(arr[r][c]),
        print('\n')
# Function to Find the entry in the Grid that is still not used
# Searches the grid to find an entry that is still unassigned. If
# found, the reference parameters row, col will be set the location
# that is unassigned, and true is returned. If no unassigned entries
# remains, false is returned.
# 'l' is a list variable that has been passed from the solve_sudoku function
# to keep track of incrementation of Rows and Columns
def find_empty_location(arr, l):
    """Locate the first unassigned (zero) cell, scanning row-major.

    On success, store its coordinates into `l` as [row, col] and return True;
    if the grid has no zero cell, return False.
    """
    for r in range(9):
        for c in range(9):
            if arr[r][c] == 0:
                l[0], l[1] = r, c
                return True
    return False
# Returns a boolean which indicates whether any assigned entry
# in the specified row matches the given number.
def used_in_row(arr, row, num):
    """Return True if `num` already appears anywhere in the given row."""
    return any(arr[row][c] == num for c in range(9))
# Returns a boolean which indicates whether any assigned entry
# in the specified column matches the given number.
def used_in_col(arr, col, num):
    """Return True if `num` already appears anywhere in the given column."""
    return any(arr[r][col] == num for r in range(9))
# Returns a boolean which indicates whether any assigned entry
# within the specified 3x3 box matches the given number
def used_in_box(arr, row, col, num):
    """Return True if `num` appears in the 3x3 box with top-left (row, col)."""
    return any(
        arr[row + dr][col + dc] == num
        for dr in range(3)
        for dc in range(3)
    )
# Checks whether it will be legal to assign num to the given row, col
# Returns a boolean which indicates whether it will be legal to assign
# num to the given row, col location.
def check_location_is_safe(arr, row, col, num):
    """Return True if `num` may legally be placed at (row, col).

    Legal means the digit is absent from the cell's row, column, and 3x3 box.
    """
    if used_in_row(arr, row, num):
        return False
    if used_in_col(arr, col, num):
        return False
    # Top-left corner of the 3x3 box containing (row, col).
    box_row = row - row % 3
    box_col = col - col % 3
    return not used_in_box(arr, box_row, box_col, num)
# Takes a partially filled-in grid and attempts to assign values to
# all unassigned locations in such a way to meet the requirements
# for Sudoku solution (non-duplication across rows, columns, and boxes)
def solve_sudoku(arr):
    """Fill every 0 cell of `arr` in place; return True if a solution exists.

    Classic backtracking: find the next empty cell, try digits 1-9, recurse,
    and undo the assignment when the recursion fails.
    """
    # l holds [row, col] of the next unassigned cell found.
    l = [0, 0]

    # No unassigned location left: the grid is complete.
    if not find_empty_location(arr, l):
        return True

    row, col = l

    for num in range(1, 10):
        if check_location_is_safe(arr, row, col, num):
            # Tentatively assign and recurse.
            arr[row][col] = num
            # Bug fix: the original had this recursive call folded into a
            # comment ("# return, if success, ya ! if(solve_sudoku(arr)):")
            # so the function returned True after the first assignment,
            # leaving the grid unsolved.
            if solve_sudoku(arr):
                return True
            # Failure: undo and try the next digit (backtrack).
            arr[row][col] = 0

    # No digit fits this cell; trigger backtracking in the caller.
    return False
# Driver: build a sample puzzle (0 = empty cell), solve it, and print the
# result.
grid = [[3, 0, 6, 5, 0, 8, 4, 0, 0],
        [5, 2, 0, 0, 0, 0, 0, 0, 0],
        [0, 8, 7, 0, 0, 0, 0, 3, 1],
        [0, 0, 3, 0, 1, 0, 0, 8, 0],
        [9, 0, 0, 8, 6, 3, 0, 0, 5],
        [0, 5, 0, 0, 9, 0, 6, 0, 0],
        [1, 3, 0, 0, 0, 0, 2, 5, 0],
        [0, 0, 0, 0, 0, 0, 0, 7, 4],
        [0, 0, 5, 2, 0, 6, 3, 0, 0]]

# if success print the grid
if solve_sudoku(grid):
    print_grid(grid)
else:
    # Bug fix: the original had a Python-2 style `print "No solution exists"`
    # mangled across two lines, which printed nothing on Python 3.
    print("No solution exists")
# The above code has been contributed by Harshit Sidhwa.
| [
"stalker.comp@gmail.com"
] | stalker.comp@gmail.com |
b459c08ff024d80fab1c4cc22f00a975da13d9a8 | cfc6fa337c7e14bae58b9e0398df0b3867670e96 | /FinalReact/ReactBlog/R_Blog/views.py | 87f24c52c7cecabe100ae5aca14337861026e716 | [] | no_license | nahidhasan007/Django-React | f9f5d5e9085bcbe51b0d733bd9f8443452648630 | 447a48c978b492dca25ba63ee9de2d0d3a38a4c4 | refs/heads/master | 2023-04-08T05:44:45.743812 | 2021-04-16T10:09:06 | 2021-04-16T10:09:06 | 352,295,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | from django.shortcuts import render
from rest_framework import viewsets
from .models import Post, Profile
from .serializers import PostSerializer
class PostView(viewsets.ModelViewSet):
    """REST API endpoint for Post objects, ordered newest-first (by -id).

    ModelViewSet supplies the full list/retrieve/create/update/destroy set of
    actions from the queryset and serializer below.
    """
    queryset = Post.objects.all().order_by("-id")
    serializer_class = PostSerializer
| [
"teachpeach007@gmail.com"
] | teachpeach007@gmail.com |
a8b576214b7eab96cf1de89a58b6b5785690b0bb | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/query-tests/Testing/test.py | 1c79f177ac606504ddb02ae7bad95bd14ff3fb4a | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | CodeQL | UTF-8 | Python | false | false | 213 | py | from unittest import TestCase
class MyTest(TestCase):
    # NOTE(review): this file lives under CodeQL's query-tests for "Testing",
    # so the non-idiomatic assertions below (assertTrue/assertFalse wrapping
    # ==, >, `in`, and `is` on literals) appear to be deliberate fixtures for
    # the queries to flag -- confirm before "fixing" them.

    def test1(self):
        self.assertTrue(1 == 1)
        self.assertFalse(1 > 2)
        self.assertTrue(1 in [1])
        self.assertFalse(0 is "")
| [
"mark@hotpy.org"
] | mark@hotpy.org |
82c59e960c39ab218f51fa905bfc31f414b29f22 | c98a1f74ea576d670d094d5e5259bfe2e4449b88 | /PKUTreeMaker/test/CrabJobsSrc/MC/crab3_analysisWGToLNuG.py | afc4cbad75468874b348a42e7c1d3813ea0b8177 | [] | no_license | AndrewLevin/VBSWG_Ntuple | d71544d2b233eb0e88d185d947bbf32a32fbca18 | 37eac7ad4d03eb0d4a463f9f121c4e998f0c5a34 | refs/heads/main | 2023-02-21T11:42:28.642572 | 2020-12-01T15:31:37 | 2020-12-01T15:31:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | from WMCore.Configuration import Configuration
config = Configuration()

# General: CRAB task name and log handling.
config.section_("General")
config.General.requestName = 'fullrun2_2017_version5_WGJets_v1_2'
config.General.transferLogs = True

# JobType: analysis job running the CMSSW config below, shipping the
# Fall17 jet-energy-correction text files alongside each job.
config.section_("JobType")
config.JobType.maxMemoryMB = 3000
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Fall17_17Nov2017_V32_MC_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L2L3Residual_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L2L3Residual_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK4PFPuppi.txt']
config.JobType.psetName = 'analysis_mc.py'
config.JobType.allowUndistributedCMSSW = True

# Data: input MC dataset and file-based splitting (2 files/job, all files).
config.section_("Data")
##config.Data.outputPrimaryDataset = 'VBS_WGAMMA_94X'
config.Data.inputDataset = '/WGToLNuG_01J_5f_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v3/MINIAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 2
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'fullrun2_2017_version5_WGJets_v1_2'

# Site: where the job outputs are stored.
config.section_("Site")
config.Site.storageSite = 'T2_CN_Beijing'
| [
"15827238926@163.com"
] | 15827238926@163.com |
fa2eb58fd5bab605020259e31f53b962d74cc371 | adf65dbe1a15560f3b4930fa393c2327f29ab3c2 | /myapp/web/tests.py | d96c0860d6d36ca40c0104798f20c4fc07269a90 | [] | no_license | sanix-sandel/my_app | ebe59d186824f6539a2b392dd7cf992bccb221c0 | 652e83f2128039ed979cc90f9254fb54d21ebcea | refs/heads/master | 2023-03-30T09:21:46.513036 | 2021-03-26T06:06:09 | 2021-03-26T06:06:09 | 351,671,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from django.test import SimpleTestCase
from django.urls import reverse
class HomePageTests(SimpleTestCase):
    """Smoke tests: the home page answers HTTP 200 by path and by URL name."""

    def test_homepage_status_code(self):
        """GET / returns 200."""
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)

    def test_homepage_url_name(self):
        """Reversing the 'home' URL name also returns 200."""
        resp = self.client.get(reverse('home'))
        self.assertEqual(resp.status_code, 200)
"sanicksikani@gmail.com"
] | sanicksikani@gmail.com |
87bcb438a3c173c9b0e8bb2115a0d3b8841aef86 | 20d88c37924ec96d5b5d02eb13edc93c21a25b9a | /Uniq/urls.py | efbcc5ab61794b6fd3d5dd26134c158836230ffd | [] | no_license | AthifSaheer/UniqWalls-Wallpaper | d51c9e99153473427ead8a7b16631f860502a09c | c392732d239fb3174bd6e7c7c9b758cf12545f20 | refs/heads/main | 2023-06-03T22:33:02.606873 | 2021-06-05T01:28:48 | 2021-06-05T01:28:48 | 337,617,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | """Uniq URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    # All application routes are delegated to the UniqApp URLconf.
    path('', include('UniqApp.urls')),
    # Built-in auth views (login, logout, password reset, ...).
    path('accounts/', include('django.contrib.auth.urls')),
]

# Serve static and media files from Django itself (static() returns routes
# only in DEBUG mode; production should serve these from the web server).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"liteboook@gmail.com"
] | liteboook@gmail.com |
ae903173e3955111b0b3161395c12e408a48aa9e | 940d7b93fb27e8eead9b6e52bc5c7444666744dd | /python/src/Lib/test/test_unary.py | 9854f64d0ce1c69165d5d924590565058f570e20 | [
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"Apache-2.0"
] | permissive | pilotx45/sl4a | d446531d310cc17d93f24aab7271a0813e8f628d | 150e3e46b5103a9b9a391034ef3fbc5bd5160d0f | refs/heads/master | 2022-03-24T19:48:30.340479 | 2022-03-08T16:23:58 | 2022-03-08T16:23:58 | 277,016,574 | 1 | 0 | Apache-2.0 | 2022-03-08T16:23:59 | 2020-07-04T01:25:36 | null | UTF-8 | Python | false | false | 1,802 | py | """Test compiler changes for unary ops (+, -, ~) introduced in Python 2.2"""
import unittest
from test.test_support import run_unittest, have_unicode
class UnaryOpTestCase(unittest.TestCase):
    """Tests for the unary operators +, -, ~ (legacy Python 2 code: long
    literals like 2L and the deprecated assert_ method)."""

    def test_negative(self):
        # Unary minus on int, long, float and complex; --x is double negation.
        self.assert_(-2 == 0 - 2)
        self.assert_(-0 == 0)
        self.assert_(--2 == 2)
        self.assert_(-2L == 0 - 2L)
        self.assert_(-2.0 == 0 - 2.0)
        self.assert_(-2j == 0 - 2j)

    def test_positive(self):
        # Unary plus is the identity for numeric types.
        self.assert_(+2 == 2)
        self.assert_(+0 == 0)
        self.assert_(++2 == 2)
        self.assert_(+2L == 2L)
        self.assert_(+2.0 == 2.0)
        self.assert_(+2j == 2j)

    def test_invert(self):
        # NOTE(review): despite the name, these assertions exercise unary
        # minus, not the ~ operator.
        self.assert_(-2 == 0 - 2)
        self.assert_(-0 == 0)
        self.assert_(--2 == 2)
        self.assert_(-2L == 0 - 2L)

    def test_no_overflow(self):
        # A 32-digit literal must agree with its explicit long form for every
        # unary operator (no silent int overflow).
        nines = "9" * 32
        self.assert_(eval("+" + nines) == eval("+" + nines + "L"))
        self.assert_(eval("-" + nines) == eval("-" + nines + "L"))
        self.assert_(eval("~" + nines) == eval("~" + nines + "L"))

    def test_negation_of_exponentiation(self):
        # Make sure '**' does the right thing; these form a
        # regression test for SourceForge bug #456756.
        # (** binds tighter than unary minus: -2**4 is -(2**4).)
        self.assertEqual(-2 ** 3, -8)
        self.assertEqual((-2) ** 3, -8)
        self.assertEqual(-2 ** 4, -16)
        self.assertEqual((-2) ** 4, 16)

    def test_bad_types(self):
        # Unary operators on strings must raise TypeError; ~ is additionally
        # unsupported for complex and float.
        for op in '+', '-', '~':
            self.assertRaises(TypeError, eval, op + "'a'")
            if have_unicode:
                self.assertRaises(TypeError, eval, op + "u'a'")

        self.assertRaises(TypeError, eval, "~2j")
        self.assertRaises(TypeError, eval, "~2.0")
def test_main():
    """Entry point used by the regression-test framework."""
    run_unittest(UnaryOpTestCase)

if __name__ == "__main__":
    test_main()
| [
"damonkohler@gmail.com"
] | damonkohler@gmail.com |
805e86d4a9542ddd6fa295eb01cfeba9b28cf056 | 7e29e8e9979d05cd4521512a0e12ffd516b1cdd3 | /parts/migrations/0001_initial.py | e0d166212c7b992eb75d30fbb195f10e6f8cd762 | [] | no_license | ShipraShalini/UrParts | d838b9b21485c169136ca7ac6d7b892ac8245e33 | 1327681450b9a57b058f4a9a95d833c06f171095 | refs/heads/main | 2023-04-11T12:16:10.867606 | 2021-04-16T14:50:42 | 2021-04-16T14:50:42 | 358,036,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # Generated by Django 3.2 on 2021-04-15 10:26
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the Part table with a composite uniqueness
    constraint over (manufacturer, category, model, part, part_category)."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Part",
            fields=[
                # UUID primary key generated in Python (uuid4), not editable.
                (
                    "uuid",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("manufacturer", models.CharField(max_length=100)),
                ("category", models.CharField(max_length=100)),
                ("model", models.CharField(max_length=100)),
                ("part", models.CharField(max_length=100)),
                ("part_category", models.CharField(max_length=100)),
                # Set once at insert time.
                ("created_at", models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # No duplicate rows for the same (manufacturer, ..., part_category).
        migrations.AddConstraint(
            model_name="part",
            constraint=models.UniqueConstraint(
                fields=("manufacturer", "category", "model", "part", "part_category"),
                name="unique_part_entry",
            ),
        ),
    ]
| [
"code.shipra@gmail.com"
] | code.shipra@gmail.com |
951dd813a10fa627d6f3fcc50d81d97bc753ee18 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/trainer_v2/per_project/transparency/splade_regression/data_loaders/pairwise_eval.py | 580e05023b05169f286b6833aa70f8dfbda1ea89 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 3,775 | py | from collections import defaultdict
import tensorflow as tf
from typing import List, Iterable, Callable, Dict, Tuple, Set
from tensorflow.python.distribute.distribute_lib import Strategy
from transformers import AutoTokenizer
from trainer_v2.chair_logging import c_log
from trainer_v2.custom_loop.train_loop_helper import fetch_metric_result
from trainer_v2.per_project.transparency.splade_regression.data_loaders.iterate_data import iterate_triplet
from trainer_v2.per_project.transparency.splade_regression.path_helper import partitioned_triplet_path_format_str
pairwise_roles = ["q", "d1", "d2"]
def load_pairwise_mmp_data(target_partition: List[int]) -> List[Tuple[str, str, str]]:
    """Read (query, doc1, doc2) triplets from the given partition files."""
    c_log.info("load_pairwise_eval_data")
    path_fmt = partitioned_triplet_path_format_str()
    triplets: List[Tuple[str, str, str]] = []
    for part in target_partition:
        triplets.extend(iterate_triplet(path_fmt.format(part)))
    return triplets
def dict_to_tuple(encoded):
    """Extract (input_ids, attention_mask) from a tokenizer output mapping."""
    return encoded['input_ids'], encoded['attention_mask']
class PairwiseAccuracy(tf.keras.metrics.Mean):
    """Running mean of pairwise ranking accuracy.

    Each update records the fraction of the batch where the first score s1
    strictly exceeds the second score s2 (tf.less(s2, s1)).
    """
    def __init__(self, name='pairwise_accuracy', **kwargs):
        super().__init__(name=name, **kwargs)

    def update_state(self, s1, s2):
        # 1.0 where s1 > s2, else 0.0; fold the batch mean into the Mean base.
        is_correct = tf.cast(tf.less(s2, s1), tf.float32)
        is_correct_f = tf.reduce_mean(is_correct)
        super(PairwiseAccuracy, self).update_state(is_correct_f)
# each instance is (query, d_pos, d_neg), where each of documents are (input_ids, attention_masks)
def build_pairwise_eval_dataset(
        triplet_list, checkpoint_model_name, batch_size, max_seq_length) -> tf.data.Dataset:
    """Tokenize every (query, d1, d2) triplet and wrap them in a batched
    tf.data.Dataset of ((ids, mask), (ids, mask), (ids, mask)) tuples.

    Note: all triplets are tokenized eagerly up front and held in memory
    before the dataset is built.
    """
    c_log.info("build_pairwise_eval_dataset")
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_model_name)

    def encode(text):
        # Pad every text to max_seq_length so tensors batch cleanly.
        d = tokenizer(text, padding="max_length", max_length=max_seq_length)
        return dict_to_tuple(d)

    items = []
    for q, d1, d2 in triplet_list:
        e = encode(q), encode(d1), encode(d2)
        items.append(e)

    def get_generator() -> Iterable[Tuple]:
        yield from items

    # Each element: three (input_ids, attention_mask) pairs of int32 vectors.
    int_list = tf.TensorSpec([None], dtype=tf.int32)
    int_pair_list = (int_list, int_list)
    output_signature = int_pair_list, int_pair_list, int_pair_list
    dataset = tf.data.Dataset.from_generator(get_generator, output_signature=output_signature)
    dataset = dataset.batch(batch_size)
    return dataset
class PairwiseEval:
    """Evaluates a scoring model on (query, d1, d2) triplets under a
    tf.distribute strategy, accumulating pairwise accuracy (d1 is treated as
    the document that should out-score d2)."""

    def __init__(self,
                 triplet_encoded: tf.data.Dataset,
                 strategy: Strategy,
                 model: tf.keras.models.Model
                 ):
        self.triplet_encoded = triplet_encoded
        self.strategy = strategy
        self.model = model
        self.metrics = {
            'pairwise_accuracy': PairwiseAccuracy()
        }

    @tf.function
    def eval_fn(self, item):
        # Encode the query and both documents, score by dot product, and
        # update every metric with the (s1, s2) score pair.
        q, d1, d2 = item
        q_enc = self.model(q, training=False)
        d1_enc = self.model(d1, training=False)
        d2_enc = self.model(d2, training=False)

        def score(q_enc, d_enc):
            return tf.reduce_sum(tf.multiply(q_enc, d_enc), axis=1)

        s1 = score(q_enc, d1_enc)
        s2 = score(q_enc, d2_enc)
        # NOTE(review): leftover debug print inside a tf.function -- it only
        # fires at trace time; consider removing.
        print(s1, s2)
        for m in self.metrics.values():
            m.update_state(s1, s2)

    def do_eval(self):
        """Run eval_fn over the whole dataset; return (placeholder_loss,
        metric_name -> value dict). The first element is a fixed 0.0."""
        c_log.info("PairwiseEval::do_eval")
        iterator = iter(self.triplet_encoded)
        for item in iterator:
            args = item,
            per_replica = self.strategy.run(self.eval_fn, args=args)

        metrics = self.metrics
        metric_res = fetch_metric_result(metrics)
        return 0.0, metric_res
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
0a79c1a51d52335e7b62064a7d0b834bda785a9f | d07cc99f0729658b785961a7a93b55e0ccc045d8 | /APscheduler/OtherClass.py | 600c5666d00626bf2d7a8bc3726ad143347ef753 | [] | no_license | dajun928/MyPyCharm | 8b6f3c7ea934dcdbb88e126e810467612b7505ad | 5cde8db68b0f2396cd6e114d19e35cd025d52d98 | refs/heads/master | 2020-03-27T04:48:55.985426 | 2019-03-06T13:48:54 | 2019-03-06T13:48:54 | 145,970,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | #!/usr/bin/python
#coding=utf-8
class OtherClass:
    # Python 2 module (print statement). my_job02 is presumably a job
    # callable registered with APScheduler elsewhere -- confirm against the
    # scheduler setup.
    def my_job02(self):
        print 'task01'
if __name__ == '__main__':
    # Manual smoke test left commented out; module does nothing when run.
    # o=OtherClass()
    # o.my_job02()
    pass
| [
"1663177102@qq.com"
] | 1663177102@qq.com |
5b03a8a6be6ea64cd9268addff2dd028e033cce1 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /wt5/wt5/tasks_test.py | 680d1c69a9e75204e4b9ce58eaf356a7ffd22173 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,356 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for WT5 tasks."""
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import t5
import tensorflow.compat.v1 as tf
import wt5.wt5.mixtures # pylint:disable=unused-import
import wt5.wt5.tasks # pylint:disable=unused-import
# TF1-compat graph mode with eager execution, as required by these T5 tests.
tf.disable_v2_behavior()
tf.enable_eager_execution()

MixtureRegistry = t5.data.MixtureRegistry
TaskRegistry = t5.data.TaskRegistry

# Feature lengths passed to get_dataset in every test below.
_SEQUENCE_LENGTH = {'inputs': 2048, 'targets': 512}

# Registered task names exercised by TasksTest.test_task.
_TASKS = [
    'esnli_v010',
    'esnli_v010_0_expln',
    'esnli_explanations_take100_v010',
    'esnli_labels_skip100_v010',
    'mnli_v002',
    'cos_e_v001',
    'cos_e_v001_0_expln_like_esnli',
    'cos_e_explanations_take100_v001',
    'cos_e_labels_skip100_v001',
    'movie_rationales_v010',
    'movie_rationales_v010_no_expl',
    'imdb_reviews_v100',
    'amazon_reviews_books_v1_00_v010',
]

# Registered mixture names exercised by TasksTest.test_mixture.
_MIXTURES = [
    'cos_e_100_explanations',
    'esnli_100_explanations',
    'esnli_mnli_all_explanations',
    'imdb_reviews_movie_rationales',
    'esnli_cos_e_transfer',
    'movie_rationales_100_explanations',
    'amazon_books_movies_equal',
]
class TasksTest(parameterized.TestCase):
    """Smoke tests: every registered WT5 task/mixture can build a train
    dataset and yield at least one example."""

    @parameterized.parameters(((name,) for name in _TASKS))
    def test_task(self, name):
        task = TaskRegistry.get(name)
        logging.info('task=%s', name)
        ds = task.get_dataset(_SEQUENCE_LENGTH, 'train')
        # Pull a single example to verify the pipeline runs end to end.
        for d in ds:
            logging.info(d)
            break

    @parameterized.parameters(((name,) for name in _MIXTURES))
    def test_mixture(self, name):
        mixture = MixtureRegistry.get(name)
        logging.info('mixture=%s', name)
        ds = mixture.get_dataset(_SEQUENCE_LENGTH, 'train')
        # Pull a single example to verify the pipeline runs end to end.
        for d in ds:
            logging.info(d)
            break


if __name__ == '__main__':
    absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
9aec651b26e2ac105c1b0d295e746bb1ed5c2c5b | 2ce27b05f45cef6ce3ae5c02b8e83e548def2fc6 | /INTERMEDIATE/Tuple/Slicing Tuple.py | 9e8bd24716bf46495184cff8f503efa307ce49d6 | [] | no_license | Ajay2521/Python | 775b7d99736e83e4d0c37302b91d1413dd2c0d3b | a426dd7717de8a5e60e584d208ae7120bb84c1b3 | refs/heads/master | 2022-12-01T17:49:12.672061 | 2020-08-15T14:55:12 | 2020-08-15T14:55:12 | 273,632,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | # In this lets see more about "tuple Datatype" in Python.
# A tuple in Python is used to store the sequence of various types of data.
# The items in the tuple are separated with the comma ( , ) and enclosed with the square brackets [ ].
# Characteristics of tuple are as follow :
# 1) tuple are "Ordered".
# 2) Elements of the tuple can be accessed by using "Index" as same as String.
# 3) tuple are "Mutable".
# 4) tuple can able to store various data elements.
# Here is the Program to understand "Slicing the tuple"
# The elements of the tuple can be accessed by using the slice operator [].
# The index starts from 0 and goes to length - 1 of the length of tuple.
# Syntax for getting sub - tuple by Slice and range is
# tuple_variable ( Start : Stop : Step Size )
# Start - Is the Starting Index position of the tuple.
# Stop - Is the Last Index position of the tuple.
# Step Size - Is the used to skip the nth element within the start and stop.
tuple = ( 1 , 2, 3, 4, 5 , 6 )
# Slicing the elements.
print ( "\nSlicing element in the index place 3 : " , tuple [ 3 ] )
# Slicing the elements using Range.
print ( "\nAll the value of the \"tuple\" is : " , tuple [ : ] )
print ( "\nAll the elements after the index value 2 is : " , tuple [ 2 : ] )
print ( "\nAll the elements in the range from index value 1 to index value 4 is : " , tuple [ 1 : 4 ] )
print ( "\nAll the elements in the range from index value 0 to index value 5 with the Step size oftwo element is : " , tuple [ 0 : 5 : 2 ] )
| [
"noreply@github.com"
] | Ajay2521.noreply@github.com |
5cc2c2bc4435d633df0d71b4786e520e48900f98 | 557314cb5444754cedc04d0e4c25654537268c9b | /pytorch/rnn/dataset.py | 671b0016776803b1d505899517c1dcf1c8142d44 | [] | no_license | wj-Mcat/python-code-snippet | 9a6d1a7ad4a95a87f64051aa2ce3ef5c3910ddc4 | c5ca58a7779676e20f15a631484c4f20e5c94688 | refs/heads/master | 2022-06-28T21:42:21.942328 | 2020-05-10T13:08:50 | 2020-05-10T13:08:50 | 262,786,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from torch.utils import data
class ATIS(data.Dataset):
"""
一个非常简单的Dataset示例代码
"""
def __init__(self, X, slots, intents):
self.X = X
self.slots = slots
self.intents = intents
self.size = X.shape[0]
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.X[idx], self.slots[idx], self.intents[idx]
| [
"1435130236@qq.com"
] | 1435130236@qq.com |
666bf4a3c1b1ac5a795c6d9f8c4380cf23a17a5b | c6f22a6155b0627bf792a321fccba2f5d3f1bf19 | /backend/home/migrations/0004_ghfdgfdgfd.py | a5ef0e3edb0bb387850c4161813dc5aa09fc29ff | [] | no_license | crowdbotics-apps/mobile-4-dec-dev-16281 | 938d8902c0f61057a968e09cb114a7a0a72966aa | e842ed5457ab19bc66c1de400159e919b33b29ba | refs/heads/master | 2023-01-22T09:31:48.333341 | 2020-12-04T14:21:17 | 2020-12-04T14:21:17 | 318,406,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | # Generated by Django 2.2.17 on 2020-12-04 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0003_gghhh"),
]
operations = [
migrations.CreateModel(
name="Ghfdgfdgfd",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("hgfhfh", models.BigIntegerField()),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
516f9676a2cae761aaa56ca9c7a04d95db500ed2 | 4f0385a90230c0fe808e8672bb5b8abcceb43783 | /LNH/day4-teacher/2 装饰器/4 装饰器修订.py | caadd80dff07be7347d92b71cc44056d91b65a9e | [] | no_license | lincappu/pycharmlearningproject | 4084dab7adde01db9fa82a12769a67e8b26b3382 | b501523e417b61373688ba12f11b384166baf489 | refs/heads/master | 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | # 有参装饰器的修订
# 1.参数
# 2.返回值
# 3.函数基本信息。
# import time
# from functools import wraps
#
# def timmer(func):
# @wraps(func)
# def inner(*args,**kwargs):
# start_time=time.time()
# res=func(*args,**kwargs)
# end_time=time.time()
# print('run time is :[%s]' % (end_time - start_time))
# return res
# inner.__doc__=
# return inner
#
# @timmer # index=timmer(index)
# def index(name):
# time.sleep(1)
# print('functiion index')
# print(name)
# return 123
#
#
# res=index('fls')
# print(res)
#
#
# import time
# from functools import wraps
#
# def timmer(func):
# @wraps(func)
# def inner(*args,**kwargs):
# start_time=time.time()
# res=func(*args,**kwargs)
# stop_time=time.time()
# print('run time is :[%s]' %(stop_time-start_time))
# return res
#
# return inner
#
# @timmer
# def index():
# '''
# index function
# :return:
# '''
# time.sleep(3)
# print('welcome to index page')
# return 123
#
# @timmer #home=timmer(home) #home=inner
# def home(name):
# time.sleep(2)
# print('welcome %s to home page' %name)
# return 456
#
# # res=index() # res=inner()
# # print(res)
# #
# # res=home('egon') #inner('egon')
# # print(res)
#
# # print(index.__doc__)
# print(help(index))
| [
"lincappu@163.com"
] | lincappu@163.com |
5a4560c909ecfcd1bb9a56752e943418a440e91d | 75a2ad10d18aea735eaf3e859eb3988d94e9c36a | /CodeUp/예제/1295_알파벳대소문자변환.py | 02812ad0fd6a295035d54f34dc0d50e34174084c | [] | no_license | sbtiffanykim/problem-solving | d2679b0405f9c2397d7af780721066bfbd812e32 | 10a6ec90d29c9126f56c9be2ee696ce30ca30bd5 | refs/heads/master | 2023-04-25T18:26:56.511663 | 2021-06-10T12:30:13 | 2021-06-10T12:30:13 | 360,552,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | """
#1295: 알파벳 대소문자 변환
"""
s = input()
for c in s:
if c >= "a" and c <= "z":
print(chr(ord(c) - ord("a") + ord("A")), end="")
elif c >= "A" and c <= "Z":
print(chr(ord(c) - ord("A") + ord("a")), end="")
else:
print(c, end="")
| [
"sbtiffanykim@gmail.com"
] | sbtiffanykim@gmail.com |
40f3ae34a777b65d2d3d26bfa8d4ea1629e3ab06 | a8b21ea5633df0f5f75b3ee906ab10ce2414f6b7 | /bandwitch/list_common_enzymes/data/__init__.py | 0fcf080bb0f594bb27fd1c47d09c1c5d87a44bac | [
"MIT"
] | permissive | Edinburgh-Genome-Foundry/BandWitch | 34229d336899325286686faf19ba97f6db6eba2a | b6597077fc8ba03e7a7ef271bbd24f81f90632f2 | refs/heads/master | 2023-07-31T22:34:18.651189 | 2022-06-14T12:32:10 | 2022-06-14T12:32:10 | 106,592,382 | 15 | 0 | MIT | 2020-09-20T18:50:07 | 2017-10-11T18:19:45 | Python | UTF-8 | Python | false | false | 608 | py | """Loads REBASE enzymes infos (mehtylation sensitivity and providers)"""
__all__ = ['enzymes_infos']
import os.path as osp
csv_path = osp.join(osp.dirname(osp.realpath(__file__)), "enzymes_infos.csv")
with open(csv_path, "r") as f:
_lines = f.read().split("\n")
_fields = _lines[0].split(";")
_replacements = dict([("N/A", False), ("+", True), ("-", True)] +
[(str(i), i) for i in range(50)])
enzymes_infos = {
_line.split(";")[0]: dict(zip(_fields, [
_replacements.get(e, e) for e in _line.split(";")]))
for _line in _lines[1:]
}
| [
"valentin.zulkower@gmail.com"
] | valentin.zulkower@gmail.com |
8a74c28a08a46463894b8bae23427862d8af28e7 | 76b1e713a3057e6f08abc116814af00891dbc2ef | /store/views/orders.py | 29e2e13ea138449e13d79a05a236a2f88ac6878f | [] | no_license | Jay28497/Django-Ecommerce-Website | ed17f6536fe4be4d6db658c46999bb05ec22d3f8 | 2697d376c8ff2720720183c0e475b188ff7b0e33 | refs/heads/master | 2023-03-31T15:20:56.008251 | 2021-04-10T12:21:08 | 2021-04-10T12:21:08 | 355,427,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from django.shortcuts import render
from django.views import View
from store.models.orders import Order
class OrderView(View):
def get(self, request):
customer = request.session.get('customer_id')
orders = Order.get_orders_by_customer(customer)
# print(customer, orders)
return render(request, 'store/orders.html', {'orders': orders})
| [
"jaykanjariya28@gmail.com"
] | jaykanjariya28@gmail.com |
7309d255ad74136c1423ec453f4a007ab3c5d182 | 8191c12eb7ebd4296b4e5d35e7de9b53bc767a5a | /docs/examples/configure_sm_pairing_properties.py | f0f907051e4833d81603f28245d9c3c837595939 | [
"MIT"
] | permissive | jreynders/BLESuite-1 | 758404823c71fb15ff8326a5611aed742065bda4 | 8335d47d76919b79f00cea72a1e58524f3440826 | refs/heads/master | 2023-02-20T22:21:35.891269 | 2022-11-08T22:09:06 | 2022-11-08T22:09:06 | 168,422,668 | 0 | 0 | MIT | 2023-02-08T20:01:18 | 2019-01-30T22:04:54 | Python | UTF-8 | Python | false | false | 1,763 | py | from blesuite.connection_manager import BLEConnectionManager
adapter = 0
role = 'central'
io_cap = 0x03
oob = 0x00
mitm = 0x01
bond = 0x01
lesc = 0x01
keypress = 0x00
ct2 = 0x01
rfu = 0x00
max_key_size = 16
initiator_key_distribution = 0x01
responder_key_distribution = 0x01
peer_device_address = "AA:BB:CC:DD:EE:FF"
peer_address_type = "public"
with BLEConnectionManager(adapter, role) as connection_manager:
# Get default Security Manager pairing properties to see baseline
print connection_manager.get_security_manager_protocol_default_pairing_parameters()
# Sets the default Security Manager pairing properties for all established connections
connection_manager.set_security_manager_protocol_default_pairing_parameters(io_cap, oob, mitm, bond, lesc,
keypress, ct2, rfu, max_key_size,
initiator_key_distribution,
responder_key_distribution)
print connection_manager.get_security_manager_protocol_default_pairing_parameters()
# initialize BLEConnection object
connection = connection_manager.init_connection(peer_device_address, peer_address_type)
# create connection
connection_manager.connect(connection)
# modify pairing parameters for just this connection
connection_manager.set_security_manager_protocol_pairing_parameters_for_connection(connection, io_cap=0x02)
# show the changes for the security manager made for the connection made in the last step
print connection_manager.get_security_manager_protocol_pairing_parameters_for_connection(connection)
| [
"taylor.trabun@nccgroup.trust"
] | taylor.trabun@nccgroup.trust |
f714ff4ca4736b9aef983bab85e1ddfc87a679aa | 19a32440205b2caeec67c73c10d917b5fb30a86a | /test/test_job_statistics.py | 6d40c6e536931197906776c433f0367c53ee3a96 | [
"MIT",
"Apache-2.0"
] | permissive | marrotte/isilon_sdk_python | 480e84312f5924a506aeb09c9c7cae79a2b9b7f4 | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | refs/heads/master | 2020-03-23T07:31:40.376316 | 2016-06-07T23:44:31 | 2016-06-07T23:44:31 | 141,277,076 | 1 | 0 | MIT | 2018-07-17T11:02:08 | 2018-07-17T11:02:08 | null | UTF-8 | Python | false | false | 1,239 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.job_statistics import JobStatistics
class TestJobStatistics(unittest.TestCase):
""" JobStatistics unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testJobStatistics(self):
"""
Test JobStatistics
"""
model = swagger_client.models.job_statistics.JobStatistics()
if __name__ == '__main__':
unittest.main() | [
"Alex.Pecoraro@isilon.com"
] | Alex.Pecoraro@isilon.com |
771c7b41555428226550940c68115a3ee5b20af5 | 9ecd7568b6e4f0f55af7fc865451ac40038be3c4 | /tianlikai/anhui/chuzhou_zhaobiao.py | 902f6f1c213fa802e3604080f3eb86588fe10015 | [] | no_license | jasonTLK/scrapy | f5ac6e575e902c077a07dc0eb9d228506f1a173f | 2de8245fbc8731cfd868bbd91168e26271045300 | refs/heads/master | 2021-01-20T04:22:23.080864 | 2017-04-28T07:46:29 | 2017-04-28T07:46:29 | 89,681,374 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,941 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, FormRequest
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
import datetime
from items.biding import biding_gov
from utils.toDB import *
# 安徽滁州招投标网站
# 招标信息
class hz_gov_Spider(scrapy.Spider):
name = "chuzhou_zhaobiao.py"
allowed_domains = ["www.czzbcg.com"]
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'middlewares.useragent_middleware.RandomUserAgent': 400,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
# 'middlewares.proxy_middleware.ProxyMiddleware': 250,
# 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
# 'middlewares.retry_middleware.RetryWithProxyMiddleware': 300,
# 'middlewares.timestamp_middleware.TimestampMiddleware': 120
}
}
def start_requests(self):
urls = [
"http://www.czzbcg.com/czztb/jyxx/002001/002001001/MoreInfo.aspx?CategoryNum=002001001",
"http://www.czzbcg.com/czztb/jyxx/002002/002002001/MoreInfo.aspx?CategoryNum=002002001"
]
pages = [319, 126]
for i in range(len(urls)):
yield Request(urls[i], callback=self.parse, meta={'url': urls[i], 'page': pages[i]})
def parse(self, response):
cookies = response.headers['Set-Cookie']
url = response.meta['url']
page = response.meta['page']
selector = Selector(response)
start = 2
__VIEWSTATE = selector.xpath("//input[@id='__VIEWSTATE']/@value").extract()
headers = {
"Cookie": cookies,
"Referer": url,
"Host": "www.czzbcg.com"
}
while start <= page:
yield FormRequest(url=url,
formdata={
'__VIEWSTATE': __VIEWSTATE[0],
'__EVENTTARGET': 'MoreInfoList1$Pager',
'__EVENTARGUMENT': str(start)}, headers=headers,
callback=self.middle, meta={'page':str(start)})
start += 1
def middle(self, response):
print "当前是第:" + response.meta['page'] + "页"
selector = Selector(response)
urls = selector.xpath("//tr[@valign='top']//a/@href").extract()
names=[]
for i in urls:
names.append(selector.xpath("//a[@href='" + i + "']/text()").extract()[0].strip())
for i in range(len(names)):
url = "http://www.czzbcg.com" + "".join(urls[i])
str = "".join(names[i]) + "," + url
print str
yield Request(url=url, callback=self.parse2, meta={"info": str})
def parse2(self, response):
infos = response.meta["info"]
items = biding_gov()
items["url"] = response.url
items["name"] = "".join(infos).split(",")[0]
items["info"] = ""
items["create_time"] = datetime.datetime.now()
items["update_time"] = datetime.datetime.now()
page_info = "".join(response.body)
items["info"] = "".join(page_info)
db = MongodbHandle("172.20.3.10 ", 27017, "Biding_announcement")
db.get_insert(
"bid_anhui_ChuZhou",
{
"url": items["url"],
"name": items["name"],
"info": items["info"],
"create_time": items["create_time"],
"update_time": items["update_time"]
}
)
print items["url"]
print items["name"]
| [
"18723163167@163.com"
] | 18723163167@163.com |
0f68e2076139798798ae823d9174cb2c98a714ad | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/odtjoh001/question1.py | b2cbad1ff099511e3cfff70991ca97165e01ef48 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | year = eval(input("Enter a year:\n"))
if(year%4 == 0 and year % 100 !=0) or (year % 400 == 0):
print(year, "is a leap year.")
else:
print(year, "is not a leap year.")
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
6258638b9c51b8141d51fe91279f56e729151b39 | 12bedb87964f0093da27b7d6103996d19f355768 | /Exercise11.py | 805903c712058196defe72c6cb15c1df26c5c03b | [] | no_license | ErenBtrk/PythonDictionaryExercises | f77e9d09025cbff1592aa645018edcb23fbd1abe | 182146333843f582fe28006f889636f7d86c2352 | refs/heads/master | 2023-04-24T09:08:08.627173 | 2021-05-16T16:17:40 | 2021-05-16T16:17:40 | 367,856,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | '''
11. Write a Python program to multiply all the items in a dictionary
'''
my_dict = {'data1':5,'data2':-5,'data3':3}
total = 1
for key,value in my_dict.items():
total *= value
print(total) | [
"erenbtrk@hotmail.com"
] | erenbtrk@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.