hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d5dc684691063784e6ac984f160e9b545454def | 4,565 | py | Python | ppr-api/tests/unit/models/test_utils.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | ppr-api/tests/unit/models/test_utils.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | ppr-api/tests/unit/models/test_utils.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the datetime utility functions are working as expected."""
from datetime import timedelta as _timedelta
from ppr_api.models import utils as model_utils
def test_expiry_dt_from_years():
    """Assert a 5 life-year expiry lands exactly 5 calendar years out, at end of day."""
    expiry = model_utils.expiry_dt_from_years(5)
    current = model_utils.now_ts()
    print('Expiry timestamp: ' + model_utils.format_ts(expiry))
    print('Now timestamp: ' + model_utils.format_ts(current))
    assert expiry.year - current.year == 5
    # Expiry timestamps are pinned to the final second of the day.
    assert (expiry.hour, expiry.minute, expiry.second) == (23, 59, 59)
    assert expiry.day == current.day
    # Timezone conversion may roll the month forward by one.
    assert expiry.month in (current.month, current.month + 1)
def test_ts_from_iso_format():
    """Assert ISO date-time strings (with offsets) convert to the expected UTC fields."""
    # Each case pairs an ISO-8601 input with the datetime attributes it must produce.
    cases = [
        ('2021-02-16T23:00:00-08:00',
         {'day': 17, 'month': 2, 'year': 2021, 'hour': 7, 'minute': 0, 'second': 0}),
        ('2021-02-16T23:00:00+00:00', {'day': 16, 'hour': 23}),
        ('2021-02-16T13:00:00-08:00', {'day': 16, 'hour': 21}),
        # Crossing both midnight and a month boundary.
        ('2021-03-31T23:00:00-08:00', {'month': 4, 'day': 1, 'hour': 7}),
    ]
    for iso_value, expected in cases:
        ts = model_utils.ts_from_iso_format(iso_value)
        print('Test timestamp: ' + model_utils.format_ts(ts))
        for attr, value in expected.items():
            assert getattr(ts, attr) == value
def test_ts_from_date_iso_format():
    """Assert a date-only ISO string becomes a UTC timestamp on the right day."""
    ts = model_utils.ts_from_date_iso_format('2021-02-16')
    print('Test timestamp: ' + model_utils.format_ts(ts))
    assert ts.day in (16, 17)
    assert ts.month == 2
    assert ts.year == 2021
    # The UTC hour depends on which side of the Pacific midnight the conversion falls.
    hour_ok = ts.hour >= 8 if ts.day == 16 else ts.hour <= 7
    assert hour_ok
def test_now_ts_offset():
    """Assert now_ts_offset shifts UTC now forward/backward by the requested days."""
    delta = _timedelta(days=60)
    for add, sign in ((True, '+'), (False, '-')):
        expected = model_utils.now_ts() + delta if add else model_utils.now_ts() - delta
        actual = model_utils.now_ts_offset(60, add)
        print('Now timestamp ' + sign + ' 60 days: ' + model_utils.format_ts(actual))
        # The calendar date must agree with plain timedelta arithmetic.
        assert (actual.day, actual.month, actual.year) == \
            (expected.day, expected.month, expected.year)
def test_today_ts_offset():
    """Assert today_ts_offset truncates to midnight and precedes the now-based offset."""
    offset_now = model_utils.now_ts_offset(7, False)
    offset_today = model_utils.today_ts_offset(7, False)
    print('test now - 7 days: ' + model_utils.format_ts(offset_now))
    print('test today - 7 days: ' + model_utils.format_ts(offset_today))
    # today_ts_offset snaps to the start of the day.
    assert (offset_today.hour, offset_today.minute, offset_today.second) == (0, 0, 0)
    assert offset_today < offset_now
def test_expiry_dt_add_years():
    """Assert extending a 1-year expiry by 4 years moves it exactly 4 years out."""
    initial = model_utils.expiry_dt_from_years(1)
    extended = model_utils.expiry_dt_add_years(initial, 4)
    print('Initial expiry: ' + model_utils.format_ts(initial))
    print('Updated expiry: ' + model_utils.format_ts(extended))
    assert extended.year - initial.year == 4
| 40.758929 | 118 | 0.718072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,600 | 0.350416 |
0d5ddf3b2dc2c5628ef140f9b8e0527e81268a19 | 4,292 | py | Python | code.py | ChampLucky/olympic-hero | da8c92f87e9c4c9aabc4246e2e6eab5124c2517d | [
"MIT"
] | null | null | null | code.py | ChampLucky/olympic-hero | da8c92f87e9c4c9aabc4246e2e6eab5124c2517d | [
"MIT"
] | null | null | null | code.py | ChampLucky/olympic-hero | da8c92f87e9c4c9aabc4246e2e6eab5124c2517d | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
# NOTE(review): `path` is not defined in this file — presumably injected by the
# grading/learning platform that runs this script. Confirm before running standalone.
data=pd.read_csv(path)
# Normalise the medal-count column name used throughout the rest of the script.
data=data.rename(columns={"Total":"Total_Medals"})
# Result of head() is discarded; kept only for notebook-style inspection.
data.head(10)
# --------------
#Code starts here
# Label each country by which season it won more medals in ("Both" on a tie).
data['Better_Event']=np.where(data.Total_Summer==data.Total_Winter,"Both",np.where(data.Total_Summer>data.Total_Winter,"Summer","Winter"))
# Most frequent label across all countries; idxmax on value_counts returns the label.
better_event=data.Better_Event.value_counts().idxmax()
print(better_event)
# --------------
#Code starts here
def top_ten(df, name, count=10):
    """Return the country names holding the `count` largest values of a column.

    Args:
        df: DataFrame with a 'Country_Name' column and the numeric column `name`.
        name: Column to rank by, descending.
        count: How many top rows to take (default 10, preserving original behaviour).

    Returns:
        list of Country_Name values ordered by descending `name`.
    """
    # nlargest sorts descending and keeps first occurrences on ties.
    return df.nlargest(count, name)['Country_Name'].tolist()
top_countries=data[['Country_Name','Total_Summer','Total_Winter','Total_Medals']]
# Drop the last row — presumably a grand-total row in the dataset; confirm with the CSV.
top_countries=top_countries[:-1]
top_10_summer=top_ten(top_countries,'Total_Summer')
top_10_winter=top_ten(top_countries,'Total_Winter')
top_10=top_ten(top_countries,'Total_Medals')
# Countries appearing in all three top-10 lists.
common=[]
for i in range(10):
    k=top_10_summer[i]
    if k in top_10_winter and k in top_10:
        common.append(k)
print(common)
# --------------
#Code starts here
# Restrict the full dataset to each top-10 list.
summer_df=data[data.Country_Name.isin(top_10_summer)]
winter_df=data[data.Country_Name.isin(top_10_winter)]
top_df=data[data.Country_Name.isin(top_10)]
plt.figure(figsize=(20,10))
# NOTE(review): all three bar series are drawn on the SAME axes, so later bars
# overdraw earlier ones where countries overlap — likely meant to be subplots
# (see the commented-out ax_1/ax_2/ax_3 titles below).
plt.bar(summer_df.Country_Name,summer_df.Total_Summer)
# ax_1.set_title("bar chart between country_name and total summer medals")
plt.bar(winter_df.Country_Name,winter_df.Total_Winter)
# ax_2.set_title("bar chart between country_name and total winter medals")
plt.bar(top_df.Country_Name,top_df.Total_Medals)
# ax_3.set_title("bar chart between country_name and total medals")
plt.title("bar chart between country name and medal counts ")
plt.show()
# --------------
#Code starts here
# The original repeated the same ratio computation three times (summer / winter /
# overall); factored into a single helper to remove the copy-paste duplication.
def _max_golden_ratio(df, gold_col, total_col):
    """Add a Golden_Ratio column (gold/total) to `df` in place and return
    (max ratio, first country holding that max)."""
    df['Golden_Ratio'] = df[gold_col] / df[total_col]
    best_ratio = df.Golden_Ratio.max()
    # First country (original behaviour: tolist()[0]) whose ratio equals the max.
    best_country = df[df['Golden_Ratio'] == best_ratio]['Country_Name'].tolist()[0]
    return best_ratio, best_country

summer_max_ratio, summer_country_gold = _max_golden_ratio(
    summer_df, 'Gold_Summer', 'Total_Summer')
print(summer_max_ratio)

winter_max_ratio, winter_country_gold = _max_golden_ratio(
    winter_df, 'Gold_Winter', 'Total_Winter')
print(winter_max_ratio)

top_max_ratio, top_country_gold = _max_golden_ratio(
    top_df, 'Gold_Total', 'Total_Medals')
print(top_max_ratio)
# --------------
#Code starts here
# Drop the final row (presumed grand-total row — confirm against the CSV).
# NOTE(review): data_1 is a slice of `data`; assigning a column below may raise
# pandas' SettingWithCopyWarning.
data_1=data[:-1]
# Weighted points: gold=3, silver=2, bronze=1.
data_1['Total_Points']=data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total']
most_points=max(data_1.Total_Points)
# Country on the row holding the maximum points.
best_country=data_1.loc[data_1.Total_Points.idxmax(),'Country_Name']
# --------------
#Code starts here
# Stacked bar of the best country's medal breakdown.
best=data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar(stacked=True)
# NOTE(review): x-label is hard-coded to 'United States' rather than best_country.
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
plt.show()
| 26.658385 | 753 | 0.602982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,448 | 0.337372 |
0d5dfe3070bc1579ce7d09d768db26619dbfc6ac | 704 | py | Python | atester/tester.py | guchengxi1994/easy-api-tester | c77e4b599d3ff4ac44966b3cae34ad551b648b03 | [
"MIT"
] | 2 | 2021-08-04T00:35:16.000Z | 2021-12-16T09:23:56.000Z | atester/tester.py | guchengxi1994/easy-api-tester | c77e4b599d3ff4ac44966b3cae34ad551b648b03 | [
"MIT"
] | null | null | null | atester/tester.py | guchengxi1994/easy-api-tester | c77e4b599d3ff4ac44966b3cae34ad551b648b03 | [
"MIT"
] | null | null | null | import time
class Tester:
    """Holds the configuration for a single API test run."""

    def __init__(self,
                 url,
                 api: str = "",
                 name: str = "",
                 params: str = "",
                 filepath: str = "") -> None:
        self.url = url
        self.api = api
        self.name = name
        self.params = params
        self.filepath = filepath

    def _generateConfig(self):
        """Placeholder: config generation is not implemented yet."""
        pass

    def _generateName(self):
        """Normalise ``self.name`` into a ``.yaml`` filename.

        Empty names become a timestamp-derived filename; names already ending
        in ``.yaml`` are kept; everything else has dots stripped and ``.yaml``
        appended.
        """
        if self.name == "":
            generated = str(time.time()).replace(".", '') + ".yaml"
        elif self.name.endswith(".yaml"):
            generated = self.name
        else:
            generated = self.name.replace(".", "") + ".yaml"
        self.name = generated
0d6071d50184dd01af4d40afcaa5d56fe50daa79 | 430 | py | Python | scripts/A-A/ctsslmate.py | warsocket/recon | 4e26086b2c0338db980a0a3e736c499acf51ef15 | [
"Apache-2.0"
] | null | null | null | scripts/A-A/ctsslmate.py | warsocket/recon | 4e26086b2c0338db980a0a3e736c499acf51ef15 | [
"Apache-2.0"
] | null | null | null | scripts/A-A/ctsslmate.py | warsocket/recon | 4e26086b2c0338db980a0a3e736c499acf51ef15 | [
"Apache-2.0"
] | 1 | 2018-02-23T13:37:59.000Z | 2018-02-23T13:37:59.000Z | #!/usr/bin/env python
import requests
import sys
# Collect unique (sub)domains discovered via certificate-transparency logs.
domains = set()
# One target domain per stdin line.
for line in sys.stdin:
	domain = line.strip()
	# Query the Cert Spotter CT API for certificates covering this domain.
	# NOTE(review): v0 API, no auth, no error checking — presumably best-effort;
	# a non-JSON/error response is swallowed by the except below.
	certs = requests.get("https://certspotter.com/api/v0/certs?domain=%s" % domain).json()
	try:
		for cert in certs:
			# NOTE(review): inner `domain` shadows the stdin target on purpose or
			# by accident — harmless here since the target is not reused afterwards.
			for domain in cert["dns_names"]:
				# Normalise wildcard entries (*.example.com -> example.com).
				domain = domain.replace("*.", "")
				domains.add(domain)
	except:
		# Best-effort: ignore malformed API responses entirely.
		pass
# Python 2 print statement: emit each discovered domain on its own line.
for d in domains:
	print d
0d61278643c82f3a1673a8a1cfd11ed2cb72fea1 | 9,731 | py | Python | omnibot/services/slack/message.py | troybots/omnibot | 99f93256154d241a0cd76a514fd8b2a245af3f0f | [
"Apache-2.0"
] | 72 | 2019-11-21T00:02:18.000Z | 2022-01-20T02:23:09.000Z | omnibot/services/slack/message.py | troybots/omnibot | 99f93256154d241a0cd76a514fd8b2a245af3f0f | [
"Apache-2.0"
] | 30 | 2019-11-22T04:24:57.000Z | 2021-09-24T22:50:12.000Z | omnibot/services/slack/message.py | mogofinancial/omnibot | b62ccec1bf77b16a498ce5cd5f16c1812102f75b | [
"Apache-2.0"
] | 6 | 2019-12-26T00:37:41.000Z | 2021-04-09T04:21:17.000Z | from omnibot import logging
from omnibot.services import stats
from omnibot.services import slack
from omnibot.services.slack import parser
logger = logging.getLogger(__name__)
class Message(object):
    """
    Class for representing a parsed slack message.
    """

    def __init__(self, bot, event, event_trace):
        """Parse a raw Slack message ``event`` received on behalf of ``bot``.

        Builds ``self._payload``, the dict forwarded to downstream handlers.
        Raises MessageUnsupportedError (via _check_unsupported) for bot,
        thread, and subtyped messages; re-raises if the event lacks ``text``.
        """
        self._event_trace = event_trace
        self.event = event
        self._match = None
        self._payload = {}
        self._payload['omnibot_payload_type'] = 'message'
        self._bot = bot
        # The bot object has data we don't want to pass to downstreams, so
        # in the payload, we just store specific bot data.
        self._payload['bot'] = {
            'name': bot.name,
            'bot_id': bot.bot_id
        }
        # For future safety sake, we'll do the same for the team.
        self._payload['team'] = {
            'name': bot.team.name,
            'team_id': bot.team.team_id
        }
        self._payload['ts'] = event['ts']
        self._payload['thread_ts'] = event.get('thread_ts')
        # Bail out early (raises) before doing any expensive parsing.
        self._check_unsupported()
        self._payload['user'] = event.get('user')
        if self.user:
            self._payload['parsed_user'] = slack.get_user(self.bot, self.user)
        elif self.bot_id:
            # TODO: call get_bot
            self._payload['parsed_user'] = None
        else:
            self._payload['parsed_user'] = None
        try:
            self._payload['text'] = event['text']
        except Exception:
            logger.error(
                'Message event is missing text attribute.',
                extra=self.event_trace
            )
            raise
        self._payload['parsed_text'] = self.text
        self._payload['channel_id'] = event['channel']
        self._event_trace['channel_id'] = self.channel_id
        self._payload['channel'] = slack.get_channel(
            self.bot,
            self.channel_id
        )
        if not self.channel:
            logger.error(
                'Failed to fetch channel from channel_id.',
                extra=self.event_trace
            )
        self._parse_payload()

    def _check_unsupported(self):
        """Raise MessageUnsupportedError (and bump the unsupported-event
        counter) for message types omnibot ignores: bot messages, thread
        replies, and any message with a subtype."""
        # TODO: make the ignores configurable, but have a default list
        # Ignore self
        # Ignore bots
        unsupported = False
        if self.bot_id:
            logger.debug('ignoring message from bot', extra=self.event_trace)
            unsupported = True
        # Ignore threads
        elif self.thread_ts:
            logger.debug('ignoring thread message', extra=self.event_trace)
            unsupported = True
        # For now, ignore all event subtypes
        elif self.subtype:
            extra = {'subtype': self.subtype}
            extra.update(self.event_trace)
            logger.debug(
                'ignoring message with unsupported subtype',
                extra=extra,
            )
            unsupported = True
        if unsupported:
            statsd = stats.get_statsd_client()
            statsd.incr('event.unsupported')
            raise MessageUnsupportedError()

    def _parse_payload(self):
        """Extract users, channels, subteams, specials, emojis, emails and
        URLs from the message text, progressively rewriting ``parsed_text``
        with human-readable replacements. Each extraction is best-effort:
        failures are logged and parsing continues with the remaining steps.
        """
        try:
            self._payload['users'] = parser.extract_users(self.text, self.bot)
            self._payload['parsed_text'] = parser.replace_users(
                self.parsed_text,
                self.users
            )
        except Exception:
            logger.exception(
                'Failed to extract user info from text.',
                exc_info=True,
                extra=self.event_trace
            )
        try:
            self._payload['channels'] = parser.extract_channels(
                self.text,
                self.bot
            )
            self._payload['parsed_text'] = parser.replace_channels(
                self.parsed_text,
                self.channels
            )
        except Exception:
            logger.exception(
                'Failed to extract channel info from text.',
                exc_info=True,
                extra=self.event_trace
            )
        try:
            self._payload['subteams'] = parser.extract_subteams(
                self.text,
                self.bot
            )
        except Exception:
            logger.exception(
                'Failed to extract subteam info from text.',
                exc_info=True,
                extra=self.event_trace
            )
        try:
            self._payload['specials'] = parser.extract_specials(self.text)
            self._payload['parsed_text'] = parser.replace_specials(
                self.parsed_text,
                self.specials
            )
        except Exception:
            logger.exception(
                'Failed to extract special info from text.',
                exc_info=True,
                extra=self.event_trace
            )
        try:
            self._payload['emojis'] = parser.extract_emojis(self.text)
        except Exception:
            logger.exception(
                'Failed to extract emoji info from text.',
                exc_info=True,
                extra=self.event_trace
            )
        try:
            self._payload['emails'] = parser.extract_emails(self.text)
            self._payload['parsed_text'] = parser.replace_emails(
                self.parsed_text,
                self.emails
            )
        except Exception:
            logger.exception(
                'Failed to extract email info from text.',
                exc_info=True,
                extra=self.event_trace
            )
        try:
            self._payload['urls'] = parser.extract_urls(self.text)
            self._payload['parsed_text'] = parser.replace_urls(
                self.parsed_text,
                self.urls
            )
        except Exception:
            logger.exception(
                'Failed to extract url info from text.',
                exc_info=True,
                extra=self.event_trace
            )
        try:
            self._payload['directed'] = parser.extract_mentions(
                # We match mentioned and directed against parsed users, not
                # against raw users.
                self.parsed_text,
                self.bot,
                self.channel
            )
        except Exception:
            logger.exception(
                'Failed to extract mentions from text.',
                exc_info=True,
                extra=self.event_trace
            )
        # 'mentioned' is True when any extracted user resolves to this bot.
        self._payload['mentioned'] = False
        for user_id, user_name in self.users.items():
            if self.bot.name == user_name:
                self._payload['mentioned'] = True
        try:
            self._payload['command_text'] = parser.extract_command(
                # Similar to mentions above, we find the command text
                # from pre-parsed text for users, not against raw users.
                self.parsed_text,
                self.bot
            )
        except Exception:
            logger.exception(
                'Failed to extract command_text from text.',
                exc_info=True,
                extra=self.event_trace
            )

    @property
    def subtype(self):
        """Slack message subtype, or None for plain messages."""
        return self.event.get('subtype')

    @property
    def text(self):
        """Raw message text as received from Slack."""
        return self._payload['text']

    @property
    def parsed_text(self):
        """Message text with user/channel/special/email/url tokens replaced."""
        return self._payload['parsed_text']

    @property
    def command_text(self):
        """Text remaining after stripping the leading bot mention, if any."""
        return self._payload.get('command_text')

    @property
    def directed(self):
        """Whether the message was directed at the bot (mention/DM)."""
        return self._payload.get('directed', False)

    @property
    def mentioned(self):
        """Whether the bot appears anywhere in the message's users."""
        return self._payload.get('mentioned', False)

    @property
    def channel_id(self):
        return self._payload.get('channel_id')

    @property
    def channel(self):
        """Parsed channel info dict ({} if the lookup failed)."""
        return self._payload.get('channel', {})

    @property
    def user(self):
        return self._payload['user']

    @property
    def ts(self):
        return self._payload['ts']

    @property
    def thread_ts(self):
        return self._payload['thread_ts']

    @property
    def team(self):
        return self._payload['team']

    @property
    def bot(self):
        """
        The bot associated with the app that received this message from the
        event subscription api. To get info about a bot that may have sent
        this message, see bot_id.
        """
        return self._bot

    @property
    def bot_id(self):
        """
        The bot_id associated with the message, if the message if from a bot.
        If this message isn't from a bot, this will return None.
        """
        return self.event.get('bot_id')

    @property
    def channels(self):
        return self._payload.get('channels', {})

    @property
    def users(self):
        return self._payload.get('users', {})

    @property
    def specials(self):
        return self._payload.get('specials', {})

    @property
    def emails(self):
        return self._payload.get('emails', {})

    @property
    def urls(self):
        return self._payload.get('urls', {})

    @property
    def match_type(self):
        """How this message matched a handler: 'command' or 'regex'."""
        return self._payload.get('match_type')

    @property
    def match(self):
        """The matched command string or regex, set via set_match()."""
        return self._match

    @property
    def payload(self):
        """The full parsed payload dict forwarded to downstream handlers."""
        return self._payload

    @property
    def event_trace(self):
        return self._event_trace

    def set_match(self, match_type, match):
        """Record how this message matched a handler.

        For 'command' matches, also split command_text into the command and
        its trailing args.
        """
        self._payload['match_type'] = match_type
        self._match = match
        if match_type == 'command':
            self._payload['command'] = match
            self._payload['args'] = self.command_text[len(match):].strip()
        elif match_type == 'regex':
            self._payload['regex'] = match
class MessageUnsupportedError(Exception):
    """Raised for message events omnibot deliberately ignores
    (bot messages, thread replies, subtyped messages)."""
| 30.409375 | 78 | 0.546193 | 9,547 | 0.981091 | 0 | 0 | 2,070 | 0.212722 | 0 | 0 | 2,131 | 0.218991 |
0d61ee15897bf40f0a5f1b4383621a2d4209c39a | 96 | py | Python | fixture/_base.py | berpress/MT5WT | 10826f974cd5aef14e8771e18ca0314f27a902e3 | [
"Apache-2.0"
] | null | null | null | fixture/_base.py | berpress/MT5WT | 10826f974cd5aef14e8771e18ca0314f27a902e3 | [
"Apache-2.0"
] | 1 | 2019-11-26T18:12:24.000Z | 2019-11-26T18:12:24.000Z | fixture/_base.py | berpress/MT5WT | 10826f974cd5aef14e8771e18ca0314f27a902e3 | [
"Apache-2.0"
] | null | null | null | import logging
# Configure root logging once at import time: INFO and above to stderr.
logging.basicConfig(level=logging.INFO)
# Shared module-level logger used by the test fixtures.
log = logging.getLogger("WebTerminal")
| 16 | 39 | 0.802083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.135417 |
0d624fa0f65a72840354630ecb879c8f3430b061 | 18,980 | py | Python | ryu/app/DIjkstra_switch_13.py | mgrex97/ryu_competition | d8fbf424accf16e4ff18acff552317441de75010 | [
"Apache-2.0"
] | 1 | 2019-03-27T10:18:35.000Z | 2019-03-27T10:18:35.000Z | ryu/app/DIjkstra_switch_13.py | mgrex97/ryu_competition | d8fbf424accf16e4ff18acff552317441de75010 | [
"Apache-2.0"
] | null | null | null | ryu/app/DIjkstra_switch_13.py | mgrex97/ryu_competition | d8fbf424accf16e4ff18acff552317441de75010 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto.ether import ETH_TYPE_IPV6, ETH_TYPE_LLDP, ETH_TYPE_ARP
from ryu.lib import Dijkstra
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet import ether_types
from ryu.topology import event
from ryu.topology.api import get_switch, get_link, get_host
from collections import defaultdict
#from pprint import pprint
ETHERNET_MULTICAST = 'ff:ff:ff:ff:ff:ff'
class BestPerformance(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(BestPerformance, self).__init__(*args, **kwargs)
self.arp_table = {}
self.arp_switch_table = {}
self.switchs_datapath = {}
# Saving switch port relevant to which link.
self.switch_to_link = {} # {
# dpid : {
# port_no: link,
# ...
# },...
# }
# Which and how many hosts connect to the Switch.
self.switch_to_host = {} # {
# dpid : [host_mac,...],...
# }
self.Dijkstra_Graph = Dijkstra.Graph() # Init in get_topology()
# Saving the dpid list which the path going through.
self.path_sets = {} #
# {
# path(src_mac,dest_mac) : [dpid,...],
# ...
# }
# host connect to which switch and which port
self.hosts_list = {} # {
# host mac : {
# 'dpid' : dpid,
# 'port_no' : port_no,
# },...
# }
# Recod the link will affect which paths.
self.link_dict = {} # {
# link(src,dest) : {
# 'port_no' : port_no,
# 'path_list' : [(src_mac, dest_mac),...]
# },...
# }
@set_ev_cls(event.EventLinkAddRequest)
def Link_Add(self, req):
# Saving new link data.
link = req.link
link_condition = (link.src.dpid, link.dst.dpid)
self.link_dict[link_condition] = {
'port_no' : link.src.port_no,
}
if link_condition[::-1] in self.link_dict:
self.link_dict[link_condition]['path_list'] = self.link_dict[link_condition[::-1]]['path_list']
# Set Dijkstra edges
self.Dijkstra_Graph.add_edge(link.src.dpid, link.dst.dpid, 1, True)
else:
self.link_dict[link_condition]['path_list'] = []
self.switch_to_link[link.src.dpid][link.src.port_no] = link
rep = event.EventLinkAddReply(req.src, True)
self.reply_to_request(req, rep)
    def Link_Delete(self, link, state = True):
        """Remove a link: drop its graph edge, tear down the flows of every
        path that traversed it, and forget its link_dict entry.

        NOTE(review): `state` is accepted but never read (the commented-out
        guard below suggests it once gated flow deletion) — confirm intent.
        """
        # Delete the flows of paths, if some paths go through this link.
        link_condition = (link.src.dpid, link.dst.dpid)
        self.Dijkstra_Graph.del_edge(link.src.dpid, link.dst.dpid)
        # This path which is been deleted now will affect exist paths.
        if link_condition in self.link_dict:
            for path_condition in self.link_dict[link_condition]['path_list']:
                # get host mac
                src_mac = path_condition[0]
                dst_mac = path_condition[1]
                # NOTE(review): src_dpid/dst_dpid are computed but unused below.
                if src_mac in self.hosts_list \
                        and dst_mac in self.hosts_list:
                    src_dpid = self.hosts_list[src_mac]['dpid']
                    dst_dpid = self.hosts_list[dst_mac]['dpid']
                if path_condition in self.path_sets:
                    # reached_break_point = True
                    # Delete the flow which is relevant to the path.
                    for dpid in self.path_sets[path_condition]:
                        # if dp_id is not break_point and reached_break_point:
                        # if dpid in self.switchs_datapath and state == True:
                        self.delete_flow(self.switchs_datapath[dpid], dst_mac)
                        self.delete_flow(self.switchs_datapath[dpid], src_mac)
                        # reached_break_point = False
                    # Forget the path in both directions.
                    self.path_sets.pop(path_condition,None)
                    self.path_sets.pop(path_condition[::-1],None)
            #self.logger.info('Link Delete : %s to %s', link.src.dpid, link.dst.dpid)
            self.link_dict.pop(link_condition)
@set_ev_cls(event.EventHostAdd)
def hosts_up(self, event):
# Save new host data.
host = event.host
dpid = int(host.port.dpid)
switch_port = int(host.port.port_no)
self.hosts_list[host.mac] = {
'dpid' : dpid,
'port_no' : switch_port
}
self.switch_to_host[dpid][switch_port] = host.mac
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
    def _port_status_handler(self, ev):
        """Handle port add/modify/delete: tear down whatever was attached
        to the affected port (an inter-switch link or a host)."""
        # Check port modify will affect which link or host.
        msg = ev.msg
        dpid = msg.datapath.id
        reason = msg.reason
        port_no = msg.desc.port_no
        ofproto = msg.datapath.ofproto
        # Ignore status changes on the controller's reserved port.
        if ofproto.OFPP_CONTROLLER == port_no:
            #self.logger.info("port controller %s", port_no)
            return
        if reason == ofproto.OFPPR_ADD:
            pass
        elif reason in (ofproto.OFPPR_MODIFY, ofproto.OFPPR_DELETE):
            # Check port have link or not.
            if dpid in self.switch_to_link and port_no in self.switch_to_link[dpid]:
                link = self.switch_to_link[dpid][port_no]
                self.Link_Delete(link, True)
                self.switch_to_link[dpid].pop(port_no)
            # Check port have host or not.
            elif dpid in self.switch_to_host and port_no in self.switch_to_host[dpid]:
                host_mac = self.switch_to_host[dpid][port_no]
                # Remove the host's flows from its attachment switch.
                self.delete_flow(self.switchs_datapath[dpid], host_mac)
                self.switch_to_host[dpid].pop(port_no)
        else:
            self.logger.info("Illeagal port state %s %s", port_no, reason)
# Handle the siwtch disconnect.
@set_ev_cls(ofp_event.EventOFPStateChange, DEAD_DISPATCHER)
    def Switch_Disconnect(self, event):
        """On switch disconnect: forget its hosts, tear down its links and
        graph node, drop its datapath, and reset the ARP caches."""
        # When switch disconnect, clear the relevant data.
        dp_id = event.datapath.id
        if dp_id in self.switchs_datapath:
            # clear host data which is connect to this switch.
            for port_no, host_mac in self.switch_to_host[dp_id].items():
                self.hosts_list.pop(host_mac, None)
            if dp_id in self.switch_to_link:
                # Tear down every link hanging off this switch (state=False:
                # peers may already be unreachable).
                for port_no, link in self.switch_to_link[dp_id].items():
                    self.Link_Delete(link, False)
            self.switch_to_host.pop(dp_id, None)
            self.switch_to_link.pop(dp_id, None)
            self.switchs_datapath.pop(dp_id, None)
            self.Dijkstra_Graph.del_node(dp_id)
            # Topology changed: invalidate the ARP caches wholesale.
            self.arp_table = {}
            self.arp_switch_table = {}
# Handle the switch connect.
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.switchs_datapath[datapath.id] = datapath
self.switch_to_host[datapath.id] = {}
self.switch_to_link[datapath.id] = {}
self.Dijkstra_Graph.add_node(datapath.id)
self.add_flow(datapath, 0, match, actions)
# Delete specific flow in the ofp switch.
def delete_flow(self, datapath, dst = None, table_id = 0):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
if dst is not None:
match = parser.OFPMatch(eth_dst=dst)
else:
match = parser.OFPMatch()
mod = parser.OFPFlowMod(datapath=datapath, priority=1, match=match,
out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY,
command=ofproto.OFPFC_DELETE, table_id = table_id)
datapath.send_msg(mod)
def add_flow(self, datapath, priority, match, actions, buffer_id=None, table_id = 0):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst, table_id=table_id)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst, table_id=table_id)
datapath.send_msg(mod)
    def add_flow_between_switch(self, src_dpid, dst_dpid, dst_mac, in_dpid = None, in_port = None):
        """Install a flow on ``src_dpid`` forwarding ``dst_mac`` traffic out
        the port that leads to ``dst_dpid``.

        The ingress match is taken either from the link toward ``in_dpid``
        (when given) or from an explicit ``in_port``; with neither, the flow
        matches on eth_dst alone. Returns True on success, False when the
        required link is unknown.
        """
        datapath = self.switchs_datapath[src_dpid]
        parser = datapath.ofproto_parser
        try:
            # Egress port is the recorded port of the src->dst link.
            out_port = self.link_dict[(src_dpid, dst_dpid)]['port_no']
        except KeyError as e:
            self.logger.info("Link between switch %s and %s not exist.\nCan't find out_port", src_dpid, dst_dpid)
            return False
        if in_dpid != None:
            try:
                # Ingress port is the port facing the previous hop.
                in_port = self.link_dict[(src_dpid, in_dpid)]['port_no']
            except KeyError as e:
                self.logger.info("Link between switch %s and %s not exist.\nCan't find in_port", src_dpid, dst_dpid)
                return False
        if in_port != None:
            match = parser.OFPMatch(in_port = in_port, eth_dst = dst_mac)
        else:
            match = parser.OFPMatch(eth_dst = dst_mac)
        actions = [parser.OFPActionOutput(out_port)]
        self.add_flow(datapath, 1, match, actions)
        return True
    def add_Dijkstra_path_flow(self, src_dpid, dst_dpid, src_mac, dst_mac, in_port):
        """Compute the shortest switch path from ``src_dpid`` to ``dst_dpid``
        and install bidirectional flows along it for the src/dst MAC pair.

        Returns the dpid path list on success, None when the destination host
        is unknown, the path already exists, or no path can be found.
        """
        if dst_mac not in self.hosts_list:
            return None
        # Caculate the path then send flows.
        path_condition = (src_mac, dst_mac)
        if path_condition in self.path_sets or path_condition[::-1] in self.path_sets:
            self.logger.info('Path exist!')
            return None
        Dijkstra_path = Dijkstra.dijsktra(self.Dijkstra_Graph, src_dpid, dst_dpid)
        # Can't find any path.
        if Dijkstra_path == None :
            self.logger.info('Can\'t find path!')
            return None
        # Remember the path in both directions (shared list object).
        self.path_sets[path_condition] = list(Dijkstra_path)
        # reverse tuple
        self.path_sets[path_condition[::-1]] = self.path_sets[path_condition]
        #self.logger.info('Path: %s', ','.join(map(str,Dijkstra_path)))
        if len(Dijkstra_path) > 1:
            # Intermediate switches: forward dst_mac toward the next hop and
            # src_mac back toward the previous hop.
            prev_dpid = src_dpid
            for index, curr_dpid in enumerate(Dijkstra_path[1:-1]) :
                next_dpid = Dijkstra_path[index + 2]
                self.add_flow_between_switch(curr_dpid, next_dpid, dst_mac, prev_dpid)
                self.add_flow_between_switch(curr_dpid, prev_dpid, src_mac, next_dpid)
                # Recod link will affect which path.
                self.link_dict[(curr_dpid,next_dpid)]['path_list'].append(path_condition)
                prev_dpid = curr_dpid
            self.link_dict[(Dijkstra_path[0], Dijkstra_path[1])]['path_list'].append(path_condition)
            # First hop forward, last hop backward.
            self.add_flow_between_switch(Dijkstra_path[0], Dijkstra_path[1], dst_mac, in_port = in_port)
            self.add_flow_between_switch(Dijkstra_path[-1], Dijkstra_path[-2], src_mac, in_port = self.hosts_list[dst_mac]['port_no'])
            # Return flow on the ingress switch: deliver src_mac to the host port.
            datapath = self.switchs_datapath[src_dpid]
            parser = datapath.ofproto_parser
            actions = [parser.OFPActionOutput(in_port)]
            in_port = self.link_dict[(Dijkstra_path[0], Dijkstra_path[1])]['port_no']
            match = parser.OFPMatch(in_port = in_port,eth_dst = src_mac)
            self.add_flow(datapath, 1, match, actions)
            # Final flow on the egress switch: deliver dst_mac to its host port.
            datapath = self.switchs_datapath[dst_dpid]
            parser = datapath.ofproto_parser
            out_port = self.hosts_list[dst_mac]['port_no']
            actions = [parser.OFPActionOutput(out_port)]
            in_port = self.link_dict[(Dijkstra_path[-1], Dijkstra_path[-2])]['port_no']
            match = parser.OFPMatch(in_port = in_port, eth_dst = dst_mac)
            self.add_flow(datapath, 1, match, actions)
        return Dijkstra_path
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Handle OFPPacketIn events from connected switches.

        Ignores LLDP/IPv6 frames, learns ARP IP->MAC mappings, and then
        either installs Dijkstra-computed path flows when the destination
        host lives on another switch, forwards directly when it is on this
        switch, or hands multicast ARP traffic to the ARP proxy.
        """
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        # Parse the raw frame; the first protocol entry is the Ethernet header.
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        src_mac = eth.src
        dst_mac = eth.dst
        out_port = None
        if eth.ethertype in (ETH_TYPE_LLDP ,ETH_TYPE_IPV6):
            # ignore lldp and IPV6 packet
            return
        if eth.ethertype == ETH_TYPE_ARP:
            # Learn the sender's IP -> MAC mapping for the ARP proxy.
            pkt_arp = pkt.get_protocols(arp.arp)[0]
            self.arp_table[pkt_arp.src_ip] = src_mac
        src_dpid = datapath.id
        #self.logger.info("packet in [%s] %s %s %s %s", eth.ethertype, src_dpid, src_mac, dst_mac, in_port)
        if dst_mac != ETHERNET_MULTICAST:
            # Unicast destination.
            if dst_mac in self.hosts_list:
                dst_dpid = self.hosts_list[dst_mac]['dpid']
                if src_dpid == dst_dpid:
                    # Destination host hangs off this very switch: plain forward.
                    out_port = self.hosts_list[dst_mac]['port_no']
                else:
                    # Remote host: install flows along the shortest path instead.
                    self.add_Dijkstra_path_flow(src_dpid, dst_dpid, src_mac, dst_mac, in_port)
                    return None
            else:
                # dst not in host_list means host not exist.
                return None
        elif eth.ethertype == ETH_TYPE_ARP:
            # arp proxy (pkt_arp was parsed above, since ethertype is ARP here)
            if self.arp_proxy(eth, pkt_arp, datapath, in_port, msg) :
                return None
            else:
                return None
        if out_port == None : return None
        actions = [parser.OFPActionOutput(out_port)]
        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst_mac)
            #match = parser.OFPMatch(eth_dst=dst_mac)
            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return None
            else:
                self.add_flow(datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        # Emit the triggering packet itself (installing the flow alone
        # would drop this first packet).
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
    def arp_proxy(self, eth, pkt_arp, datapath, in_port, msg):
        """Proxy/limit ARP traffic to suppress broadcast storms.

        Returns True when the packet was consumed here (dropped as a
        duplicate, flooded once, or answered with a synthesized ARP
        reply); False when the caller should continue normal handling.
        """
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        arp_src_ip = pkt_arp.src_ip
        arp_dst_ip = pkt_arp.dst_ip
        eth_dst = eth.dst
        eth_src = eth.src
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        if arp_dst_ip not in self.arp_table:
            # Target IP unknown: flood the request only the first time this
            # (switch, sender, target-ip) triple is seen on this in_port;
            # arriving on a different port later means a loop, so the copy
            # is dropped by sending it with an empty action list.
            if self.arp_switch_table.setdefault((datapath.id, eth_src, arp_dst_ip), in_port) != in_port:
                out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=[], data=data)
                datapath.send_msg(out)
                return True
            else:
                # ARP_FLOOD
                out_port = ofproto.OFPP_FLOOD
                actions = [parser.OFPActionOutput(out_port)]
                out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
                datapath.send_msg(out)
                return True
        elif pkt_arp.opcode == arp.ARP_REQUEST \
            and self.arp_table[arp_src_ip] in self.hosts_list \
            and self.hosts_list[self.arp_table[arp_src_ip]]['dpid'] == datapath.id:
            # Target IP already learned and the request entered at the
            # sender's own edge switch: answer on the target's behalf
            # instead of flooding the request network-wide.
            ARP_Reply = packet.Packet()
            ARP_Reply.add_protocol(ethernet.ethernet(
                ethertype=eth.ethertype, dst=eth_src,
                src=self.arp_table[arp_dst_ip]))
            ARP_Reply.add_protocol(arp.arp(
                opcode=arp.ARP_REPLY, src_mac=self.arp_table[arp_dst_ip],
                src_ip=arp_dst_ip, dst_mac=eth_src, dst_ip=arp_src_ip))
            ARP_Reply.serialize()
            # Send the forged reply straight out of the requester's port.
            actions = [parser.OFPActionOutput(in_port)]
            out = parser.OFPPacketOut(
                datapath=datapath,
                buffer_id=ofproto.OFP_NO_BUFFER,
                in_port=ofproto.OFPP_CONTROLLER,
                actions=actions, data=ARP_Reply.data)
            datapath.send_msg(out)
            return True
        return False
| 43.53211 | 134 | 0.578346 | 17,660 | 0.930453 | 0 | 0 | 6,712 | 0.353635 | 0 | 0 | 3,151 | 0.166017 |
0d6498c824c3657f7aeb953f0131a759eba8a2fa | 68 | py | Python | cpo_pipeline/typing/__init__.py | DiDigsDNA/cpo-pipeline | 4b3236ef4fe37e6efa38554e90f6d289d4f1f801 | [
"MIT"
] | null | null | null | cpo_pipeline/typing/__init__.py | DiDigsDNA/cpo-pipeline | 4b3236ef4fe37e6efa38554e90f6d289d4f1f801 | [
"MIT"
] | 31 | 2018-10-11T17:43:19.000Z | 2019-06-14T19:26:26.000Z | cpo_pipeline/typing/__init__.py | DiDigsDNA/cpo-pipeline | 4b3236ef4fe37e6efa38554e90f6d289d4f1f801 | [
"MIT"
] | 3 | 2018-11-15T18:04:36.000Z | 2019-05-02T19:09:39.000Z | """
typing module
"""
from . import pipeline
from . import parsers
| 9.714286 | 22 | 0.691176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.308824 |
0d6637431650b1ab532cc70bbdb8ef04c6200f47 | 871 | py | Python | gmit--exercise03--collatz--code-and-output--20180205d.py | g00364787/52167assessments | 65318102196fbbf40b764cd189edc4e31963ecf5 | [
"Apache-2.0"
] | null | null | null | gmit--exercise03--collatz--code-and-output--20180205d.py | g00364787/52167assessments | 65318102196fbbf40b764cd189edc4e31963ecf5 | [
"Apache-2.0"
] | null | null | null | gmit--exercise03--collatz--code-and-output--20180205d.py | g00364787/52167assessments | 65318102196fbbf40b764cd189edc4e31963ecf5 | [
"Apache-2.0"
] | null | null | null | # AUTHOR = PAUL KEARNEY
# DATE = 2018-02-05
# STUDENT ID = G00364787
# EXERCISE 03
#
# filename= gmit--exercise03--collatz--20180205d.py
#
# the Collatz conjecture
print("The COLLATZ CONJECTURE")
# define the variables
num = ""
x = 0
# obtain user input
num = input("A start nummber: ")
x = int(num)
print("--Start of sequence.")
print (x)
# calculate the sequence/conjecture
while x != 1:
if x % 2 == 0:
x = x / 2
else:
x = (x * 3) + 1
print(int(x))
print("--End of sequence")
#
#
#
#
#
#
#
# WEEK 03
#
# filename= gmit--week03--collatz--20180205d.py
#
# STUDENT ID= g00364787
#
# the Collatz conjecture
#
#
#
#The COLLATZ CONJECTURE
#
#A start nummber: 12
#--Start of sequence.
#12
#6
#3
#10
#5
#16
#8
#4
#2
#1
#--End of sequence
#
## end ##
| 12.267606 | 52 | 0.543054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 599 | 0.687715 |
0d67fd244c4c3feefcdec78169f64047738ccccd | 8,064 | py | Python | concon.py | nejucomo/concon | 0302475a86d25c53cd6ef50b0e4f6279ea73090d | [
"MIT"
] | null | null | null | concon.py | nejucomo/concon | 0302475a86d25c53cd6ef50b0e4f6279ea73090d | [
"MIT"
] | null | null | null | concon.py | nejucomo/concon | 0302475a86d25c53cd6ef50b0e4f6279ea73090d | [
"MIT"
] | null | null | null | """
concon (CONstrained CONtainers) provides usefully constrained container
subtypes as well as utilities for defining new constrained subtypes and
append-only dict modification.
There are two flavors of constraints: frozen and appendonly. The former
prevents any modification and the latter prevents any modification of
an existing entry. These two flavors of constraint are applied to the
set, list, and dict types.
(Note: frozenset is already a builtin, but is also provided in this
module scope for consistency.)
"""
import unittest
from collections import Mapping
# I. General Constraint abstractions
class ConstraintError (TypeError):
    """
    Base Exception type raised whenever a blocked operation on a
    constrained container is invoked.
    """
    Template = 'Attempt to call {!r}.{} {!r} {!r} violates constraint.'
    def __str__(self):
        # args hold (instance, method name, positional args, keyword args).
        template = self.Template
        return template.format(*self.args)
    @classmethod
    def block(cls, method):
        """
        Return a stand-in for the given method which raises cls (a
        ConstraintError subclass) on every call.
        """
        def blocked_method(self, *a, **kw):
            raise cls(self, method.__name__, a, kw)
        return blocked_method
def define_constrained_subtype(prefix, base, blockednames, clsdict=None):
    """
    Define a subtype of *base* whose methods listed in *blockednames*
    all raise ConstraintError when called.

    @param prefix: Name prefix prepended to the base type's name; the
                   resulting type is named prefix + base.__name__.
    @param base: The base type to derive from.
    @param blockednames: A list of method name strings to block.
    @param clsdict: None or a dict which will become the subtype's
                    class dict.
    @return: The new subtype.
    @raise: OverwriteError - If clsdict already contains an entry in
            blockednames (or 'get_blocked_method_names').
    """
    name = prefix + base.__name__
    clsdict = clsdict or {}
    # Prepend a generated summary line to any caller-supplied docstring.
    clsdict['__doc__'] = 'An {} extension of {}.\n{}'.format(
        prefix, base.__name__, clsdict.get('__doc__', ''))
    # Introspection hook so callers/tests can discover the blocked names.
    setitem_without_overwrite(
        clsdict,
        'get_blocked_method_names',
        lambda self: iter(blockednames),
    )
    # Replace every blocked method with a raising stub.
    for blockedname in blockednames:
        setitem_without_overwrite(
            clsdict,
            blockedname,
            ConstraintError.block(getattr(base, blockedname)),
        )
    return type(name, (base,), clsdict)
# II. Overwrite utilities
class OverwriteError (ConstraintError, KeyError):
    """
    OverwriteError is raised when an attempt to overwrite a value in an
    append-only structure occurs.

    It derives from both ConstraintError (a constraint violation) and
    KeyError (a mapping storage problem), so callers may catch either.
    args are (key, newvalue, oldvalue); the inherited __str__ formats
    them with the three-slot Template below.
    """
    Template = ('Attempted overwrite of key {!r} with '
                'new value {!r} overwriting old value {!r}')
    def __init__(self, key, newvalue, oldvalue):
        # Route through KeyError.__init__ to populate self.args.
        KeyError.__init__(self, key, newvalue, oldvalue)
def setitem_without_overwrite(d, key, value):
    """
    Store d[key] = value, refusing to replace an existing entry.

    Uses dict.__setitem__ directly so it also works as the __setitem__
    of dict subtypes that would otherwise recurse or be blocked.

    @param d: An instance of dict, that is: isinstance(d, dict)
    @param key: a key
    @param value: a value to associate with the key
    @return: None
    @raise: OverwriteError if the key is already present in d.
    """
    if key not in d:
        dict.__setitem__(d, key, value)
    else:
        raise OverwriteError(key, value, d[key])
def update_without_overwrite(d, *args, **kwds):
    """
    dict.update work-alike that refuses to overwrite existing keys.

    Accepts the same argument forms as dict.update -- a Mapping, an
    object with .keys(), or an iterable of (key, value) pairs -- plus
    keyword arguments, and routes every store through
    setitem_without_overwrite.
    """
    assert len(args) <= 1, \
        'At most one positional parameter is allowed: {0!r}'.format(args)
    if args:
        (other,) = args
        # Normalize the three accepted shapes into a (key, value) stream.
        if isinstance(other, Mapping):
            pairs = ((key, other[key]) for key in other)
        elif hasattr(other, "keys"):
            pairs = ((key, other[key]) for key in other.keys())
        else:
            pairs = other
        for key, value in pairs:
            setitem_without_overwrite(d, key, value)
    for key, value in kwds.items():
        setitem_without_overwrite(d, key, value)
# III. Concrete Constrained Containers
frozenset = frozenset # Promote this builtin to module scope for consistency.
# frozenlist: a list whose every mutating method raises ConstraintError.
# NOTE(review): '__delslice__'/'__setslice__' exist only on Python 2's
# list type; under Python 3, getattr() on them would fail at class
# creation time -- confirm the targeted interpreter version.
frozenlist = define_constrained_subtype(
    'frozen', list,
    ['__delitem__', '__delslice__', '__iadd__', '__imul__', '__setitem__',
     '__setslice__', 'append', 'extend', 'insert', 'pop', 'remove',
     'reverse', 'sort'])
# frozendict: a dict with all mutating methods blocked.
frozendict = define_constrained_subtype(
    'frozen', dict,
    ['__delitem__', '__setitem__', 'clear', 'pop', 'popitem',
     'setdefault', 'update'])
# appendonlyset: elements may be added but never removed.
appendonlyset = define_constrained_subtype(
    'appendonly', set,
    ['__iand__', '__isub__', '__ixor__', 'clear', 'difference_update',
     'discard', 'intersection_update', 'pop', 'remove',
     'symmetric_difference_update'])
# appendonlylist: items may be appended but not reordered or removed.
appendonlylist = define_constrained_subtype(
    'appendonly', list,
    ['__setitem__', '__delitem__', 'insert', 'reverse', 'pop', 'remove'])
# appendonlydict: removal is blocked, and writes go through the
# no-overwrite helpers, so an existing key is never silently replaced.
appendonlydict = define_constrained_subtype(
    'appendonly', dict,
    ['__delitem__', 'pop', 'popitem', 'clear'],
    {'__setitem__': setitem_without_overwrite,
     'update': update_without_overwrite})
# IV. Unittests
class SetItemWithoutOverwriteTests (unittest.TestCase):
    """Behavioural tests for the no-overwrite storage helpers."""
    def test_setitem_without_overwrite__no_overwrite(self):
        target = {'a': 'apple'}
        setitem_without_overwrite(target, 'b', 'banana')
        self.assertEqual(target, {'a': 'apple', 'b': 'banana'})
    def test_setitem_without_overwrite__with_overwrite(self):
        target = {'a': 'apple'}
        with self.assertRaises(OverwriteError):
            setitem_without_overwrite(target, 'a', 'applause')
    def test_update_without_overwrite__no_overwrite(self):
        target = {'a': 'apple'}
        update_without_overwrite(target, {'b': 'banana'})
        self.assertEqual(target, {'a': 'apple', 'b': 'banana'})
    def test_update_without_overwrite__with_overwrite(self):
        target = {'a': 'apple'}
        with self.assertRaises(OverwriteError):
            update_without_overwrite(target, {'a': 'applause'})
    def test_update_without_overwrite__with_NonMappingWithKeysAndGetItem(self):
        # Duck-typed source: has keys()/__getitem__ but is not a Mapping.
        class NonMappingWithKeysAndGetItem (object):
            def keys(self):
                return ['a', 'b', 'c']
            def __getitem__(self, key):
                return 42
        target = {}
        update_without_overwrite(target, NonMappingWithKeysAndGetItem())
        self.assertEqual(target, {'a': 42, 'b': 42, 'c': 42})
    def test_update_without_overwrite__with_keyvalue_sequence(self):
        target = {}
        update_without_overwrite(target, [('a', 0), ('b', 1), ('c', 2)])
        self.assertEqual(target, {'a': 0, 'b': 1, 'c': 2})
    def test_update_without_overwrite__with_keywords(self):
        target = {}
        update_without_overwrite(target, a=0, b=1, c=2)
        self.assertEqual(target, {'a': 0, 'b': 1, 'c': 2})
class BlockedMethodsTests (unittest.TestCase):
    """Every name reported by get_blocked_method_names must raise
    ConstraintError when invoked."""
    def _check_blocked_methods(self, obj):
        # Shared helper: poke each blocked method with a dummy argument.
        for name in obj.get_blocked_method_names():
            with self.assertRaises(ConstraintError):
                getattr(obj, name)(42)
    def test_frozendict(self):
        self._check_blocked_methods(frozendict({}))
    def test_frozenlist(self):
        self._check_blocked_methods(frozenlist({}))
    # Note, we do not test frozenset.
    def test_appendonlydict(self):
        self._check_blocked_methods(appendonlydict({}))
    def test_appendonlylist(self):
        self._check_blocked_methods(appendonlylist({}))
    def test_appendonlyset(self):
        self._check_blocked_methods(appendonlyset({}))
class ContraintErrorTests (unittest.TestCase):
    # NOTE(review): class name typo "Contraint" kept as-is.
    def test__str__(self):
        """str(ConstraintError(...)) matches the Template wording."""
        error = ConstraintError(
            None,
            'foo',
            ('blah', 42),
            dict(wombat='awesome!'),
        )
        # NOTE(review): assertRegexpMatches is a deprecated alias that was
        # removed in Python 3.12; switch to assertRegex if this suite ever
        # targets Python 3 only.
        self.assertRegexpMatches(
            str(error),
            r'^Attempt to call .* violates constraint\.$',
        )
| 29.866667 | 79 | 0.63182 | 3,782 | 0.468998 | 0 | 0 | 293 | 0.036334 | 0 | 0 | 3,113 | 0.386037 |
0d688031a0fe7e0f5d4a206c1ace5f3e35d7e2b5 | 4,186 | py | Python | algorithms/search/tests/student/test_bfs.py | Tebs-Lab/learn-graph-theory | d883fb3ee095a9b80eebc33fd19aee33b35c7bae | [
"Unlicense"
] | 9 | 2019-03-25T14:21:37.000Z | 2021-12-06T19:19:49.000Z | algorithms/search/tests/student/test_bfs.py | Tebs-Lab/learn-graph-theory | d883fb3ee095a9b80eebc33fd19aee33b35c7bae | [
"Unlicense"
] | null | null | null | algorithms/search/tests/student/test_bfs.py | Tebs-Lab/learn-graph-theory | d883fb3ee095a9b80eebc33fd19aee33b35c7bae | [
"Unlicense"
] | 4 | 2019-08-02T20:32:00.000Z | 2020-10-14T02:19:50.000Z | from graphs.reference_implementation import DirectedGraph, UndirectedGraph
from algorithms.search.student_implementation import breadth_first_search
def test_start_is_stop():
    """BFS where the goal equals the start returns the one-node path."""
    g = DirectedGraph()
    for tail, head in [('a', 'b'), ('b', 'c'), ('b', 'd'), ('e', 'f')]:
        g.add_edge(tail, head)
    assert breadth_first_search(g, 'a', 'a') == ['a']
def test_no_path_directed():
    '''
    BFS on a simple directed graph with no cycles and no path to the goal.
    '''
    g = DirectedGraph()
    for tail, head in [('a', 'b'), ('b', 'c'), ('b', 'd'), ('e', 'f')]:
        g.add_edge(tail, head)
    assert breadth_first_search(g, 'a', 'e') is None
def test_no_path_directed_cycles():
    '''
    BFS on a directed graph that contains cycles but no path to the goal.
    '''
    g = DirectedGraph()
    for tail, head in [('a', 'b'), ('b', 'c'), ('c', 'a'), ('b', 'd'), ('e', 'f')]:
        g.add_edge(tail, head)
    assert breadth_first_search(g, 'a', 'e') is None
def test_single_path_directed():
    """Exactly one route exists; BFS must return it."""
    g = DirectedGraph()
    for tail, head in [('a', 'b'), ('b', 'c'), ('b', 'd'), ('d', 'e'), ('e', 'f')]:
        g.add_edge(tail, head)
    assert breadth_first_search(g, 'a', 'e') == ['a', 'b', 'd', 'e']
def test_multiple_paths_directed():
    """With several routes, BFS must return the fewest-hop one."""
    g = DirectedGraph()
    edges = [('a', 'b'), ('b', 'c'), ('b', 'e'), ('b', 'd'), ('d', 'e'), ('e', 'f')]
    for tail, head in edges:
        g.add_edge(tail, head)
    assert breadth_first_search(g, 'a', 'e') == ['a', 'b', 'e']
def test_single_path_with_cycles_directed():
    """Cycles off the main route must not derail the search."""
    g = DirectedGraph()
    edges = [('a', 'b'), ('b', 'c'), ('c', 'h'), ('h', 'i'), ('i', 'j'),
             ('j', 'k'), ('b', 'd'), ('d', 'e'), ('e', 'f'), ('f', 'b')]
    for tail, head in edges:
        g.add_edge(tail, head)
    assert breadth_first_search(g, 'a', 'k') == ['a', 'b', 'c', 'h', 'i', 'j', 'k']
def test_no_path_undirected():
    '''
    BFS on a simple undirected graph with no cycles and no path to the goal.
    '''
    g = UndirectedGraph()
    for u, v in [('a', 'b'), ('b', 'c'), ('b', 'd'), ('e', 'f')]:
        g.add_edge(u, v)
    assert breadth_first_search(g, 'a', 'e') is None
def test_no_path_undirected_cycles():
    '''
    BFS on an undirected graph that contains cycles but no path to the goal.
    '''
    g = UndirectedGraph()
    for u, v in [('a', 'b'), ('b', 'c'), ('c', 'a'), ('b', 'd'), ('e', 'f')]:
        g.add_edge(u, v)
    assert breadth_first_search(g, 'a', 'e') is None
def test_single_path_undirected():
    """Exactly one undirected route exists; BFS must return it."""
    g = UndirectedGraph()
    for u, v in [('a', 'b'), ('b', 'c'), ('b', 'd'), ('d', 'e'), ('e', 'f')]:
        g.add_edge(u, v)
    assert breadth_first_search(g, 'a', 'e') == ['a', 'b', 'd', 'e']
def test_multiple_paths_undirected():
    """With several undirected routes, BFS returns the fewest-hop one."""
    g = UndirectedGraph()
    edges = [('a', 'b'), ('b', 'c'), ('b', 'e'), ('b', 'd'), ('d', 'e'), ('e', 'f')]
    for u, v in edges:
        g.add_edge(u, v)
    assert breadth_first_search(g, 'a', 'e') == ['a', 'b', 'e']
def test_single_path_with_cycles_undirected():
    """Cycles off the main undirected route must not derail the search."""
    g = UndirectedGraph()
    edges = [('a', 'b'), ('b', 'c'), ('c', 'h'), ('h', 'i'), ('i', 'j'),
             ('j', 'k'), ('b', 'd'), ('d', 'e'), ('e', 'f'), ('f', 'b')]
    for u, v in edges:
        g.add_edge(u, v)
    assert breadth_first_search(g, 'a', 'k') == ['a', 'b', 'c', 'h', 'i', 'j', 'k']
def test_graph_with_tie():
    """Two equally short routes exist; either answer is acceptable."""
    g = UndirectedGraph()
    for u, v in [('a', 'b'), ('b', 'c'), ('b', 'd'), ('c', 'e'), ('d', 'e')]:
        g.add_edge(u, v)
    found = breadth_first_search(g, 'a', 'e')
    assert found in (['a', 'b', 'c', 'e'], ['a', 'b', 'd', 'e'])
def test_ignores_weight():
    """BFS counts hops only, so heavy edges on the short route still win."""
    g = UndirectedGraph()
    weighted_edges = [('a', 'b', 1), ('a', 'd', 20), ('b', 'c', 1),
                      ('c', 'e', 1), ('d', 'e', 20)]
    for u, v, w in weighted_edges:
        g.add_edge(u, v, w)
    found = breadth_first_search(g, 'a', 'e')
    assert found == ['a', 'd', 'e']
| 25.216867 | 93 | 0.543239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,048 | 0.250358 |
0d6919c52d43e980747076e6a6d5776fe0cc110d | 13,205 | py | Python | levelTcricpr.py | scaralbi/dnaplotlib | a1fdd12ac3f3df1b16a0351402b8fe4f29b388d9 | [
"MIT"
] | null | null | null | levelTcricpr.py | scaralbi/dnaplotlib | a1fdd12ac3f3df1b16a0351402b8fe4f29b388d9 | [
"MIT"
] | null | null | null | levelTcricpr.py | scaralbi/dnaplotlib | a1fdd12ac3f3df1b16a0351402b8fe4f29b388d9 | [
"MIT"
] | null | null | null | import math
import dnaplotlib as dpl
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.patches import Polygon, Ellipse, Wedge, Circle, PathPatch
from matplotlib.path import Path
from matplotlib.lines import Line2D
from matplotlib.patheffects import Stroke
import matplotlib.patches as patches
import matplotlib as mpl
from matplotlib import cm
from colorspacious import cspace_converter
from collections import OrderedDict
# Registry of colormaps; populated nowhere in this script -- presumably a
# leftover from the matplotlib colormap-gallery example (verify before use).
cmaps = OrderedDict()
__author__ = 'Alberto Scarampi <as2945@cam.ac.uk>, Howe Lab, Cambridge'
__license__ = 'MIT'
__version__ = '1.0'
# Color maps
# Named RGB tuples (floats in 0-1) used for individual glyph colours.
col_map = {}
col_map['red'] = (0.95, 0.30, 0.25)
col_map['green'] = (0.38, 0.82, 0.32)
col_map['blue'] = (0.38, 0.65, 0.87)
col_map['orange'] = (1.00, 0.75, 0.17)
col_map['purple'] = (0.55, 0.35, 0.64)
col_map['yellow'] = (0.98, 0.9, 0.55)
col_map['black'] = (0, 0, 0)
col_map['gray'] = (0.41, 0.41, 0.41)
# Discrete matplotlib colormaps: promoter-strength shading (Greens) and
# the two recombinase-site colour families (winter/summer).
GreenCmap = cm.get_cmap('Greens', 5)
prqRmap = cm.get_cmap('winter', 2)
prqAmap = cm.get_cmap('summer', 2)
def sbol_recombinase1 (ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ SBOL recombinase site renderer - forward direction

    Draws a solid triangle for a recombinase recognition site.  The
    direction is inferred from start/end: start > end means the site
    points backwards and the alternate colour (color2) is used.

    @param ax: matplotlib axis to draw onto.
    @param type, num: part type and index (unused; kept for the
        dnaplotlib part-renderer call signature).
    @param start, end: requested extent of the part along the axis.
    @param prev_end: end coordinate of the previously rendered part.
    @param scale, linewidth: global rendering parameters; both may be
        overridden through opts.
    @param opts: optional dict of overrides: 'color', 'color2',
        'start_pad', 'end_pad', 'x_extent', 'y_extent', 'linestyle',
        'linewidth', 'scale', 'label' (+ label placement options).
    @return: (final_start, final_end) consumed by the DNA renderer.
    """
    # Default parameters
    color = (0,0,0)
    color2 = (0,0,0)
    start_pad = 0.0
    end_pad = 0.0
    x_extent = 6.0
    y_extent = 6.0
    linestyle = '-'
    # Update default parameters if provided
    if opts != None:
        if 'start_pad' in list(opts.keys()):
            start_pad = opts['start_pad']
        if 'end_pad' in list(opts.keys()):
            end_pad = opts['end_pad']
        if 'x_extent' in list(opts.keys()):
            x_extent = opts['x_extent']
        if 'y_extent' in list(opts.keys()):
            y_extent = opts['y_extent']
        if 'linestyle' in list(opts.keys()):
            linestyle = opts['linestyle']
        if 'linewidth' in list(opts.keys()):
            linewidth = opts['linewidth']
        if 'scale' in list(opts.keys()):
            scale = opts['scale']
        if 'color' in list(opts.keys()):
            color = opts['color']
        if 'color2' in list(opts.keys()):
            color2 = opts['color2']
    # Check direction add start padding
    final_end = end
    final_start = prev_end
    y_lower = -1 * y_extent/2
    y_upper = y_extent/2
    if start > end:
        # Reverse orientation: draw leftward-pointing and use color2.
        start = prev_end+end_pad+x_extent+linewidth
        end = prev_end+end_pad
        final_end = start+start_pad
        color = color2
    else:
        start = prev_end+start_pad+linewidth
        end = start+x_extent
        final_end = end+end_pad
    # Draw the site as a filled triangle pointing from start to end.
    p1 = Polygon([(start, y_lower),
                  (start, y_upper),
                  (end,0)],
                  edgecolor=(0,0,0), facecolor=color, linewidth=linewidth, zorder=11,
                  path_effects=[Stroke(joinstyle="miter")])
    ax.add_patch(p1)
    # Add a label if needed.
    # BUG FIX: the original called the bare name write_label, which is
    # never defined or imported in this script and raised NameError as
    # soon as a 'label' opt was supplied; the helper lives at module
    # level in dnaplotlib, so it is reached as dpl.write_label.
    if opts != None and 'label' in list(opts.keys()):
        if final_start > final_end:
            dpl.write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
        else:
            dpl.write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
    # Return the final start and end positions to the DNA renderer
    if final_start > final_end:
        return prev_end, final_start
    else:
        return prev_end, final_end
def sbol_recombinase2 (ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ SBOL recombinase site renderer - reverse direction

    Draws a two-tone triangle (flipped/recombined site): a base triangle
    in `color` with a smaller nested tip triangle in `color2`.  When
    'color2' is not supplied but 'color' is, color2 defaults to a
    half-intensity version of color.

    @param ax: matplotlib axis to draw onto.
    @param type, num: part type and index (unused; kept for the
        dnaplotlib part-renderer call signature).
    @param start, end: requested extent of the part along the axis.
    @param prev_end: end coordinate of the previously rendered part.
    @param scale, linewidth: global rendering parameters; both may be
        overridden through opts.
    @param opts: optional dict of overrides (see sbol_recombinase1).
    @return: (final_start, final_end) consumed by the DNA renderer.
    """
    # Default parameters
    color = (0,0,0)
    color2 = (0,0,0)
    start_pad = 0.0
    end_pad = 0.0
    x_extent = 6.0
    y_extent = 6.0
    linestyle = '-'
    # Update default parameters if provided
    if opts != None:
        if 'start_pad' in list(opts.keys()):
            start_pad = opts['start_pad']
        if 'end_pad' in list(opts.keys()):
            end_pad = opts['end_pad']
        if 'x_extent' in list(opts.keys()):
            x_extent = opts['x_extent']
        if 'y_extent' in list(opts.keys()):
            y_extent = opts['y_extent']
        if 'linestyle' in list(opts.keys()):
            linestyle = opts['linestyle']
        if 'linewidth' in list(opts.keys()):
            linewidth = opts['linewidth']
        if 'scale' in list(opts.keys()):
            scale = opts['scale']
        if 'color' in list(opts.keys()):
            color = opts['color']
        if 'color2' in list(opts.keys()):
            color2 = opts['color2']
        else:
            # No explicit color2: derive a darker (half-intensity) tone.
            if 'color' in list(opts.keys()):
                r2 = float(color[0]) / 2
                g2 = float(color[1]) / 2
                b2 = float(color[2]) / 2
                color2 = (r2,g2,b2)
    # Check direction add start padding
    final_end = end
    final_start = prev_end
    y_lower = -1 * y_extent/2
    y_upper = y_extent/2
    if start > end:
        # Reverse orientation: draw leftward-pointing and swap colours.
        start = prev_end+end_pad+x_extent+linewidth
        end = prev_end+end_pad
        final_end = start+start_pad
        temp = color
        color = color2
        color2 = temp
    else:
        start = prev_end+start_pad+linewidth
        end = start+x_extent
        final_end = end+end_pad
    # Draw the base triangle of the site.
    p1 = Polygon([(start, y_lower),
                  (start, y_upper),
                  (end,0)],
                  edgecolor=(0,0,0), facecolor=color, linewidth=linewidth, zorder=11,
                  path_effects=[Stroke(joinstyle="miter")])
    # Nested tip triangle: its base sits at the midpoint, scaled so its
    # edges stay parallel to the outer triangle's edges.
    midpoint = (end + start) / 2
    hypotenuse = math.sqrt( (y_extent/2)**2 + (x_extent)**2 )
    hypotenuse2 = hypotenuse / 2
    cosineA = (y_extent/2) / hypotenuse
    f = hypotenuse2 * cosineA
    p2 = Polygon([(midpoint, -1*f),
                  (midpoint, f),
                  (end,0)],
                  edgecolor=(0,0,0), facecolor=color2, linewidth=linewidth, zorder=12,
                  path_effects=[Stroke(joinstyle="miter")])
    ax.add_patch(p1)
    ax.add_patch(p2)
    # Add a label if needed.
    # BUG FIX: the original called the bare name write_label, which is
    # never defined or imported in this script and raised NameError as
    # soon as a 'label' opt was supplied; the helper lives at module
    # level in dnaplotlib, so it is reached as dpl.write_label.
    if opts != None and 'label' in list(opts.keys()):
        if final_start > final_end:
            dpl.write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
        else:
            dpl.write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
    # Return the final start and end positions to the DNA renderer
    if final_start > final_end:
        return prev_end, final_start
    else:
        return prev_end, final_end
# Function to calculate darker colour
def dark(col, fac=2.0):
    """Return an RGB triple darkened by dividing each channel by *fac*."""
    r, g, b = col[0], col[1], col[2]
    return (r / fac, g / fac, b / fac)
# Global line width
lw = 1.0
# Create the DNAplotlib renderer
dr = dpl.DNARenderer()
# Use default renderers and append our custom ones for recombinases
reg_renderers = dr.std_reg_renderers()
part_renderers = dr.SBOL_part_renderers()
part_renderers['RecombinaseSite'] = sbol_recombinase1
part_renderers['RecombinaseSite2'] = sbol_recombinase2
# Create the construct programmably to plot
# -- Spacers and homologous recombination targets (HRT) ----------------------
sp = {'type':'EmptySpace', 'name':'S1', 'fwd':True, 'opts':{'x_extent':1}}
hrtR = {'type':'EmptySpace', 'name':'S1', 'fwd':True, 'opts':{'x_extent':5, 'label':'prqR HRT', 'label_y_offset':-5}}
hrtRA = {'type':'EmptySpace', 'name':'S1', 'fwd':True, 'opts':{'x_extent':5, 'label':'prqRA HRT', 'label_y_offset':-5}}
hrtA = {'type':'EmptySpace', 'name':'S1', 'fwd':True, 'opts':{'x_extent':5, 'label':'prqA HRT', 'label_y_offset':-5}}
# -- Promoters (Anderson series, shaded by GreenCmap) ------------------------
# NOTE(review): name 'J12117' looks like a typo for 'J23117' (the label
# says J23117) -- confirm against the plasmid records.
J105 = {'type':'Promoter', 'name':'J23105', 'fwd':True,'opts':{'linewidth':lw, 'color':GreenCmap(0.2), 'label':'J23105', 'label_y_offset':-5}}
J114 = {'type':'Promoter', 'name':'J23114', 'fwd':True,'opts':{'linewidth':lw, 'color':GreenCmap(0.4), 'label':'J23114', 'label_y_offset':-8}}
J117 = {'type':'Promoter', 'name':'J12117', 'fwd':True,'opts':{'linewidth':lw, 'color':GreenCmap(0.6), 'label':'J23117', 'label_y_offset':-8}}
J119 = {'type':'Promoter', 'name':'J23119', 'fwd':True,'opts':{'linewidth':lw, 'color':GreenCmap(0.8), 'label':'J23119', 'label_y_offset':-8}}
Plac = {'type':'Promoter', 'name':'Plac', 'fwd':True,'opts':{'linewidth':lw, 'color':col_map['green'], 'label':'Plac', 'label_y_offset':-8}}
# -- RBS, terminators and coding sequences -----------------------------------
rbs = {'type':'RBS', 'name':'rbs', 'fwd':True, 'opts':{'color':col_map['gray']}}
TluxI = {'type':'Terminator', 'name':'term', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'label':'TluxI', 'label_y_offset':-8}}
TcpcG1 = {'type':'Terminator', 'name':'term', 'fwd':True, 'opts':{'linewidth':lw, 'color':col_map['black'], 'label':'TcpcG1', 'label_y_offset':-8}}
cas12a = {'type':'CDS', 'name':'cds', 'fwd':True, 'opts':{'color':col_map['purple'], 'label':'cas12a', 'label_x_offset':-2, 'label_y_offset':-0.5, 'label_style':'italic'}}
gRNA_R = {'type':'CDS', 'name':'cds', 'fwd':True, 'opts':{'color':col_map['red'], 'label':'prqR gRNA', 'label_x_offset':-0.3, 'label_y_offset':-0.5, 'label_style':'italic'}}
gRNA_A = {'type':'CDS', 'name':'cds', 'fwd':True, 'opts':{'color':col_map['orange'], 'label':'prqA gRNA', 'label_x_offset':-0.3, 'label_y_offset':-0.5, 'label_style':'italic'}}
prqR = {'type':'CDS', 'name':'prqR', 'fwd':True, 'opts':{'color':col_map['red'], 'label':'prqR', 'label_x_offset':-2, 'label_y_offset':-0.5, 'label_style':'italic'}}
prqA = {'type':'CDS', 'name':'prqA', 'fwd':True, 'opts':{'color':col_map['orange'], 'label':'prqA', 'label_x_offset':-2, 'label_y_offset':-0.5, 'label_style':'italic'}}
# -- Recombinase sites (rendered by the custom renderers above) --------------
recRf = {'type':'RecombinaseSite', 'name':'a1', 'fwd':True, 'opts':{'color':prqRmap(0), 'color2':prqRmap(0), 'x_extent':16, 'y_extent':12, 'start_pad':3, 'end_pad':3}}
recRr = {'type':'RecombinaseSite', 'name':'a2', 'fwd':False, 'opts':{'color':prqRmap(1), 'color2':prqRmap(1), 'x_extent':16, 'y_extent':12, 'start_pad':3, 'end_pad':3}}
recAf = {'type':'RecombinaseSite', 'name':'a3', 'fwd':True, 'opts':{'color':prqAmap(0), 'color2':prqAmap(0), 'x_extent':16, 'y_extent':12, 'start_pad':3, 'end_pad':3}}
recAr = {'type':'RecombinaseSite', 'name':'a4', 'fwd':False, 'opts':{'color':prqAmap(1), 'color2':prqAmap(1), 'x_extent':16, 'y_extent':12, 'start_pad':3, 'end_pad':3}}
recRAf = {'type':'RecombinaseSite', 'name':'a1', 'fwd':True, 'opts':{'color':prqRmap(0), 'color2':prqRmap(0), 'x_extent':16, 'y_extent':12, 'start_pad':3, 'end_pad':3}}
recRAr = {'type':'RecombinaseSite', 'name':'a2', 'fwd':False, 'opts':{'color':prqAmap(1), 'color2':prqAmap(1), 'x_extent':16, 'y_extent':12, 'start_pad':3, 'end_pad':3}}
# -- Golden-gate style sticky-end restriction sites --------------------------
TGCC= {'type':'5StickyRestrictionSite', 'name':'5SRS', 'fwd':True, 'opts':{'color':col_map['black'], 'label':'TGCC', 'label_x_offset':-1, 'label_y_offset':6}}
CGTT= {'type':'3StickyRestrictionSite', 'name':'3SRS', 'fwd':True, 'opts':{'color':col_map['black'], 'label':'CGTT', 'label_x_offset':1, 'label_y_offset':-8}}
GCAA= {'type':'5StickyRestrictionSite', 'name':'5SRS', 'fwd':True, 'opts':{'color':col_map['black'], 'label':'GCAA', 'label_x_offset':-1, 'label_y_offset':6}}
TGAT= {'type':'3StickyRestrictionSite', 'name':'3SRS', 'fwd':True, 'opts':{'color':col_map['black'], 'label':'TGAT', 'label_x_offset':1, 'label_y_offset':-8}}
ACTA= {'type':'5StickyRestrictionSite', 'name':'5SRS', 'fwd':True, 'opts':{'color':col_map['black'], 'label':'ACTA', 'label_x_offset':-1, 'label_y_offset':6}}
AATG= {'type':'3StickyRestrictionSite', 'name':'3SRS', 'fwd':True, 'opts':{'color':col_map['black'], 'label':'AATG', 'label_x_offset':1, 'label_y_offset':-8}}
# -- Plasmid designs: ordered part lists, one per subplot --------------------
pJML1017 = [TGCC, J105, rbs, cas12a, TcpcG1, CGTT]
pJML1016 = [TGCC, J114, rbs, cas12a, TcpcG1, CGTT]
pJML1015 = [TGCC, J117, rbs, cas12a, TcpcG1, CGTT]
pAS104 = [GCAA, J119, gRNA_R, TluxI, TGAT]
pAS109 = [GCAA, J119, gRNA_A, TluxI, TGAT]
pAS101 = [ACTA, recRf, hrtR, recRr, AATG]
pAS102 = [ACTA, recRAf, hrtRA, recRAr, AATG]
pAS106 = [ACTA, recAf, hrtA, recAr, AATG]
# Create the figure: one subplot per construct, all formatted identically.
# The original repeated the render/format boilerplate eight times; it is
# now a single loop over (axis, design) pairs.
designs = [pJML1017, pJML1016, pJML1015, pAS104, pAS109, pAS101, pAS102, pAS106]
fig = plt.figure(figsize=(12,4))
gs = gridspec.GridSpec(2, 4)
axes = [plt.subplot(gs[i]) for i in range(len(designs))]
# Keep the historical per-axis names available for later additions.
(ax_dna1, ax_dna2, ax_dna3, ax_dna4,
 ax_dna5, ax_dna6, ax_dna7, ax_dna8) = axes
# Render each DNA design onto its axis and strip all axis decoration.
for ax_dna, design in zip(axes, designs):
    start, end = dr.renderDNA(ax_dna, design, part_renderers)
    ax_dna.set_xlim([start, end])
    ax_dna.set_ylim([-18,20])
    ax_dna.set_aspect('equal')
    ax_dna.set_xticks([])
    ax_dna.set_yticks([])
    ax_dna.axis('off')
# Update subplot spacing
plt.subplots_adjust(hspace=0.01, left=0.05, right=0.95, top=0.92, bottom=0.01)
# Save the figure
fig.savefig('crisprplasmids.png', dpi = 600)
# Clear the plotting cache
plt.close('all')
0d6993d1de4fca2fe18d6fada4d0381421cd473f | 1,624 | py | Python | program/convert_ui.py | Addision/ProtoExcelTool | 17eaa08c08312e59c5a8f6114a121177ca65508a | [
"MIT"
] | null | null | null | program/convert_ui.py | Addision/ProtoExcelTool | 17eaa08c08312e59c5a8f6114a121177ca65508a | [
"MIT"
] | null | null | null | program/convert_ui.py | Addision/ProtoExcelTool | 17eaa08c08312e59c5a8f6114a121177ca65508a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : convertUI.py
@Time : 2022/02/28 00:08:31
@Author : felix
@Version : 1.0
@Contact : laijia2008@126.com
@License : (C)Copyright 2021-2025, felix&lai
@Desc : None
'''
# here put the import lib
import os
import sys
def ConvertUI():
print(os.path.abspath(sys.argv[0]))
path = os.path.split(os.path.abspath(sys.argv[0]))[0]
os.system("cd "+path)
print("-------------------------------------------------------")
print(os.getcwd())
os.system("pyuic5 -o ./uipy/add_tmpl_ui.py ../designer/ui/add_tmpl.ui")
os.system("pyuic5 -o ./uipy/modify_enum_ui.py ../designer/ui/modify_enum.ui")
os.system("pyuic5 -o ./uipy/modify_proto_ui.py ../designer/ui/modify_proto.ui")
os.system("pyuic5 -o ./uipy/create_proto_ui.py ../designer/ui/create_proto.ui")
os.system("pyuic5 -o ./uipy/modify_tmpl_ui.py ../designer/ui/modify_tmpl.ui")
os.system("pyuic5 -o ./uipy/proto_tool_ui.py ../designer/ui/proto_tool.ui")
os.system("pyuic5 -o ./uipy/setting_ui.py ../designer/ui/setting.ui")
os.system("pyuic5 -o ./uipy/create_dir_ui.py ../designer/ui/create_dir.ui")
os.system("pyuic5 -o ./uipy/modify_dir_ui.py ../designer/ui/modify_dir.ui")
os.system("pyuic5 -o ./uipy/create_enum_ui.py ../designer/ui/create_enum.ui")
os.system("pyuic5 -o ./uipy/modify_enum_ui.py ../designer/ui/modify_enum.ui")
# os.system("pyrcc5 -o res_rc.py icons/res.qrc")
print("transport ui to py ok...")
print("-------------------------------------------------------")
if __name__ == "__main__":
ConvertUI()
| 37.767442 | 83 | 0.613916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,184 | 0.729064 |
0d6bbc4a1deb02ea0fe6dd3bf20550182a612637 | 71,145 | py | Python | third_party/ray/core/generated/common_pb2.py | HuantWang/SUPERSONIC | bea7090e8bc4a54ed52495dd910ef946c88bec67 | [
"CC-BY-4.0"
] | 78 | 2022-02-02T00:23:02.000Z | 2022-03-15T11:44:02.000Z | third_party/ray/core/generated/common_pb2.py | HuantWang/SUPERSONIC | bea7090e8bc4a54ed52495dd910ef946c88bec67 | [
"CC-BY-4.0"
] | null | null | null | third_party/ray/core/generated/common_pb2.py | HuantWang/SUPERSONIC | bea7090e8bc4a54ed52495dd910ef946c88bec67 | [
"CC-BY-4.0"
] | 3 | 2022-01-30T05:10:14.000Z | 2022-03-04T21:18:44.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: src/ray/protobuf/common.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='src/ray/protobuf/common.proto',
package='ray.rpc',
syntax='proto3',
serialized_options=_b('\n\030io.ray.runtime.generated'),
serialized_pb=_b('\n\x1dsrc/ray/protobuf/common.proto\x12\x07ray.rpc\"v\n\x07\x41\x64\x64ress\x12\x1b\n\traylet_id\x18\x01 \x01(\x0cR\x08rayletId\x12\x1d\n\nip_address\x18\x02 \x01(\tR\tipAddress\x12\x12\n\x04port\x18\x03 \x01(\x05R\x04port\x12\x1b\n\tworker_id\x18\x04 \x01(\x0cR\x08workerId\"z\n\x16JavaFunctionDescriptor\x12\x1d\n\nclass_name\x18\x01 \x01(\tR\tclassName\x12#\n\rfunction_name\x18\x02 \x01(\tR\x0c\x66unctionName\x12\x1c\n\tsignature\x18\x03 \x01(\tR\tsignature\"\xa4\x01\n\x18PythonFunctionDescriptor\x12\x1f\n\x0bmodule_name\x18\x01 \x01(\tR\nmoduleName\x12\x1d\n\nclass_name\x18\x02 \x01(\tR\tclassName\x12#\n\rfunction_name\x18\x03 \x01(\tR\x0c\x66unctionName\x12#\n\rfunction_hash\x18\x04 \x01(\tR\x0c\x66unctionHash\"\x8d\x01\n\x15\x43ppFunctionDescriptor\x12\x19\n\x08lib_name\x18\x01 \x01(\tR\x07libName\x12\'\n\x0f\x66unction_offset\x18\x02 \x01(\tR\x0e\x66unctionOffset\x12\x30\n\x14\x65xec_function_offset\x18\x03 \x01(\tR\x12\x65xecFunctionOffset\"\xc5\x02\n\x12\x46unctionDescriptor\x12[\n\x18java_function_descriptor\x18\x01 \x01(\x0b\x32\x1f.ray.rpc.JavaFunctionDescriptorH\x00R\x16javaFunctionDescriptor\x12\x61\n\x1apython_function_descriptor\x18\x02 \x01(\x0b\x32!.ray.rpc.PythonFunctionDescriptorH\x00R\x18pythonFunctionDescriptor\x12X\n\x17\x63pp_function_descriptor\x18\x03 \x01(\x0b\x32\x1e.ray.rpc.CppFunctionDescriptorH\x00R\x15\x63ppFunctionDescriptorB\x15\n\x13\x66unction_descriptor\"\xe5\x07\n\x08TaskSpec\x12%\n\x04type\x18\x01 \x01(\x0e\x32\x11.ray.rpc.TaskTypeR\x04type\x12-\n\x08language\x18\x02 \x01(\x0e\x32\x11.ray.rpc.LanguageR\x08language\x12L\n\x13\x66unction_descriptor\x18\x03 \x01(\x0b\x32\x1b.ray.rpc.FunctionDescriptorR\x12\x66unctionDescriptor\x12\x15\n\x06job_id\x18\x04 \x01(\x0cR\x05jobId\x12\x17\n\x07task_id\x18\x05 \x01(\x0cR\x06taskId\x12$\n\x0eparent_task_id\x18\x06 \x01(\x0cR\x0cparentTaskId\x12%\n\x0eparent_counter\x18\x07 \x01(\x04R\rparentCounter\x12\x1b\n\tcaller_id\x18\x08 
\x01(\x0cR\x08\x63\x61llerId\x12\x37\n\x0e\x63\x61ller_address\x18\t \x01(\x0b\x32\x10.ray.rpc.AddressR\rcallerAddress\x12$\n\x04\x61rgs\x18\n \x03(\x0b\x32\x10.ray.rpc.TaskArgR\x04\x61rgs\x12\x1f\n\x0bnum_returns\x18\x0b \x01(\x04R\nnumReturns\x12W\n\x12required_resources\x18\x0c \x03(\x0b\x32(.ray.rpc.TaskSpec.RequiredResourcesEntryR\x11requiredResources\x12s\n\x1crequired_placement_resources\x18\r \x03(\x0b\x32\x31.ray.rpc.TaskSpec.RequiredPlacementResourcesEntryR\x1arequiredPlacementResources\x12W\n\x18\x61\x63tor_creation_task_spec\x18\x0e \x01(\x0b\x32\x1e.ray.rpc.ActorCreationTaskSpecR\x15\x61\x63torCreationTaskSpec\x12>\n\x0f\x61\x63tor_task_spec\x18\x0f \x01(\x0b\x32\x16.ray.rpc.ActorTaskSpecR\ractorTaskSpec\x12\x1f\n\x0bmax_retries\x18\x10 \x01(\x05R\nmaxRetries\x1a\x44\n\x16RequiredResourcesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\x01R\x05value:\x02\x38\x01\x1aM\n\x1fRequiredPlacementResourcesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\x01R\x05value:\x02\x38\x01\"\x86\x01\n\x07TaskArg\x12\x1d\n\nobject_ids\x18\x01 \x03(\x0cR\tobjectIds\x12\x12\n\x04\x64\x61ta\x18\x02 \x01(\x0cR\x04\x64\x61ta\x12\x1a\n\x08metadata\x18\x03 \x01(\x0cR\x08metadata\x12,\n\x12nested_inlined_ids\x18\x04 \x03(\x0cR\x10nestedInlinedIds\"\xba\x02\n\x15\x41\x63torCreationTaskSpec\x12\x19\n\x08\x61\x63tor_id\x18\x02 \x01(\x0cR\x07\x61\x63torId\x12,\n\x12max_actor_restarts\x18\x03 \x01(\x03R\x10maxActorRestarts\x12\x34\n\x16\x64ynamic_worker_options\x18\x04 \x03(\tR\x14\x64ynamicWorkerOptions\x12\'\n\x0fmax_concurrency\x18\x05 \x01(\x05R\x0emaxConcurrency\x12\x1f\n\x0bis_detached\x18\x06 \x01(\x08R\nisDetached\x12\x12\n\x04name\x18\x07 \x01(\tR\x04name\x12\x1d\n\nis_asyncio\x18\x08 \x01(\x08R\tisAsyncio\x12%\n\x0e\x65xtension_data\x18\t \x01(\tR\rextensionData\"\xe0\x01\n\rActorTaskSpec\x12\x19\n\x08\x61\x63tor_id\x18\x02 \x01(\x0cR\x07\x61\x63torId\x12\x42\n\x1e\x61\x63tor_creation_dummy_object_id\x18\x04 
\x01(\x0cR\x1a\x61\x63torCreationDummyObjectId\x12#\n\ractor_counter\x18\x05 \x01(\x04R\x0c\x61\x63torCounter\x12K\n#previous_actor_task_dummy_object_id\x18\x07 \x01(\x0cR\x1epreviousActorTaskDummyObjectId\"]\n\x11TaskExecutionSpec\x12%\n\x0elast_timestamp\x18\x02 \x01(\x01R\rlastTimestamp\x12!\n\x0cnum_forwards\x18\x03 \x01(\x04R\x0bnumForwards\"\x82\x01\n\x04Task\x12.\n\ttask_spec\x18\x01 \x01(\x0b\x32\x11.ray.rpc.TaskSpecR\x08taskSpec\x12J\n\x13task_execution_spec\x18\x02 \x01(\x0b\x32\x1a.ray.rpc.TaskExecutionSpecR\x11taskExecutionSpec\">\n\nResourceId\x12\x14\n\x05index\x18\x01 \x01(\x03R\x05index\x12\x1a\n\x08quantity\x18\x02 \x01(\x01R\x08quantity\"^\n\x10ResourceMapEntry\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x36\n\x0cresource_ids\x18\x02 \x03(\x0b\x32\x13.ray.rpc.ResourceIdR\x0bresourceIds\"\xf4\x03\n\x08ViewData\x12\x1b\n\tview_name\x18\x01 \x01(\tR\x08viewName\x12\x35\n\x08measures\x18\x02 \x03(\x0b\x32\x19.ray.rpc.ViewData.MeasureR\x08measures\x1a\x93\x03\n\x07Measure\x12\x12\n\x04tags\x18\x01 \x01(\tR\x04tags\x12\x1b\n\tint_value\x18\x02 \x01(\x03R\x08intValue\x12!\n\x0c\x64ouble_value\x18\x03 \x01(\x01R\x0b\x64oubleValue\x12)\n\x10\x64istribution_min\x18\x04 \x01(\x01R\x0f\x64istributionMin\x12+\n\x11\x64istribution_mean\x18\x05 \x01(\x01R\x10\x64istributionMean\x12)\n\x10\x64istribution_max\x18\x06 \x01(\x01R\x0f\x64istributionMax\x12-\n\x12\x64istribution_count\x18\x07 \x01(\x01R\x11\x64istributionCount\x12\x44\n\x1e\x64istribution_bucket_boundaries\x18\x08 \x03(\x01R\x1c\x64istributionBucketBoundaries\x12<\n\x1a\x64istribution_bucket_counts\x18\t \x03(\x01R\x18\x64istributionBucketCounts\"\xa3\x02\n\rObjectRefInfo\x12\x1b\n\tobject_id\x18\x01 \x01(\x0cR\x08objectId\x12\x1b\n\tcall_site\x18\x02 \x01(\tR\x08\x63\x61llSite\x12\x1f\n\x0bobject_size\x18\x03 \x01(\x03R\nobjectSize\x12&\n\x0flocal_ref_count\x18\x04 \x01(\x03R\rlocalRefCount\x12\x37\n\x18submitted_task_ref_count\x18\x05 
\x01(\x03R\x15submittedTaskRefCount\x12,\n\x12\x63ontained_in_owned\x18\x06 \x03(\x0cR\x10\x63ontainedInOwned\x12(\n\x10pinned_in_memory\x18\x07 \x01(\x08R\x0epinnedInMemory\"\x87\x07\n\x0f\x43oreWorkerStats\x12*\n\x11\x63urrent_task_desc\x18\x01 \x01(\tR\x0f\x63urrentTaskDesc\x12*\n\x11num_pending_tasks\x18\x02 \x01(\x05R\x0fnumPendingTasks\x12\x34\n\x17num_object_ids_in_scope\x18\x03 \x01(\x05R\x13numObjectIdsInScope\x12\x33\n\x16\x63urrent_task_func_desc\x18\x04 \x01(\tR\x13\x63urrentTaskFuncDesc\x12\x1d\n\nip_address\x18\x06 \x01(\tR\tipAddress\x12\x12\n\x04port\x18\x07 \x01(\x03R\x04port\x12\x19\n\x08\x61\x63tor_id\x18\x08 \x01(\x0cR\x07\x61\x63torId\x12R\n\x0eused_resources\x18\t \x03(\x0b\x32+.ray.rpc.CoreWorkerStats.UsedResourcesEntryR\rusedResources\x12O\n\rwebui_display\x18\n \x03(\x0b\x32*.ray.rpc.CoreWorkerStats.WebuiDisplayEntryR\x0cwebuiDisplay\x12\"\n\rnum_in_plasma\x18\x0b \x01(\x05R\x0bnumInPlasma\x12*\n\x11num_local_objects\x18\x0c \x01(\x05R\x0fnumLocalObjects\x12\x37\n\x18used_object_store_memory\x18\r \x01(\x03R\x15usedObjectStoreMemory\x12*\n\x11task_queue_length\x18\x0e \x01(\x05R\x0ftaskQueueLength\x12,\n\x12num_executed_tasks\x18\x0f \x01(\x05R\x10numExecutedTasks\x12\x1f\n\x0b\x61\x63tor_title\x18\x10 \x01(\tR\nactorTitle\x12\x37\n\x0bobject_refs\x18\x11 \x03(\x0b\x32\x16.ray.rpc.ObjectRefInfoR\nobjectRefs\x1a@\n\x12UsedResourcesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\x01R\x05value:\x02\x38\x01\x1a?\n\x11WebuiDisplayEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01*)\n\x08Language\x12\n\n\x06PYTHON\x10\x00\x12\x08\n\x04JAVA\x10\x01\x12\x07\n\x03\x43PP\x10\x02*$\n\nWorkerType\x12\n\n\x06WORKER\x10\x00\x12\n\n\x06\x44RIVER\x10\x01*U\n\x08TaskType\x12\x0f\n\x0bNORMAL_TASK\x10\x00\x12\x17\n\x13\x41\x43TOR_CREATION_TASK\x10\x01\x12\x0e\n\nACTOR_TASK\x10\x02\x12\x0f\n\x0b\x44RIVER_TASK\x10\x03\x42\x1a\n\x18io.ray.runtime.generatedb\x06proto3')
)
_LANGUAGE = _descriptor.EnumDescriptor(
name='Language',
full_name='ray.rpc.Language',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PYTHON', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JAVA', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CPP', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=4697,
serialized_end=4738,
)
_sym_db.RegisterEnumDescriptor(_LANGUAGE)
Language = enum_type_wrapper.EnumTypeWrapper(_LANGUAGE)
_WORKERTYPE = _descriptor.EnumDescriptor(
name='WorkerType',
full_name='ray.rpc.WorkerType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='WORKER', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DRIVER', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=4740,
serialized_end=4776,
)
_sym_db.RegisterEnumDescriptor(_WORKERTYPE)
WorkerType = enum_type_wrapper.EnumTypeWrapper(_WORKERTYPE)
_TASKTYPE = _descriptor.EnumDescriptor(
name='TaskType',
full_name='ray.rpc.TaskType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NORMAL_TASK', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTOR_CREATION_TASK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTOR_TASK', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DRIVER_TASK', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=4778,
serialized_end=4863,
)
_sym_db.RegisterEnumDescriptor(_TASKTYPE)
TaskType = enum_type_wrapper.EnumTypeWrapper(_TASKTYPE)
PYTHON = 0
JAVA = 1
CPP = 2
WORKER = 0
DRIVER = 1
NORMAL_TASK = 0
ACTOR_CREATION_TASK = 1
ACTOR_TASK = 2
DRIVER_TASK = 3
_ADDRESS = _descriptor.Descriptor(
name='Address',
full_name='ray.rpc.Address',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='raylet_id', full_name='ray.rpc.Address.raylet_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='rayletId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip_address', full_name='ray.rpc.Address.ip_address', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='ipAddress', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='ray.rpc.Address.port', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='port', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='worker_id', full_name='ray.rpc.Address.worker_id', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='workerId', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=42,
serialized_end=160,
)
_JAVAFUNCTIONDESCRIPTOR = _descriptor.Descriptor(
name='JavaFunctionDescriptor',
full_name='ray.rpc.JavaFunctionDescriptor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class_name', full_name='ray.rpc.JavaFunctionDescriptor.class_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='className', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='function_name', full_name='ray.rpc.JavaFunctionDescriptor.function_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='functionName', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='ray.rpc.JavaFunctionDescriptor.signature', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='signature', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=284,
)
_PYTHONFUNCTIONDESCRIPTOR = _descriptor.Descriptor(
name='PythonFunctionDescriptor',
full_name='ray.rpc.PythonFunctionDescriptor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='module_name', full_name='ray.rpc.PythonFunctionDescriptor.module_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='moduleName', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_name', full_name='ray.rpc.PythonFunctionDescriptor.class_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='className', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='function_name', full_name='ray.rpc.PythonFunctionDescriptor.function_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='functionName', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='function_hash', full_name='ray.rpc.PythonFunctionDescriptor.function_hash', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='functionHash', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=287,
serialized_end=451,
)
_CPPFUNCTIONDESCRIPTOR = _descriptor.Descriptor(
name='CppFunctionDescriptor',
full_name='ray.rpc.CppFunctionDescriptor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lib_name', full_name='ray.rpc.CppFunctionDescriptor.lib_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='libName', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='function_offset', full_name='ray.rpc.CppFunctionDescriptor.function_offset', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='functionOffset', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exec_function_offset', full_name='ray.rpc.CppFunctionDescriptor.exec_function_offset', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='execFunctionOffset', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=454,
serialized_end=595,
)
_FUNCTIONDESCRIPTOR = _descriptor.Descriptor(
name='FunctionDescriptor',
full_name='ray.rpc.FunctionDescriptor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='java_function_descriptor', full_name='ray.rpc.FunctionDescriptor.java_function_descriptor', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='javaFunctionDescriptor', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='python_function_descriptor', full_name='ray.rpc.FunctionDescriptor.python_function_descriptor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='pythonFunctionDescriptor', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cpp_function_descriptor', full_name='ray.rpc.FunctionDescriptor.cpp_function_descriptor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='cppFunctionDescriptor', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='function_descriptor', full_name='ray.rpc.FunctionDescriptor.function_descriptor',
index=0, containing_type=None, fields=[]),
],
serialized_start=598,
serialized_end=923,
)
_TASKSPEC_REQUIREDRESOURCESENTRY = _descriptor.Descriptor(
name='RequiredResourcesEntry',
full_name='ray.rpc.TaskSpec.RequiredResourcesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ray.rpc.TaskSpec.RequiredResourcesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ray.rpc.TaskSpec.RequiredResourcesEntry.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1776,
serialized_end=1844,
)
_TASKSPEC_REQUIREDPLACEMENTRESOURCESENTRY = _descriptor.Descriptor(
name='RequiredPlacementResourcesEntry',
full_name='ray.rpc.TaskSpec.RequiredPlacementResourcesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ray.rpc.TaskSpec.RequiredPlacementResourcesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ray.rpc.TaskSpec.RequiredPlacementResourcesEntry.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1846,
serialized_end=1923,
)
_TASKSPEC = _descriptor.Descriptor(
name='TaskSpec',
full_name='ray.rpc.TaskSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='ray.rpc.TaskSpec.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='type', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language', full_name='ray.rpc.TaskSpec.language', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='language', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='function_descriptor', full_name='ray.rpc.TaskSpec.function_descriptor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='functionDescriptor', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='ray.rpc.TaskSpec.job_id', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='jobId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ray.rpc.TaskSpec.task_id', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='taskId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_task_id', full_name='ray.rpc.TaskSpec.parent_task_id', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='parentTaskId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_counter', full_name='ray.rpc.TaskSpec.parent_counter', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='parentCounter', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='caller_id', full_name='ray.rpc.TaskSpec.caller_id', index=7,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='callerId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='caller_address', full_name='ray.rpc.TaskSpec.caller_address', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='callerAddress', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='args', full_name='ray.rpc.TaskSpec.args', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='args', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_returns', full_name='ray.rpc.TaskSpec.num_returns', index=10,
number=11, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numReturns', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='required_resources', full_name='ray.rpc.TaskSpec.required_resources', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='requiredResources', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='required_placement_resources', full_name='ray.rpc.TaskSpec.required_placement_resources', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='requiredPlacementResources', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actor_creation_task_spec', full_name='ray.rpc.TaskSpec.actor_creation_task_spec', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorCreationTaskSpec', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actor_task_spec', full_name='ray.rpc.TaskSpec.actor_task_spec', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorTaskSpec', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_retries', full_name='ray.rpc.TaskSpec.max_retries', index=15,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='maxRetries', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TASKSPEC_REQUIREDRESOURCESENTRY, _TASKSPEC_REQUIREDPLACEMENTRESOURCESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=926,
serialized_end=1923,
)
_TASKARG = _descriptor.Descriptor(
name='TaskArg',
full_name='ray.rpc.TaskArg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='object_ids', full_name='ray.rpc.TaskArg.object_ids', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='objectIds', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='ray.rpc.TaskArg.data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='data', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ray.rpc.TaskArg.metadata', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='metadata', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nested_inlined_ids', full_name='ray.rpc.TaskArg.nested_inlined_ids', index=3,
number=4, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='nestedInlinedIds', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1926,
serialized_end=2060,
)
_ACTORCREATIONTASKSPEC = _descriptor.Descriptor(
name='ActorCreationTaskSpec',
full_name='ray.rpc.ActorCreationTaskSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='actor_id', full_name='ray.rpc.ActorCreationTaskSpec.actor_id', index=0,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_actor_restarts', full_name='ray.rpc.ActorCreationTaskSpec.max_actor_restarts', index=1,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='maxActorRestarts', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dynamic_worker_options', full_name='ray.rpc.ActorCreationTaskSpec.dynamic_worker_options', index=2,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='dynamicWorkerOptions', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_concurrency', full_name='ray.rpc.ActorCreationTaskSpec.max_concurrency', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='maxConcurrency', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_detached', full_name='ray.rpc.ActorCreationTaskSpec.is_detached', index=4,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='isDetached', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='ray.rpc.ActorCreationTaskSpec.name', index=5,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='name', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_asyncio', full_name='ray.rpc.ActorCreationTaskSpec.is_asyncio', index=6,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='isAsyncio', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extension_data', full_name='ray.rpc.ActorCreationTaskSpec.extension_data', index=7,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='extensionData', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2063,
serialized_end=2377,
)
_ACTORTASKSPEC = _descriptor.Descriptor(
name='ActorTaskSpec',
full_name='ray.rpc.ActorTaskSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='actor_id', full_name='ray.rpc.ActorTaskSpec.actor_id', index=0,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actor_creation_dummy_object_id', full_name='ray.rpc.ActorTaskSpec.actor_creation_dummy_object_id', index=1,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorCreationDummyObjectId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actor_counter', full_name='ray.rpc.ActorTaskSpec.actor_counter', index=2,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorCounter', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='previous_actor_task_dummy_object_id', full_name='ray.rpc.ActorTaskSpec.previous_actor_task_dummy_object_id', index=3,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='previousActorTaskDummyObjectId', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2380,
serialized_end=2604,
)
_TASKEXECUTIONSPEC = _descriptor.Descriptor(
name='TaskExecutionSpec',
full_name='ray.rpc.TaskExecutionSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='last_timestamp', full_name='ray.rpc.TaskExecutionSpec.last_timestamp', index=0,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lastTimestamp', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_forwards', full_name='ray.rpc.TaskExecutionSpec.num_forwards', index=1,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numForwards', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2606,
serialized_end=2699,
)
_TASK = _descriptor.Descriptor(
name='Task',
full_name='ray.rpc.Task',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_spec', full_name='ray.rpc.Task.task_spec', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='taskSpec', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_execution_spec', full_name='ray.rpc.Task.task_execution_spec', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='taskExecutionSpec', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2702,
serialized_end=2832,
)
_RESOURCEID = _descriptor.Descriptor(
name='ResourceId',
full_name='ray.rpc.ResourceId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='ray.rpc.ResourceId.index', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='index', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='ray.rpc.ResourceId.quantity', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='quantity', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2834,
serialized_end=2896,
)
_RESOURCEMAPENTRY = _descriptor.Descriptor(
name='ResourceMapEntry',
full_name='ray.rpc.ResourceMapEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ray.rpc.ResourceMapEntry.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='name', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_ids', full_name='ray.rpc.ResourceMapEntry.resource_ids', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='resourceIds', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2898,
serialized_end=2992,
)
_VIEWDATA_MEASURE = _descriptor.Descriptor(
name='Measure',
full_name='ray.rpc.ViewData.Measure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tags', full_name='ray.rpc.ViewData.Measure.tags', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tags', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int_value', full_name='ray.rpc.ViewData.Measure.int_value', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='intValue', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='double_value', full_name='ray.rpc.ViewData.Measure.double_value', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='doubleValue', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='distribution_min', full_name='ray.rpc.ViewData.Measure.distribution_min', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='distributionMin', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='distribution_mean', full_name='ray.rpc.ViewData.Measure.distribution_mean', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='distributionMean', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='distribution_max', full_name='ray.rpc.ViewData.Measure.distribution_max', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='distributionMax', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='distribution_count', full_name='ray.rpc.ViewData.Measure.distribution_count', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='distributionCount', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='distribution_bucket_boundaries', full_name='ray.rpc.ViewData.Measure.distribution_bucket_boundaries', index=7,
number=8, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='distributionBucketBoundaries', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='distribution_bucket_counts', full_name='ray.rpc.ViewData.Measure.distribution_bucket_counts', index=8,
number=9, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='distributionBucketCounts', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3092,
serialized_end=3495,
)
_VIEWDATA = _descriptor.Descriptor(
name='ViewData',
full_name='ray.rpc.ViewData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='view_name', full_name='ray.rpc.ViewData.view_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='viewName', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='measures', full_name='ray.rpc.ViewData.measures', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='measures', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_VIEWDATA_MEASURE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2995,
serialized_end=3495,
)
_OBJECTREFINFO = _descriptor.Descriptor(
name='ObjectRefInfo',
full_name='ray.rpc.ObjectRefInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='object_id', full_name='ray.rpc.ObjectRefInfo.object_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='objectId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='call_site', full_name='ray.rpc.ObjectRefInfo.call_site', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='callSite', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_size', full_name='ray.rpc.ObjectRefInfo.object_size', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='objectSize', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_ref_count', full_name='ray.rpc.ObjectRefInfo.local_ref_count', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='localRefCount', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='submitted_task_ref_count', full_name='ray.rpc.ObjectRefInfo.submitted_task_ref_count', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='submittedTaskRefCount', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contained_in_owned', full_name='ray.rpc.ObjectRefInfo.contained_in_owned', index=5,
number=6, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='containedInOwned', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pinned_in_memory', full_name='ray.rpc.ObjectRefInfo.pinned_in_memory', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='pinnedInMemory', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3498,
serialized_end=3789,
)
_COREWORKERSTATS_USEDRESOURCESENTRY = _descriptor.Descriptor(
name='UsedResourcesEntry',
full_name='ray.rpc.CoreWorkerStats.UsedResourcesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ray.rpc.CoreWorkerStats.UsedResourcesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ray.rpc.CoreWorkerStats.UsedResourcesEntry.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4566,
serialized_end=4630,
)
_COREWORKERSTATS_WEBUIDISPLAYENTRY = _descriptor.Descriptor(
name='WebuiDisplayEntry',
full_name='ray.rpc.CoreWorkerStats.WebuiDisplayEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ray.rpc.CoreWorkerStats.WebuiDisplayEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='key', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ray.rpc.CoreWorkerStats.WebuiDisplayEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='value', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4632,
serialized_end=4695,
)
_COREWORKERSTATS = _descriptor.Descriptor(
name='CoreWorkerStats',
full_name='ray.rpc.CoreWorkerStats',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='current_task_desc', full_name='ray.rpc.CoreWorkerStats.current_task_desc', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='currentTaskDesc', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_pending_tasks', full_name='ray.rpc.CoreWorkerStats.num_pending_tasks', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numPendingTasks', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_object_ids_in_scope', full_name='ray.rpc.CoreWorkerStats.num_object_ids_in_scope', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numObjectIdsInScope', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='current_task_func_desc', full_name='ray.rpc.CoreWorkerStats.current_task_func_desc', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='currentTaskFuncDesc', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip_address', full_name='ray.rpc.CoreWorkerStats.ip_address', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='ipAddress', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='ray.rpc.CoreWorkerStats.port', index=5,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='port', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actor_id', full_name='ray.rpc.CoreWorkerStats.actor_id', index=6,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='used_resources', full_name='ray.rpc.CoreWorkerStats.used_resources', index=7,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='usedResources', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='webui_display', full_name='ray.rpc.CoreWorkerStats.webui_display', index=8,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='webuiDisplay', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_in_plasma', full_name='ray.rpc.CoreWorkerStats.num_in_plasma', index=9,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numInPlasma', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_local_objects', full_name='ray.rpc.CoreWorkerStats.num_local_objects', index=10,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numLocalObjects', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='used_object_store_memory', full_name='ray.rpc.CoreWorkerStats.used_object_store_memory', index=11,
number=13, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='usedObjectStoreMemory', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_queue_length', full_name='ray.rpc.CoreWorkerStats.task_queue_length', index=12,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='taskQueueLength', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_executed_tasks', full_name='ray.rpc.CoreWorkerStats.num_executed_tasks', index=13,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numExecutedTasks', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actor_title', full_name='ray.rpc.CoreWorkerStats.actor_title', index=14,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='actorTitle', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_refs', full_name='ray.rpc.CoreWorkerStats.object_refs', index=15,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='objectRefs', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COREWORKERSTATS_USEDRESOURCESENTRY, _COREWORKERSTATS_WEBUIDISPLAYENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3792,
serialized_end=4695,
)
_FUNCTIONDESCRIPTOR.fields_by_name['java_function_descriptor'].message_type = _JAVAFUNCTIONDESCRIPTOR
_FUNCTIONDESCRIPTOR.fields_by_name['python_function_descriptor'].message_type = _PYTHONFUNCTIONDESCRIPTOR
_FUNCTIONDESCRIPTOR.fields_by_name['cpp_function_descriptor'].message_type = _CPPFUNCTIONDESCRIPTOR
_FUNCTIONDESCRIPTOR.oneofs_by_name['function_descriptor'].fields.append(
_FUNCTIONDESCRIPTOR.fields_by_name['java_function_descriptor'])
_FUNCTIONDESCRIPTOR.fields_by_name['java_function_descriptor'].containing_oneof = _FUNCTIONDESCRIPTOR.oneofs_by_name['function_descriptor']
_FUNCTIONDESCRIPTOR.oneofs_by_name['function_descriptor'].fields.append(
_FUNCTIONDESCRIPTOR.fields_by_name['python_function_descriptor'])
_FUNCTIONDESCRIPTOR.fields_by_name['python_function_descriptor'].containing_oneof = _FUNCTIONDESCRIPTOR.oneofs_by_name['function_descriptor']
_FUNCTIONDESCRIPTOR.oneofs_by_name['function_descriptor'].fields.append(
_FUNCTIONDESCRIPTOR.fields_by_name['cpp_function_descriptor'])
_FUNCTIONDESCRIPTOR.fields_by_name['cpp_function_descriptor'].containing_oneof = _FUNCTIONDESCRIPTOR.oneofs_by_name['function_descriptor']
_TASKSPEC_REQUIREDRESOURCESENTRY.containing_type = _TASKSPEC
_TASKSPEC_REQUIREDPLACEMENTRESOURCESENTRY.containing_type = _TASKSPEC
_TASKSPEC.fields_by_name['type'].enum_type = _TASKTYPE
_TASKSPEC.fields_by_name['language'].enum_type = _LANGUAGE
_TASKSPEC.fields_by_name['function_descriptor'].message_type = _FUNCTIONDESCRIPTOR
_TASKSPEC.fields_by_name['caller_address'].message_type = _ADDRESS
_TASKSPEC.fields_by_name['args'].message_type = _TASKARG
_TASKSPEC.fields_by_name['required_resources'].message_type = _TASKSPEC_REQUIREDRESOURCESENTRY
_TASKSPEC.fields_by_name['required_placement_resources'].message_type = _TASKSPEC_REQUIREDPLACEMENTRESOURCESENTRY
_TASKSPEC.fields_by_name['actor_creation_task_spec'].message_type = _ACTORCREATIONTASKSPEC
_TASKSPEC.fields_by_name['actor_task_spec'].message_type = _ACTORTASKSPEC
_TASK.fields_by_name['task_spec'].message_type = _TASKSPEC
_TASK.fields_by_name['task_execution_spec'].message_type = _TASKEXECUTIONSPEC
_RESOURCEMAPENTRY.fields_by_name['resource_ids'].message_type = _RESOURCEID
_VIEWDATA_MEASURE.containing_type = _VIEWDATA
_VIEWDATA.fields_by_name['measures'].message_type = _VIEWDATA_MEASURE
_COREWORKERSTATS_USEDRESOURCESENTRY.containing_type = _COREWORKERSTATS
_COREWORKERSTATS_WEBUIDISPLAYENTRY.containing_type = _COREWORKERSTATS
_COREWORKERSTATS.fields_by_name['used_resources'].message_type = _COREWORKERSTATS_USEDRESOURCESENTRY
_COREWORKERSTATS.fields_by_name['webui_display'].message_type = _COREWORKERSTATS_WEBUIDISPLAYENTRY
_COREWORKERSTATS.fields_by_name['object_refs'].message_type = _OBJECTREFINFO
DESCRIPTOR.message_types_by_name['Address'] = _ADDRESS
DESCRIPTOR.message_types_by_name['JavaFunctionDescriptor'] = _JAVAFUNCTIONDESCRIPTOR
DESCRIPTOR.message_types_by_name['PythonFunctionDescriptor'] = _PYTHONFUNCTIONDESCRIPTOR
DESCRIPTOR.message_types_by_name['CppFunctionDescriptor'] = _CPPFUNCTIONDESCRIPTOR
DESCRIPTOR.message_types_by_name['FunctionDescriptor'] = _FUNCTIONDESCRIPTOR
DESCRIPTOR.message_types_by_name['TaskSpec'] = _TASKSPEC
DESCRIPTOR.message_types_by_name['TaskArg'] = _TASKARG
DESCRIPTOR.message_types_by_name['ActorCreationTaskSpec'] = _ACTORCREATIONTASKSPEC
DESCRIPTOR.message_types_by_name['ActorTaskSpec'] = _ACTORTASKSPEC
DESCRIPTOR.message_types_by_name['TaskExecutionSpec'] = _TASKEXECUTIONSPEC
DESCRIPTOR.message_types_by_name['Task'] = _TASK
DESCRIPTOR.message_types_by_name['ResourceId'] = _RESOURCEID
DESCRIPTOR.message_types_by_name['ResourceMapEntry'] = _RESOURCEMAPENTRY
DESCRIPTOR.message_types_by_name['ViewData'] = _VIEWDATA
DESCRIPTOR.message_types_by_name['ObjectRefInfo'] = _OBJECTREFINFO
DESCRIPTOR.message_types_by_name['CoreWorkerStats'] = _COREWORKERSTATS
DESCRIPTOR.enum_types_by_name['Language'] = _LANGUAGE
DESCRIPTOR.enum_types_by_name['WorkerType'] = _WORKERTYPE
DESCRIPTOR.enum_types_by_name['TaskType'] = _TASKTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Address = _reflection.GeneratedProtocolMessageType('Address', (_message.Message,), {
'DESCRIPTOR' : _ADDRESS,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.Address)
})
_sym_db.RegisterMessage(Address)
JavaFunctionDescriptor = _reflection.GeneratedProtocolMessageType('JavaFunctionDescriptor', (_message.Message,), {
'DESCRIPTOR' : _JAVAFUNCTIONDESCRIPTOR,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.JavaFunctionDescriptor)
})
_sym_db.RegisterMessage(JavaFunctionDescriptor)
PythonFunctionDescriptor = _reflection.GeneratedProtocolMessageType('PythonFunctionDescriptor', (_message.Message,), {
'DESCRIPTOR' : _PYTHONFUNCTIONDESCRIPTOR,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.PythonFunctionDescriptor)
})
_sym_db.RegisterMessage(PythonFunctionDescriptor)
CppFunctionDescriptor = _reflection.GeneratedProtocolMessageType('CppFunctionDescriptor', (_message.Message,), {
'DESCRIPTOR' : _CPPFUNCTIONDESCRIPTOR,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.CppFunctionDescriptor)
})
_sym_db.RegisterMessage(CppFunctionDescriptor)
FunctionDescriptor = _reflection.GeneratedProtocolMessageType('FunctionDescriptor', (_message.Message,), {
'DESCRIPTOR' : _FUNCTIONDESCRIPTOR,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.FunctionDescriptor)
})
_sym_db.RegisterMessage(FunctionDescriptor)
TaskSpec = _reflection.GeneratedProtocolMessageType('TaskSpec', (_message.Message,), {
'RequiredResourcesEntry' : _reflection.GeneratedProtocolMessageType('RequiredResourcesEntry', (_message.Message,), {
'DESCRIPTOR' : _TASKSPEC_REQUIREDRESOURCESENTRY,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.TaskSpec.RequiredResourcesEntry)
})
,
'RequiredPlacementResourcesEntry' : _reflection.GeneratedProtocolMessageType('RequiredPlacementResourcesEntry', (_message.Message,), {
'DESCRIPTOR' : _TASKSPEC_REQUIREDPLACEMENTRESOURCESENTRY,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.TaskSpec.RequiredPlacementResourcesEntry)
})
,
'DESCRIPTOR' : _TASKSPEC,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.TaskSpec)
})
_sym_db.RegisterMessage(TaskSpec)
_sym_db.RegisterMessage(TaskSpec.RequiredResourcesEntry)
_sym_db.RegisterMessage(TaskSpec.RequiredPlacementResourcesEntry)
TaskArg = _reflection.GeneratedProtocolMessageType('TaskArg', (_message.Message,), {
'DESCRIPTOR' : _TASKARG,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.TaskArg)
})
_sym_db.RegisterMessage(TaskArg)
ActorCreationTaskSpec = _reflection.GeneratedProtocolMessageType('ActorCreationTaskSpec', (_message.Message,), {
'DESCRIPTOR' : _ACTORCREATIONTASKSPEC,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.ActorCreationTaskSpec)
})
_sym_db.RegisterMessage(ActorCreationTaskSpec)
ActorTaskSpec = _reflection.GeneratedProtocolMessageType('ActorTaskSpec', (_message.Message,), {
'DESCRIPTOR' : _ACTORTASKSPEC,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.ActorTaskSpec)
})
_sym_db.RegisterMessage(ActorTaskSpec)
TaskExecutionSpec = _reflection.GeneratedProtocolMessageType('TaskExecutionSpec', (_message.Message,), {
'DESCRIPTOR' : _TASKEXECUTIONSPEC,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.TaskExecutionSpec)
})
_sym_db.RegisterMessage(TaskExecutionSpec)
Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), {
'DESCRIPTOR' : _TASK,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.Task)
})
_sym_db.RegisterMessage(Task)
ResourceId = _reflection.GeneratedProtocolMessageType('ResourceId', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEID,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.ResourceId)
})
_sym_db.RegisterMessage(ResourceId)
ResourceMapEntry = _reflection.GeneratedProtocolMessageType('ResourceMapEntry', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEMAPENTRY,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.ResourceMapEntry)
})
_sym_db.RegisterMessage(ResourceMapEntry)
ViewData = _reflection.GeneratedProtocolMessageType('ViewData', (_message.Message,), {
'Measure' : _reflection.GeneratedProtocolMessageType('Measure', (_message.Message,), {
'DESCRIPTOR' : _VIEWDATA_MEASURE,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.ViewData.Measure)
})
,
'DESCRIPTOR' : _VIEWDATA,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.ViewData)
})
_sym_db.RegisterMessage(ViewData)
_sym_db.RegisterMessage(ViewData.Measure)
ObjectRefInfo = _reflection.GeneratedProtocolMessageType('ObjectRefInfo', (_message.Message,), {
'DESCRIPTOR' : _OBJECTREFINFO,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.ObjectRefInfo)
})
_sym_db.RegisterMessage(ObjectRefInfo)
CoreWorkerStats = _reflection.GeneratedProtocolMessageType('CoreWorkerStats', (_message.Message,), {
'UsedResourcesEntry' : _reflection.GeneratedProtocolMessageType('UsedResourcesEntry', (_message.Message,), {
'DESCRIPTOR' : _COREWORKERSTATS_USEDRESOURCESENTRY,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.CoreWorkerStats.UsedResourcesEntry)
})
,
'WebuiDisplayEntry' : _reflection.GeneratedProtocolMessageType('WebuiDisplayEntry', (_message.Message,), {
'DESCRIPTOR' : _COREWORKERSTATS_WEBUIDISPLAYENTRY,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.CoreWorkerStats.WebuiDisplayEntry)
})
,
'DESCRIPTOR' : _COREWORKERSTATS,
'__module__' : 'src.ray.protobuf.common_pb2'
# @@protoc_insertion_point(class_scope:ray.rpc.CoreWorkerStats)
})
_sym_db.RegisterMessage(CoreWorkerStats)
_sym_db.RegisterMessage(CoreWorkerStats.UsedResourcesEntry)
_sym_db.RegisterMessage(CoreWorkerStats.WebuiDisplayEntry)
DESCRIPTOR._options = None
_TASKSPEC_REQUIREDRESOURCESENTRY._options = None
_TASKSPEC_REQUIREDPLACEMENTRESOURCESENTRY._options = None
_COREWORKERSTATS_USEDRESOURCESENTRY._options = None
_COREWORKERSTATS_WEBUIDISPLAYENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 46.591356 | 7,878 | 0.753813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,425 | 0.28709 |
0d6c53671cbbb117ed649f7fa65fd1ba932a052e | 2,506 | py | Python | examples/django_app/sample/views.py | toonarmycaptain/pyflot | f2dde10709aeed39074fcce8172184b5cd8bfd66 | [
"MIT"
] | 3 | 2016-01-29T21:51:42.000Z | 2021-05-17T02:43:17.000Z | examples/django_app/sample/views.py | toonarmycaptain/pyflot | f2dde10709aeed39074fcce8172184b5cd8bfd66 | [
"MIT"
] | 2 | 2015-12-09T17:58:08.000Z | 2018-03-04T20:14:09.000Z | examples/django_app/sample/views.py | toonarmycaptain/pyflot | f2dde10709aeed39074fcce8172184b5cd8bfd66 | [
"MIT"
] | 5 | 2015-07-22T20:23:32.000Z | 2021-05-17T02:45:28.000Z | import flot
import math
import datetime
from django.views.generic import TemplateView
class HomeView(TemplateView):
    """Demo page that builds several pyflot graphs (bars, a time series,
    a sin curve, and inline-data series) for the home.html template."""
    template_name = 'home.html'
    def get_context_data(self, **kwargs):
        """Assemble the demo graphs into the template context.

        NOTE(review): does not call super().get_context_data(), and the
        sin-curve section relies on Python 2 semantics (`map`/`zip`
        returning lists); under Python 3 `xpoints` would be a one-shot
        iterator already drained by `ypoints` when `zip` runs -- confirm
        the target runtime before reusing this example.
        """
        # Two bar series over the same x range.
        # NOTE(review): labels claim y = 10*x / y = 20*x but the data is
        # actually y = x and y = 2*x.
        xy10 = flot.Series(x=flot.XVariable(points=range(1, 10)),
                           y=flot.YVariable(points=range(1, 10)),
                           options=flot.SeriesOptions(bars={'show': True},
                                                      label='y = 10*x'))
        xy20 = flot.Series(x=flot.XVariable(points=[i for i in range(1, 10)]),
                           y=flot.YVariable(points=[i*2 for i in range(1, 10)]),
                           options=flot.SeriesOptions(bars={'show': True},
                                                      label='y = 20*x',
                                                      color='green'))
        # Time-axis series: y = 1/x over consecutive January 2011 dates.
        x_time_points = [datetime.date(2011, 1, i) for i in range(1, 20)]
        y_points = [float(1)/i for i in range(1, 20)]
        time1 = flot.Series(x=flot.TimeXVariable(points=x_time_points),
                            y=flot.YVariable(points=y_points),
                            options=flot.SeriesOptions(points={'show': True},
                                                       lines={'show': True},
                                                       label='y = 1/x',
                                                       color='blue'))
        graph_option = flot.GraphOptions(xaxis={'format': '%d/%m/%Y'})
        # sin(x) sampled at one-degree steps, passed as (x, y) pairs.
        xpoints = map(math.radians ,range(1, 360))
        ypoints = map(math.sin, xpoints)
        sin_series = flot.Series(data=zip(xpoints, ypoints),
                                 options=flot.SeriesOptions(label='sin(x)',
                                                            color='red'))
        # Alternative constructors: separate point lists, and inline pair data.
        last_series = flot.Series(xpoints=range(0, 10), ypoints=range(0, 10),
                                  options=flot.SeriesOptions(label='y = x'))
        inline_series = flot.Series(data=[(x*2, x) for x in range(0, 10)])
        context = {
            'graph1': flot.Graph(series1=xy10, series2=xy20),
            'graph2': flot.Graph(series1=time1, options=graph_option),
            'sin_graph': flot.Graph(sin_series=sin_series),
            'last_series': flot.Graph(last_series=last_series),
            'all_series_graph': flot.Graph([xy10, xy20, last_series]),
            'inline_series': flot.Graph([inline_series,])
        }
        return context
| 48.192308 | 80 | 0.475658 | 2,418 | 0.964884 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.07502 |
0d6c93e9529dba1be9084932e1bd1a732f244d8b | 1,097 | py | Python | wolfbot/sensors/avg_color.py | sunil3590/TIM | 5c8de80d631a4dea0f604091467dba7a7201de48 | [
"MIT"
] | 1 | 2019-01-12T14:35:54.000Z | 2019-01-12T14:35:54.000Z | wolfbot/sensors/avg_color.py | sunil3590/TIM | 5c8de80d631a4dea0f604091467dba7a7201de48 | [
"MIT"
] | null | null | null | wolfbot/sensors/avg_color.py | sunil3590/TIM | 5c8de80d631a4dea0f604091467dba7a7201de48 | [
"MIT"
] | null | null | null | import color_sensor_ISL29125
from time import time
cs = color_sensor_ISL29125.color_senser(1)
if cs.valid_init:
print "Valid color sensor"
else :
print "Color Sensor invalid"
t0 = time()
red_list = []
green_list = []
blue_list = []
for x in range(100):
stat = cs.readStatus()
if "" in stat: #"FLAG_CONV_DONE" in stat:
if "FLAG_CONV_R" not in stat:
red_list.append( cs.readRed() )
if "FLAG_CONV_G" not in stat:
green_list.append( cs.readGreen() )
if "FLAG_CONV_G" not in stat:
blue_list.append( cs.readBlue() )
tf = time()
red_avg = float(sum( red_list)) / float(len(red_list))
green_avg = float(sum( green_list)) / float(len(green_list))
blue_avg = float(sum( blue_list)) / float(len(blue_list))
print "In " + str( int((tf-t0)*10000)/10.0) + "ms the avg of: "
print str(len(red_list)) + " red vals was " + str(red_avg)
print str(len(green_list)) +" green vals was " + str(green_avg)
print str(len(blue_list)) + " blue vals was " + str(blue_avg)
print red_avg
print green_avg
print blue_avg
print ""
| 27.425 | 66 | 0.649043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.170465 |
0d6cfdc31d74171c37475fd7569d74d50b976420 | 24 | py | Python | test.py | JohnnyBruh/Repository | f8bfb14737eee78fa8da400c7f6ddb21efda4baf | [
"CC0-1.0"
] | null | null | null | test.py | JohnnyBruh/Repository | f8bfb14737eee78fa8da400c7f6ddb21efda4baf | [
"CC0-1.0"
] | null | null | null | test.py | JohnnyBruh/Repository | f8bfb14737eee78fa8da400c7f6ddb21efda4baf | [
"CC0-1.0"
] | null | null | null | print("yaaaay")
input() | 12 | 16 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.333333 |
0d6d890fdcce2be6554d36ca0ba885f23e87f40b | 879 | py | Python | getdata.py | krantirk/dash-120million-taxi-app | 7c807b12ecb2e4a0fdf48bb7a627fadabbc0d6cc | [
"MIT"
] | 103 | 2020-06-05T19:36:24.000Z | 2022-03-26T21:07:22.000Z | getdata.py | krantirk/dash-120million-taxi-app | 7c807b12ecb2e4a0fdf48bb7a627fadabbc0d6cc | [
"MIT"
] | 4 | 2020-08-03T10:01:37.000Z | 2021-07-10T21:06:17.000Z | getdata.py | krantirk/dash-120million-taxi-app | 7c807b12ecb2e4a0fdf48bb7a627fadabbc0d6cc | [
"MIT"
] | 38 | 2020-06-26T02:29:28.000Z | 2022-03-25T03:42:47.000Z | import vaex
import os
# Open the main data
# Remote, anonymously-readable dataset; the path can be overridden so a
# local copy is used instead of streaming from S3.
taxi_path = 's3://vaex/taxi/yellow_taxi_2012_zones.hdf5?anon=true'
# override the path, e.g. $ export TAXI_PATH=/data/taxi/yellow_taxi_2012_zones.hdf5
taxi_path = os.environ.get('TAXI_PATH', taxi_path)
df_original = vaex.open(taxi_path)
# Make sure the data is cached locally
# Only the columns the app actually needs are touched below.
used_columns = ['pickup_longitude',
                'pickup_latitude',
                'dropoff_longitude',
                'dropoff_latitude',
                'total_amount',
                'trip_duration_min',
                'trip_speed_mph',
                'pickup_hour',
                'pickup_day',
                'dropoff_borough',
                'dropoff_zone',
                'pickup_borough',
                'pickup_zone']
for col in used_columns:
    print(f'Making sure column "{col}" is cached...')
    # nop() evaluates the column without producing output -- presumably just
    # to pull the data into the local cache; confirm with the vaex API docs.
    df_original.nop(col, progress=True)
| 33.807692 | 83 | 0.594994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.516496 |
0d6e8c3d883e48ab73c44b0b33f085bbe1e9c3de | 493 | py | Python | commitsan/util.py | abusalimov/commitsan | 5ff6ff970ed643bee52a132a8c4b84c1a0c80b7e | [
"MIT"
] | null | null | null | commitsan/util.py | abusalimov/commitsan | 5ff6ff970ed643bee52a132a8c4b84c1a0c80b7e | [
"MIT"
] | null | null | null | commitsan/util.py | abusalimov/commitsan | 5ff6ff970ed643bee52a132a8c4b84c1a0c80b7e | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
def unique(iterable, key=id):
    """
    Iterate over *iterable* and keep only elements whose key has not been
    seen before, preserving order.  The default key is object identity.
    """
    keyed_pairs = ((key(element), element) for element in iterable)
    return unique_values(keyed_pairs)
def unique_values(pairs):
    """Yield the value of each (key, value) pair whose key is new,
    remembering every key ever seen."""
    seen = set()
    for key, value in pairs:
        if key in seen:
            continue
        seen.add(key)
        yield value
| 24.65 | 76 | 0.630832 | 0 | 0 | 159 | 0.322515 | 0 | 0 | 0 | 0 | 88 | 0.178499 |
0d700c7bc2b0f82c98abdf77a46fc878ff69dcbb | 1,540 | py | Python | topicextractor/__init__.py | kjchung495/topicextractor | 6b2f2d9f122b6888d551275113c94126128dcef2 | [
"Apache-2.0"
] | null | null | null | topicextractor/__init__.py | kjchung495/topicextractor | 6b2f2d9f122b6888d551275113c94126128dcef2 | [
"Apache-2.0"
] | null | null | null | topicextractor/__init__.py | kjchung495/topicextractor | 6b2f2d9f122b6888d551275113c94126128dcef2 | [
"Apache-2.0"
] | null | null | null | import nltk
from nltk.tokenize import word_tokenize
#nltk.download('punkt')
#nltk.download('averaged_perceptron_tagger')
from collections import Counter
def extract_noun_counts(doc_list):
    """POS-tag each document and return, per document, a dict mapping every
    noun (tags starting with 'NN') to its occurrence count."""
    freq = []
    for doc_no, doc in enumerate(doc_list, start=1):
        tagged = nltk.pos_tag(nltk.word_tokenize(doc))
        print('working on doc ' + str(doc_no))
        nouns = [word for word, tag in tagged if tag.startswith('NN')]
        freq.append(dict(Counter(nouns)))
    return freq
def tfidf(count, count_container):
    """Score every term of *count* (a term -> frequency dict) against the
    corpus *count_container* (a list of such dicts) using a max-normalised
    TF-IDF variant, and return (term, score) pairs sorted by score
    descending (``Counter.most_common`` order).
    """
    # Peak frequency in the query document and in every corpus document;
    # frequencies are normalised by these maxima.
    doc_max = max(count.values(), default=0)
    corpus_max = [max(doc.values(), default=0) for doc in count_container]
    scores = {}
    for term, freq in count.items():
        # Average of the term's normalised frequency across documents that
        # contain it (absent documents contribute nothing).
        idf_sum = sum(
            doc[term] / peak
            for doc, peak in zip(count_container, corpus_max)
            if term in doc
        )
        # epsilon keeps the division below finite for never-seen terms
        idf = idf_sum / len(count_container) + 1e-10
        scores[term] = round((freq / doc_max) / idf, 4)
    return Counter(scores).most_common()
0d709297a9132b15b51435b7ab4b51ce55c7e9f3 | 14,315 | py | Python | src/finn/transformation/streamline/absorb.py | SpontaneousDuck/finn | 7cdfd6271159c6cc4636bd33047a7f2e175a7390 | [
"BSD-3-Clause"
] | null | null | null | src/finn/transformation/streamline/absorb.py | SpontaneousDuck/finn | 7cdfd6271159c6cc4636bd33047a7f2e175a7390 | [
"BSD-3-Clause"
] | null | null | null | src/finn/transformation/streamline/absorb.py | SpontaneousDuck/finn | 7cdfd6271159c6cc4636bd33047a7f2e175a7390 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from onnx import helper as oh
from finn.core.datatype import DataType
from finn.transformation import Transformation
from finn.util.basic import get_by_name
from finn.custom_op.registry import getCustomOp
from finn.transformation.infer_datatypes import InferDataTypes
class AbsorbAddIntoMultiThreshold(Transformation):
    """Absorb preceding Add ops into MultiThreshold by updating the threshold
    values. Only scalar/1D add vectors can be absorbed."""
    def apply(self, model):
        graph = model.graph
        # node_ind kept for symmetry with sibling transforms; unused here
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Add":
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "MultiThreshold":
                    add_weight_name = n.input[1]
                    threshold_name = consumer.input[1]
                    A = model.get_initializer(add_weight_name)
                    T = model.get_initializer(threshold_name)
                    assert A is not None, "Initializer for add weights is not set."
                    assert T is not None, "Initializer for thresholds is not set."
                    start_name = n.input[0]
                    # we can only absorb 0d or 1d adds
                    is_scalar = A.ndim == 0 or all(x == 1 for x in A.shape)
                    # count only non-singleton dims when deciding "1D"
                    actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                    is_1d = actual_ndims == 1
                    if is_scalar or is_1d:
                        # (x + A) vs T is equivalent to x vs (T - A), so the
                        # Add folds directly into the thresholds
                        Tnew = T - A.reshape(-1, 1)
                        # Tnew = T - A.reshape(-1, T.shape[1])
                        # compute new thresholds and set initializer
                        model.set_initializer(threshold_name, Tnew)
                        # wire add input directly to MultiThreshold
                        consumer.input[0] = start_name
                        # remove the add node
                        graph.node.remove(n)
                        graph_modified = True
        return (model, graph_modified)
class AbsorbMulIntoMultiThreshold(Transformation):
    """Absorb preceding Mul ops into MultiThreshold by updating the threshold
    values. Only *positive* scalar/1D mul vectors can be absorbed."""
    def apply(self, model):
        graph = model.graph
        # node_ind kept for symmetry with sibling transforms; unused here
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Mul":
                mul_weight_name = n.input[1]
                A = model.get_initializer(mul_weight_name)
                assert A is not None, "Initializer for mul weights is not set."
                # a negative scale would flip the threshold comparisons, so
                # signed operands are excluded (see FactorOutMulSignMagnitude)
                is_signed = (A < 0).any()
                is_scalar = A.ndim == 0 or all(x == 1 for x in A.shape)
                # count only non-singleton dims when deciding "1D"
                actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                is_1d = actual_ndims == 1
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "MultiThreshold":
                    if not is_signed and (is_1d or is_scalar):
                        threshold_name = consumer.input[1]
                        T = model.get_initializer(threshold_name)
                        assert T is not None, "Initializer for thresholds is not set."
                        start_name = n.input[0]
                        # compute new thresholds and set initializer
                        # (x * A) vs T is equivalent to x vs (T / A) for A > 0
                        Tnew = T / A.reshape(-1, 1)
                        # TODO: need to handle negative A values correctly; produce
                        # mul sign mask and merge into preceding matmul?
                        model.set_initializer(threshold_name, Tnew)
                        # wire add input directly to MultiThreshold
                        consumer.input[0] = start_name
                        # remove the mul node
                        graph.node.remove(n)
                        graph_modified = True
        return (model, graph_modified)
class FactorOutMulSignMagnitude(Transformation):
    """Split multiply-by-constant nodes into two multiply-by-constant nodes,
    where the first node is a bipolar vector (of signs) and the second is a
    vector of magnitudes."""
    def apply(self, model):
        graph = model.graph
        # node_ind tracks position so the new sign-mul can be inserted before n
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Mul":
                mul_weight_name = n.input[1]
                A = model.get_initializer(mul_weight_name)
                assert A is not None, "Initializer for mul weights is not set."
                is_scalar = np.prod(A.shape) == 1
                # count only non-singleton dims when deciding "1D"
                actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                is_1d = actual_ndims == 1
                is_not_bipolar = (
                    model.get_tensor_datatype(mul_weight_name) != DataType.BIPOLAR
                )
                is_signed = (A < 0).any()
                # only worth splitting when there are signs to factor out and
                # the operand is not already a pure sign (bipolar) vector
                if is_signed and (is_scalar or is_1d) and is_not_bipolar:
                    start_name = n.input[0]
                    in_shape = model.get_tensor_shape(start_name)
                    middle_name = model.make_new_valueinfo_name()
                    model.set_tensor_shape(middle_name, in_shape)
                    sign_mul_param_name = model.make_new_valueinfo_name()
                    # create new mul node with sign(A) as the operand
                    # NOTE(review): np.sign() yields 0 for zero entries, which is
                    # not a bipolar value -- confirm zero weights cannot occur here
                    sgn = np.sign(A)
                    model.set_initializer(sign_mul_param_name, sgn)
                    model.set_tensor_datatype(sign_mul_param_name, DataType.BIPOLAR)
                    # replace original mul weight by magnitudes
                    model.set_initializer(mul_weight_name, np.abs(A))
                    new_mul = oh.make_node(
                        "Mul", [start_name, sign_mul_param_name], [middle_name]
                    )
                    # sign-mul runs first: input -> sign mul -> magnitude mul
                    n.input[0] = middle_name
                    graph.node.insert(node_ind - 1, new_mul)
                    graph_modified = True
        return (model, graph_modified)
class Absorb1BitMulIntoMatMul(Transformation):
    """Absorb bipolar or binary multiplications into the preceding matrix
    multiply."""
    def apply(self, model):
        graph = model.graph
        # node_ind kept for symmetry with sibling transforms; unused here
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "MatMul":
                matmul_weight_name = n.input[1]
                W = model.get_initializer(matmul_weight_name)
                Wdt = model.get_tensor_datatype(matmul_weight_name)
                assert W is not None, "Initializer for matmul weights is not set."
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "Mul":
                    mul_weight_name = consumer.input[1]
                    A = model.get_initializer(mul_weight_name)
                    assert A is not None, "Initializer for mul weights is not set."
                    # only 1-bit (bipolar/binary) scales are folded
                    is_1bit = model.get_tensor_datatype(mul_weight_name).bitwidth() == 1
                    if is_1bit:
                        # fold the elementwise scale into the weight matrix
                        Wnew = A * W
                        assert (
                            Wnew.shape == W.shape
                        ), """Shape of new weights is not
                        the same as the shape of the weight matrix before."""
                        check_fxn = np.vectorize(lambda x: Wdt.allowed(x))
                        # only absorb if permitted by W datatype
                        if check_fxn(Wnew).all():
                            model.set_initializer(matmul_weight_name, Wnew)
                            # bypass and delete the now-redundant Mul node
                            n.output[0] = consumer.output[0]
                            graph.node.remove(consumer)
                            graph_modified = True
        return (model, graph_modified)
class Absorb1BitMulIntoConv(Transformation):
    """Absorb bipolar or binary multiplications into the preceding convolution."""
    def apply(self, model):
        graph = model.graph
        # node_ind kept for symmetry with sibling transforms; unused here
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Conv":
                conv_weight_name = n.input[1]
                W = model.get_initializer(conv_weight_name)
                Wdt = model.get_tensor_datatype(conv_weight_name)
                assert W is not None, "Initializer for conv weights is not set."
                consumer = model.find_consumer(n.output[0])
                if consumer is not None and consumer.op_type == "Mul":
                    mul_weight_name = consumer.input[1]
                    A = model.get_initializer(mul_weight_name)
                    assert A is not None, "Initializer for mul weights is not set."
                    # only 1-bit (bipolar/binary) scalar or per-channel scales
                    is_1bit = model.get_tensor_datatype(mul_weight_name).bitwidth() == 1
                    is_scalar = np.prod(A.shape) == 1
                    # count only non-singleton dims when deciding "1D"
                    actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
                    is_1d = actual_ndims == 1
                    if is_1bit and (is_1d or is_scalar):
                        # move the mul to the OFM position, since the mul is
                        # applied on the outputs channelwise or as scalar
                        # (reshape aligns A with the first axis of W)
                        Wnew = A.reshape(-1, 1, 1, 1) * W
                        assert (
                            Wnew.shape == W.shape
                        ), """Shape of new weights is not
                        the same as the shape of the conv weights before."""
                        check_fxn = np.vectorize(lambda x: Wdt.allowed(x))
                        # only absorb if permitted by W datatype
                        if check_fxn(Wnew).all():
                            model.set_initializer(conv_weight_name, Wnew)
                            # bypass and delete the now-redundant Mul node
                            n.output[0] = consumer.output[0]
                            graph.node.remove(consumer)
                            graph_modified = True
        return (model, graph_modified)
class AbsorbTransposeIntoMultiThreshold(Transformation):
    """Change (NHWCTranspose -> MultiThreshold -> NCHWTranspose) to (MultiThreshold)
    with NHWC mode."""
    def apply(self, model):
        graph = model.graph
        # node_ind kept for symmetry with sibling transforms; unused here
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Transpose":
                perms = list(get_by_name(n.attribute, "perm").ints)
                # perm [0, 3, 1, 2] turns an NHWC tensor into NCHW
                if perms == [0, 3, 1, 2]:
                    # NOTE(review): find_consumer results are not None-checked
                    # in this transform (unlike the siblings above); a dangling
                    # Transpose output would raise AttributeError here
                    mt_cand = model.find_consumer(n.output[0])
                    if mt_cand.op_type == "MultiThreshold":
                        final_t_cand = model.find_consumer(mt_cand.output[0])
                        if final_t_cand.op_type == "Transpose":
                            perms = list(
                                get_by_name(final_t_cand.attribute, "perm").ints
                            )
                            # perm [0, 2, 3, 1] converts back NCHW -> NHWC, so
                            # the transpose pair cancels once MT runs in NHWC
                            if perms == [0, 2, 3, 1]:
                                mt = getCustomOp(mt_cand)
                                mt.set_nodeattr("data_layout", "NHWC")
                                # get rid of transpose nodes, wire MT directly
                                mt_cand.input[0] = n.input[0]
                                mt_cand.output[0] = final_t_cand.output[0]
                                graph.node.remove(n)
                                graph.node.remove(final_t_cand)
                                graph_modified = True
                        elif final_t_cand.op_type == "Reshape":
                            oshape = model.get_tensor_shape(final_t_cand.output[0])
                            if len(oshape) == 2:
                                # transition to FC part, can still use NHWC
                                mt = getCustomOp(mt_cand)
                                mt.set_nodeattr("data_layout", "NHWC")
                                # get rid of first transpose node
                                mt_cand.input[0] = n.input[0]
                                # fix output shape for MultiThreshold
                                mt_ishape = model.get_tensor_shape(mt_cand.input[0])
                                (b, h, w, c) = mt_ishape
                                assert (
                                    h == 1 and w == 1
                                ), """Untested spatial dim
                                in conv->fc transition, proceed with caution!"""
                                model.set_tensor_shape(mt_cand.output[0], mt_ishape)
                                graph.node.remove(n)
                                graph_modified = True
        if graph_modified:
            # graph was rewired: re-run datatype inference over the model
            model = model.transform(InferDataTypes())
        return (model, graph_modified)
| 50.22807 | 88 | 0.542927 | 12,491 | 0.872581 | 0 | 0 | 0 | 0 | 0 | 0 | 3,896 | 0.272162 |
0d71379845c71ad7524a31d7ceaa01d4a691d64e | 822 | py | Python | examples/static_content.py | kjosib/kale | ced67fa7a335fbe9524c86a0a805cfdd56f6d560 | [
"MIT"
] | null | null | null | examples/static_content.py | kjosib/kale | ced67fa7a335fbe9524c86a0a805cfdd56f6d560 | [
"MIT"
] | 3 | 2020-03-04T03:16:52.000Z | 2020-04-25T06:22:48.000Z | examples/static_content.py | kjosib/kale | ced67fa7a335fbe9524c86a0a805cfdd56f6d560 | [
"MIT"
] | 2 | 2020-05-22T16:29:33.000Z | 2022-01-10T19:36:51.000Z | """
<html><body>
<p>
You'll probably want to supply a stylesheet. Perhaps some javascript library.
Maybe even some images. One way or another, it's handy to be able to point at
a directory full of static content and let the framework do its job.
</p>
<p>
This example exercises that facility by presenting the examples folder within
your web browser.
</p>
<p>Click <a href="static">here</a> to see this work.</p>
<p>When you're done digesting this example, may I suggest
<a href="/static/simple_task_list.py"> simple_task_list.py </a>?</p>
</body></html>
"""
import os
import kali
# Build the routing table for this example application.
app = kali.Router()
# This is how it's done:
# Serve this example's own directory as static content under /static/.
app.delegate_folder("/static/", kali.StaticFolder(os.path.dirname(__file__)))
# This is enough to have an index page.
@app.function('/')
def hello(): return __doc__  # index page: the HTML module docstring above
# Start serving; blocks until the server is stopped.
kali.serve_http(app)
| 22.833333 | 77 | 0.723844 | 0 | 0 | 0 | 0 | 46 | 0.055961 | 0 | 0 | 638 | 0.776156 |
0d71f71fc61d06bd9b1713ddabe63d768527680c | 1,266 | py | Python | Hello_world/hello_world2.py | elsuizo/Kivy_work | 90da07721257a5e2ebf9eafacfb5a632f91cfbdd | [
"MIT"
] | null | null | null | Hello_world/hello_world2.py | elsuizo/Kivy_work | 90da07721257a5e2ebf9eafacfb5a632f91cfbdd | [
"MIT"
] | null | null | null | Hello_world/hello_world2.py | elsuizo/Kivy_work | 90da07721257a5e2ebf9eafacfb5a632f91cfbdd | [
"MIT"
] | null | null | null | #= -------------------------------------------------------------------------
# @file hello_world2.py
#
# @date 02/14/16 13:29:22
# @author Martin Noblia
# @email martin.noblia@openmailbox.org
#
# @brief
#
# @detail
#
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#---------------------------------------------------------------------------=#
from kivy.app import App
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
class TutorialApp(App):
    """Minimal Kivy demo: a draggable/scalable "Hello" label inside a
    float layout."""

    def build(self):
        """Build and return the root widget tree.

        Kivy invokes ``build()`` to obtain the root widget; the original
        method was misspelled ``buil``, so the framework never called it.
        """
        root = FloatLayout()
        scatter = Scatter()
        # The original constructed the Label without binding it and then
        # added the undefined name ``l``; bind it before adding.
        label = Label(text="Hello", font_size=150)
        root.add_widget(scatter)
        scatter.add_widget(label)
        return root


if __name__ == "__main__":
    TutorialApp().run()
| 28.772727 | 78 | 0.627172 | 199 | 0.157188 | 0 | 0 | 0 | 0 | 0 | 0 | 863 | 0.681675 |
0d72db2c734fbc6f87d4e7544c59946f28af4c13 | 1,167 | py | Python | libs/validators/iban.py | Sparklingx/nzbhydra | e2433e1155255ba37341cc79750b104e7dd8889a | [
"Apache-2.0"
] | 674 | 2015-11-06T04:22:47.000Z | 2022-02-26T17:31:43.000Z | libs/validators/iban.py | Sparklingx/nzbhydra | e2433e1155255ba37341cc79750b104e7dd8889a | [
"Apache-2.0"
] | 713 | 2015-11-06T10:48:58.000Z | 2018-11-27T16:32:18.000Z | libs/validators/iban.py | Sparklingx/nzbhydra | e2433e1155255ba37341cc79750b104e7dd8889a | [
"Apache-2.0"
] | 106 | 2015-12-07T11:21:06.000Z | 2022-03-11T10:58:41.000Z | import re
from .utils import validator
# IBAN shape: 2-letter country code, 2 check digits, then 13-30
# alphanumeric BBAN characters (total length 17-34).
regex = (
    r'^[A-Z]{2}[0-9]{2}[A-Z0-9]{13,30}$'
)
pattern = re.compile(regex)
def char_value(char):
    """Numeric value of an IBAN character: digits map to themselves,
    letters map to A=10, B=11, ..., Z=35."""
    if char.isdigit():
        return int(char)
    return ord(char) - ord('A') + 10
def modcheck(value):
    """Return True when *value* passes the IBAN mod-97 checksum test."""
    # Move the country code and check digits (first four chars) to the end.
    shuffled = value[4:] + value[:4]
    # Digits keep their value; letters expand to two digits (A=10 ... Z=35).
    digit_chunks = []
    for ch in shuffled:
        if ch.isdigit():
            digit_chunks.append(ch)
        else:
            digit_chunks.append(str(ord(ch) - ord('A') + 10))
    # A valid IBAN leaves remainder 1 when read as one big integer mod 97.
    return int(''.join(digit_chunks)) % 97 == 1
@validator
def iban(value):
    """
    Return whether or not given value is a valid IBAN code.

    If the value is a valid IBAN this function returns ``True``, otherwise
    :class:`~validators.utils.ValidationFailure`.

    Examples::

        >>> iban('DE29100500001061045672')
        True

        >>> iban('123456')
        ValidationFailure(func=iban, ...)

    .. versionadded:: 0.8

    :param value: IBAN string to validate
    """
    # Structure first (country code, check digits, BBAN), then the mod-97
    # checksum; `and` short-circuits so a malformed value never reaches
    # modcheck().
    structure = pattern.match(value)
    return structure and modcheck(value)
| 22.018868 | 74 | 0.610111 | 0 | 0 | 0 | 0 | 494 | 0.423308 | 0 | 0 | 636 | 0.544987 |
0d736e7e49cff33ae106086347e250953188ade6 | 249 | py | Python | Multiples of 3 and 5.py | ahmedharbi197/Project-Euler | 596fa7622233868a08200f2d7fe3b7e83d0af41f | [
"MIT"
] | 1 | 2019-06-10T23:10:38.000Z | 2019-06-10T23:10:38.000Z | Multiples of 3 and 5.py | ahmedharbi197/Project-Euler | 596fa7622233868a08200f2d7fe3b7e83d0af41f | [
"MIT"
] | null | null | null | Multiples of 3 and 5.py | ahmedharbi197/Project-Euler | 596fa7622233868a08200f2d7fe3b7e83d0af41f | [
"MIT"
] | null | null | null | import sys
# HackerRank / Project Euler #1: for each query n, print the sum of all
# natural numbers below n that are multiples of 3 or 5.
def pre_sum(q):
    """Gauss sum 1 + 2 + ... + q."""
    # Hoisted out of the loop: the original redefined this helper on
    # every iteration.
    return (q * (1 + q) // 2)


t = int(input().strip())
for a0 in range(t):
    n = int(input().strip())
    # Inclusion-exclusion: multiples of 3, plus multiples of 5, minus
    # multiples of 15 (counted twice).  3 * pre_sum(k) is the sum of the
    # first k multiples of 3, and (n-1)//3 counts multiples below n.
    result = 3 * pre_sum((n - 1) // 3) + 5 * pre_sum((n - 1) // 5) - 15 * pre_sum((n - 1) // 15)
    print(int(result))
| 24.9 | 90 | 0.526104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0d74ec72c3d16df3ed04de198a0c999287ff6316 | 1,303 | py | Python | home/models.py | davidkiama/Foto-Moto- | 97f5cafb3580152c3672834dcbbbf5cfa961ff15 | [
"Unlicense"
] | null | null | null | home/models.py | davidkiama/Foto-Moto- | 97f5cafb3580152c3672834dcbbbf5cfa961ff15 | [
"Unlicense"
] | null | null | null | home/models.py | davidkiama/Foto-Moto- | 97f5cafb3580152c3672834dcbbbf5cfa961ff15 | [
"Unlicense"
] | null | null | null | from statistics import mode
from django.db import models
from cloudinary.models import CloudinaryField
# Create your models here.
class Image(models.Model):
    """A photo stored on Cloudinary, tagged with a location and a category."""
    # image = models.ImageField(
    #     upload_to='uploads/', default='default.jpg')
    # Hosted on Cloudinary instead of local media (replaces the
    # commented-out ImageField above).
    image = CloudinaryField('image')
    title = models.CharField(max_length=60)
    description = models.TextField()
    # String references because Location/Category are declared later in
    # this module.
    location = models.ForeignKey('Location', on_delete=models.CASCADE)
    category = models.ForeignKey('Category', on_delete=models.CASCADE)
    @classmethod
    def get_all_images(cls):
        """Return a queryset of every Image."""
        images = cls.objects.all()
        return images
    @classmethod
    def get_images_by_category(cls, category):
        """Return images belonging to the given category."""
        images = cls.objects.filter(category=category)
        return images
    @classmethod
    def filter_by_location(cls, location):
        """Return images taken at the given location."""
        images = cls.objects.filter(location=location)
        return images
    @classmethod
    def search_by_category(cls, search_term):
        """Case-insensitive substring search on the category name."""
        images = cls.objects.filter(category__name__icontains=search_term)
        return images
class Location(models.Model):
    """A place an Image can be tagged with (referenced by Image.location)."""
    name = models.CharField(max_length=60)
    def __str__(self):
        return self.name
class Category(models.Model):
    """A grouping an Image can be tagged with (referenced by Image.category)."""
    name = models.CharField(max_length=60)
    def __str__(self):
        return self.name
| 25.057692 | 74 | 0.697621 | 1,162 | 0.891788 | 0 | 0 | 522 | 0.400614 | 0 | 0 | 131 | 0.100537 |
0d751410403aebe0b04f4d2c6ba497f74d25e18a | 2,396 | py | Python | mozillians/users/tests/__init__.py | justinpotts/mozillians | efa5cbdfe4992d2ba1c1d85bfbb5b09b2215cc44 | [
"BSD-3-Clause"
] | 1 | 2016-07-24T07:20:19.000Z | 2016-07-24T07:20:19.000Z | mozillians/users/tests/__init__.py | Acidburn0zzz/mozillians | 991d51b8dc6d7b8265d20a33e83597a1f4500761 | [
"BSD-3-Clause"
] | null | null | null | mozillians/users/tests/__init__.py | Acidburn0zzz/mozillians | 991d51b8dc6d7b8265d20a33e83597a1f4500761 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth.models import Group, User
from django.utils import timezone
import factory
from factory import fuzzy
from mozillians.geo.models import City, Country, Region
from mozillians.users.models import Language
class UserFactory(factory.DjangoModelFactory):
    """Factory for Django Users with a populated Mozillians profile.

    By default the generated user is vouched and located in Athens,
    Greece; pass ``vouched=False`` / ``manager=True`` /
    ``userprofile={...}`` to the factory call to change that.
    """
    username = factory.Sequence(lambda n: 'user{0}'.format(n))
    first_name = 'Joe'
    last_name = factory.Sequence(lambda n: 'Doe {0}'.format(n))
    # e.g. Joe.Doe.42@example.com (spaces in the last name become dots)
    email = factory.LazyAttribute(
        lambda a: '{0}.{1}@example.com'.format(
            a.first_name, a.last_name.replace(' ', '.')))
    class Meta:
        model = User
    @factory.post_generation
    def userprofile(self, create, extracted, **kwargs):
        """Populate the user's profile; *extracted* (a dict) overrides fields.

        Assumes a profile object already exists on the user -- presumably
        created by a signal when the User is saved; this hook only updates it.
        """
        self.userprofile.full_name = ' '.join([self.first_name, self.last_name])
        # Default geo data: Athens / Attika / Greece, created on demand.
        self.userprofile.geo_country = Country.objects.get_or_create(
            name='Greece', code='gr',
            mapbox_id='country.1188277719'
        )[0]
        self.userprofile.geo_region = Region.objects.get_or_create(
            name='Attika', country=self.userprofile.geo_country,
            mapbox_id='province.539510334'
        )[0]
        self.userprofile.geo_city = City.objects.get_or_create(
            name='Athens', region=self.userprofile.geo_region,
            country=self.userprofile.geo_country,
            lat=39.727924, lng=21.592328,
            mapbox_id='mapbox-places.10946738'
        )[0]
        self.userprofile.lat = 39.727924
        self.userprofile.lng = 21.592328
        # Caller-supplied overrides win over the defaults above.
        if extracted:
            for key, value in extracted.items():
                setattr(self.userprofile, key, value)
        self.userprofile.save()
    @factory.post_generation
    def manager(self, create, extracted, **kwargs):
        """When ``manager=True`` is passed, add the user to the Managers group."""
        if extracted:
            group, created = Group.objects.get_or_create(name='Managers')
            self.groups.add(group)
    @factory.post_generation
    def vouched(self, create, extracted, **kwargs):
        """Vouch the user unless ``vouched=False`` was passed explicitly."""
        # By default Users are vouched
        if extracted is None or extracted:
            self.userprofile.is_vouched = True
            self.userprofile.vouches_received.create(
                voucher=None, date=timezone.now(), description='a test autovouch')
            self.userprofile.save()
class LanguageFactory(factory.DjangoModelFactory):
    """Factory for ``Language`` rows with a code from a small fixed pool."""
    code = fuzzy.FuzzyChoice(choices=['en', 'fr', 'el', 'es'])
    class Meta:
        model = Language
| 35.761194 | 82 | 0.647746 | 2,161 | 0.90192 | 0 | 0 | 1,613 | 0.673205 | 0 | 0 | 219 | 0.091402 |
0d779f79a6ff4f979c6a546d7fdf9c7fcb571967 | 1,248 | py | Python | tests/_utils.py | tsuyukimakoto/physaliidae | e55416e8b84c4a4ed2a31290f16ccd42350853d2 | [
"MIT"
] | 2 | 2017-04-26T01:10:24.000Z | 2019-05-04T03:29:24.000Z | tests/_utils.py | tsuyukimakoto/physaliidae | e55416e8b84c4a4ed2a31290f16ccd42350853d2 | [
"MIT"
] | 220 | 2019-01-01T03:18:11.000Z | 2022-03-28T20:29:49.000Z | tests/_utils.py | tsuyukimakoto/biisan | 8e55d73c582fcbba918595c2e741ffce7c88aaa9 | [
"MIT"
] | null | null | null | import os
import shutil
from contextlib import (
contextmanager,
)
from pathlib import Path
import pytest
@pytest.fixture(scope='function', autouse=True)
def cleanup():
    """Remove the generated ``tests/biisan_data`` tree before and after each test."""
    generated = Path('.') / 'tests' / 'biisan_data'

    def _wipe():
        if generated.exists():
            shutil.rmtree(generated)

    _wipe()
    yield
    _wipe()
@pytest.fixture(scope='function', autouse=True)
def setenv():
    """Point BIISAN at the generated test settings module for each test."""
    env_key = 'BIISAN_SETTINGS_MODULE'
    os.environ[env_key] = 'tests.biisan_data.data.biisan_local_settings'
    yield
    del os.environ[env_key]
@contextmanager
def cd(to):
    """Temporarily change the working directory to *to*, restoring it on exit."""
    previous = os.getcwd()
    os.chdir(to)
    try:
        yield
    finally:
        os.chdir(previous)
def _copy_blog(entry_file):
    """Copy one sample blog entry from test_data/ into the generated blog dir."""
    source = Path('.') / 'test_data' / entry_file
    target = Path('.') / 'biisan_data' / 'data' / 'blog' / entry_file
    shutil.copyfile(source, target)
def copy_first_blog():
    """Copy the first sample blog entry into the generated data dir."""
    _copy_blog('my_first_blog.rst')
def copy_second_blog():
    """Copy the second sample blog entry into the generated data dir."""
    _copy_blog('my_second_blog.rst')
def copy_test_local_settings():
    """Copy the sample local-settings module into the generated data dir."""
    settings_file = 'biisan_local_settings.py'
    source = Path('.') / 'test_data' / settings_file
    target = Path('.') / 'biisan_data' / 'data' / settings_file
    shutil.copyfile(source, target)
| 22.285714 | 89 | 0.677885 | 0 | 0 | 513 | 0.411058 | 625 | 0.500801 | 0 | 0 | 306 | 0.245192 |
0d78168c5d6eacac481b47c1f58e174237b91fe4 | 1,866 | py | Python | christmasflix/tests.py | jbettenh/last_christmas | 2f861afbdf67ca69df0e7ed815a870c5bb4feb19 | [
"MIT"
] | null | null | null | christmasflix/tests.py | jbettenh/last_christmas | 2f861afbdf67ca69df0e7ed815a870c5bb4feb19 | [
"MIT"
] | 1 | 2021-11-03T20:16:50.000Z | 2021-12-22T16:05:44.000Z | christmasflix/tests.py | jbettenh/last_christmas | 2f861afbdf67ca69df0e7ed815a870c5bb4feb19 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.shortcuts import reverse
from .models import MovieList
from christmasflix import omdbmovies
class MovieListIndexViewTests(TestCase):
    """Integration tests for the ``movie_lists`` context of the index view."""
    def test_no_lists(self):
        # With no MovieList rows the page renders the empty-state message.
        response = self.client.get(reverse('christmasflix:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No lists are available.")
        self.assertQuerysetEqual(response.context['movie_lists'], [])
    def test_one_movie_list(self):
        mylist1 = MovieList.objects.create(name="mylist1")
        response = self.client.get(reverse('christmasflix:index'))
        self.assertQuerysetEqual(response.context['movie_lists'], [mylist1])
    def test_two_movie_lists(self):
        mylist1 = MovieList.objects.create(name="mylist1")
        mylist2 = MovieList.objects.create(name="mylist2")
        response = self.client.get(reverse('christmasflix:index'))
        # ordered=False: the view's ordering is deliberately not asserted here.
        self.assertQuerysetEqual(response.context['movie_lists'], [mylist1, mylist2], ordered=False)
class IndexViewTest(TestCase):
    """Checks that the index URL renders the expected template."""
    def test_index_uses_template(self):
        response = self.client.get(f'/christmasflix/')
        self.assertTemplateUsed(response, 'christmasflix/index.html')
class DetailViewTest(TestCase):
    """Checks that a movie-list detail URL renders the expected template."""
    def test_list_uses_template(self):
        my_list = MovieList.objects.create()
        response = self.client.get(f'/christmasflix/{my_list.id}/')
        self.assertTemplateUsed(response, 'christmasflix/movie_list.html')
class MovieViewTest(TestCase):
    """Checks that the movies URL renders the expected template."""
    def test_movies_uses_template(self):
        response = self.client.get(f'/christmasflix/movies/')
        self.assertTemplateUsed(response, 'christmasflix/movies.html')
class MovieResultsTest(TestCase):
    """Tests for the OMDb movie-search helper."""
    def test_no_movie_found(self):
        # The original called search_movie() first and then invoked
        # self.assertRaises(KeyError) with no callable or `with` block,
        # which returns an unused context manager and never checks anything.
        # assertRaises must wrap the call that is expected to raise.
        with self.assertRaises(KeyError):
            omdbmovies.search_movie("Die Hard")
0d78acb4113c1695f1ed8f46e6ee86a7f8ade1aa | 430 | py | Python | errors.py | zofy/crawler | 3db2a214b4ca86022c49670a204e04fc1208f53a | [
"MIT"
] | null | null | null | errors.py | zofy/crawler | 3db2a214b4ca86022c49670a204e04fc1208f53a | [
"MIT"
] | null | null | null | errors.py | zofy/crawler | 3db2a214b4ca86022c49670a204e04fc1208f53a | [
"MIT"
] | null | null | null | from asyncio import CancelledError, TimeoutError
from requests.exceptions import Timeout, ConnectionError
from aiohttp.client_exceptions import ClientHttpProxyError, ClientProxyConnectionError, ClientOSError
class CaptchaError(Exception):
    """Raised on captcha detection; grouped with proxy failures in ProxyErrors below."""
    pass
# Exception classes treated as proxy failures (captcha plus aiohttp proxy/OS errors).
ProxyErrors = (CaptchaError, ClientOSError, ClientProxyConnectionError, ClientHttpProxyError)
# Exception classes treated as timeouts/cancellations (requests + asyncio variants;
# ConnectionError here is requests.exceptions.ConnectionError, per the import above).
TimeoutErrors = (Timeout, TimeoutError, CancelledError, ConnectionError)
| 35.833333 | 102 | 0.834884 | 40 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0d7972a9afb83ed1ccb7a5d48d6c3bd4bfcc326f | 4,493 | py | Python | holdingsparser/scrape.py | mhadam/holdingsparser | c2f58acf6414e417ff435bb01ecbb8c37be50d77 | [
"MIT"
] | 9 | 2018-04-23T23:35:21.000Z | 2021-11-03T04:34:09.000Z | holdingsparser/scrape.py | mhadam/holdingsparser | c2f58acf6414e417ff435bb01ecbb8c37be50d77 | [
"MIT"
] | 2 | 2021-05-06T14:38:14.000Z | 2021-05-08T01:17:33.000Z | holdingsparser/scrape.py | mhadam/holdingsparser | c2f58acf6414e417ff435bb01ecbb8c37be50d77 | [
"MIT"
] | 4 | 2018-08-18T18:09:06.000Z | 2021-06-21T02:02:07.000Z | import json
import logging
import re
from itertools import chain
from json import JSONDecodeError
from typing import Iterable, Optional, Mapping
import requests
import untangle
from bs4 import BeautifulSoup, PageElement, Tag
from holdingsparser.file import Holding, VotingAuthority, ShrsOrPrnAmt
logger = logging.getLogger(__name__)
def get_elements_from_soup(
    messages: Iterable[str], soup: BeautifulSoup
) -> Iterable[PageElement]:
    """Return every page element whose string matches one of *messages*."""
    # De-duplicate the messages, then collect find_all hits for each one.
    per_message_hits = []
    for message in set(messages):
        per_message_hits.append(soup.find_all(string=message))
    return chain.from_iterable(per_message_hits)
def is_results_missing(soup: BeautifulSoup) -> bool:
    """Return True when the page contains an EDGAR 'no match' message."""
    not_found_messages = {"No matching Ticker Symbol.", "No matching CIK."}
    return any(get_elements_from_soup(not_found_messages, soup))
def get_filings_download_element(
    soup: BeautifulSoup, form, description
) -> Optional[Tag]:
    """Find the 'Documents' link in the EDGAR results table whose row mentions
    both *form* (e.g. "13F-HR") and *description* (case-insensitive regex match).

    Returns the matching ``<a id="documentsbutton">`` tag, or None when the
    results table is absent or no row matches.
    """
    # find results table in page
    results_table = soup.find("table", attrs={"summary": "Results"})
    # find documents in search results
    def filter_search_results_entries(tag, form_type, description_text):
        """Returns true if the tag is a link in a row with text including form-type
        and description"""
        desc_re = re.compile(description_text, re.I) # I: ignorecase
        form_re = re.compile(form_type, re.I)
        try:
            return (
                tag.parent.name == "td"
                and tag.name == "a"
                and tag["id"] == "documentsbutton"
                and tag.parent.parent.find(string=form_re)
                and tag.parent.parent.find(string=desc_re)
            )
        except (IndexError, KeyError):
            # tag lacks an "id" attribute (or similar) — not a documents link.
            return False
    try:
        # result should be the <a> element containing holding documents link
        return results_table.find(
            lambda x: filter_search_results_entries(x, form, description)
        )
    except (TypeError, AttributeError):
        # results_table is None (no results table on the page) — fall through
        # and implicitly return None.
        pass
def find_holdings_document_url(soup: BeautifulSoup) -> Optional[str]:
    """Return the absolute sec.gov URL of the information-table XML link.

    Raises RuntimeError when no such link exists on the page.
    """
    # Any anchor whose text ends with "informationtable.xml".
    xml_link_pattern = re.compile(r"^.+informationtable\.xml$")
    link = soup.find("a", string=xml_link_pattern)
    try:
        return "https://www.sec.gov" + link["href"]
    except (TypeError, KeyError):
        # link is None, or has no href attribute.
        raise RuntimeError("failed to find holdings document URL")
def get_holdings(payload: str) -> Iterable[Holding]:
    """Yield a Holding for each <infoTable> entry in a 13F information-table XML.

    *payload* is the raw XML text; it is parsed with untangle and each child of
    the <informationTable> root is converted into the project's Holding type.
    """
    o = untangle.parse(payload)
    information_table = o.informationTable.children
    for table in information_table:
        voting_authority_element = table.votingAuthority
        # "None" is a reserved word, so the <None> child must be read via getattr.
        none_value = int(getattr(voting_authority_element, "None").cdata)
        voting_authority = VotingAuthority(
            sole=int(voting_authority_element.Sole.cdata),
            shared=int(voting_authority_element.Shared.cdata),
            none=none_value,
        )
        shrs_or_prn_amt = ShrsOrPrnAmt(
            amt=int(table.shrsOrPrnAmt.sshPrnamt.cdata),
            amt_type=table.shrsOrPrnAmt.sshPrnamtType.cdata,
        )
        yield Holding(
            name_of_issuer=table.nameOfIssuer.cdata,
            title_of_class=table.titleOfClass.cdata,
            cusip=table.cusip.cdata,
            value=int(table.value.cdata),
            shrs_or_prn_amt=shrs_or_prn_amt,
            investment_discretion=table.investmentDiscretion.cdata,
            voting_authority=voting_authority,
        )
def get_first_13f(payload: Mapping):
    """Return the accession number of the most recent 13F-HR filing.

    Raises StopIteration when no 13F-HR filing is present, matching the
    original next()-over-generator behaviour.
    """
    recent = payload["filings"]["recent"]
    for accession_number, form_type in zip(recent["accessionNumber"], recent["form"]):
        if form_type == "13F-HR":
            return accession_number
    raise StopIteration
def get_filings_url(cik: str) -> Optional[str]:
    """Return the EDGAR filing-index URL for *cik*'s latest 13F-HR, or None.

    Fetches the data.sec.gov submissions JSON for the CIK, picks the first
    13F-HR accession number, and builds the Archives index-page URL.
    Returns None when the response is not valid JSON or lacks a "cik" key.
    """
    logger.info(
        f"searching for filing at https://data.sec.gov/submissions/CIK{cik}.json"
    )
    submission_url = f"https://data.sec.gov/submissions/CIK{cik}.json"
    submissions_response = requests.get(submission_url)
    logger.debug(f"{submissions_response=}")
    try:
        payload = json.loads(submissions_response.text)
    except JSONDecodeError:
        # Non-JSON body (error page, throttling) — treat as "not found".
        return
    logger.debug(f"{payload=}")
    try:
        response_cik = payload["cik"]
        # NOTE(review): get_first_13f raises StopIteration when no 13F-HR
        # exists; that escapes this except-KeyError block — confirm intended.
        first_accession_number_formatted = get_first_13f(payload)
        # Archive directory names drop the dashes from the accession number.
        accession_number = first_accession_number_formatted.replace("-", "")
        path = f"Archives/edgar/data/{response_cik}/{accession_number}/{first_accession_number_formatted}-index.htm"
        return f"https://www.sec.gov/{path}"
    except KeyError:
        # Payload missing expected keys — implicitly return None.
        pass
0d7ab5a4ed73da20e4b8a1aaf38e3c5f297e7d62 | 4,757 | py | Python | scripts/settings.py | jugoodma/reu-2018 | e6133b556216e8a7a91b23dd37dc043408cbab82 | [
"MIT"
] | 5 | 2018-07-16T02:56:31.000Z | 2020-04-05T21:24:24.000Z | scripts/settings.py | jugoodma/reu-2018 | e6133b556216e8a7a91b23dd37dc043408cbab82 | [
"MIT"
] | null | null | null | scripts/settings.py | jugoodma/reu-2018 | e6133b556216e8a7a91b23dd37dc043408cbab82 | [
"MIT"
] | 2 | 2018-07-16T18:38:20.000Z | 2018-08-03T18:54:00.000Z | # DEFAULT SETTINGS FILE
import json
import requests
# MTurk environment to use — "sandbox" or "live" (keys into `environments` below).
mturk_type = "sandbox"
# Data-collection task type — "temporal", "spatial", or "captions" (see `environments`).
data_type = "temporal"
# Relative directories used by the collection scripts.
data_path = "../data/"
input_path = "../input/"
template_path = "../templates/"
result_path = "../results/"
# Maximum number of results per request — presumably for MTurk list calls; confirm usage.
max_results = 10
# When True, submitted assignments are approved without manual review.
approve_all = False
# you must have a YouTube API v3 key
# your key must be in a json file titled:
# 'youtube-key.json'
# with structure:
# {
# "key": "<YOUR API KEY HERE>"
# }
# obviously without the <>
#
# our .gitignore ignores json files so your
# key will not be public
youtube_api = 'https://www.googleapis.com/youtube/v3/videos?id={}&key=' + json.loads(open('../youtube-key.json', 'r').read())['key'] + '&part=status'
# if the items list is > 0 then the youtube video exists/is watchable
def youtube_video_exists(ytid):
    """Return True when the YouTube Data API lists the video id as existing."""
    payload = requests.get(youtube_api.format(ytid)).json()
    return len(payload['items']) > 0
# environments variable includes both mturk and data environments
# mturk - live, sandbox
# data - temporal, spatial, audio
#
# It's best not to edit the mturk environments
#
# DATA ENVIRON:
# assignments - number of unique workers per HIT
# lifetime - number of seconds that each HIT is visible
# duration - number of seconds that the assignment is available to the worker
# approve - number of seconds after assignment is submitted that it is auto-approved
# reward - cost per assignment
# title - the title of the HIT
# keywords - the keywords of the HIT
# desc - the description of the HIT
# xml - the xml file for this HIT
# worker - requirements for the HIT worker
# go to http://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_QualificationRequirementDataStructureArticle.html#ApiReference_QualificationType-IDs
# to read the worker requirements. you may want to use masters only for certain data collection environments
# Combined MTurk + data-collection configuration, keyed by environment name.
# "live"/"sandbox" hold MTurk endpoints; the rest describe one HIT type each
# (assignments per HIT, lifetimes in seconds, reward in USD, UI template,
# worker qualification requirements — see the comment block above).
environments = {
    # mturk environ.
    "live": {
        "endpoint": "https://mturk-requester.us-east-1.amazonaws.com",
        "preview": "https://www.mturk.com/mturk/preview",
        "manage": "https://requester.mturk.com/mturk/manageHITs",
    },
    "sandbox": {
        "endpoint": "https://mturk-requester-sandbox.us-east-1.amazonaws.com",
        "preview": "https://workersandbox.mturk.com/mturk/preview",
        "manage": "https://requestersandbox.mturk.com/mturk/manageHITs",
    },
    # data environ.
    "temporal": {
        "csv": "temporal-input.csv",
        "assignments": 1,
        "lifetime": 3 * 24 * 60 * 60, # 3 days
        "duration": 10 * 60, # 10 minutes
        "approve": 5 * 24 * 60 * 60, # 5 days
        "reward": "0.07",
        "title": "Annotate when audio source appears in video",
        "keywords": "annotating, video, audio",
        "desc": "Watch a 10 second video clip and tell us when the source of a labeled sound appears in the frame of the video",
        "xml": "temporal-ui.xml",
        "worker": [{
            'QualificationTypeId': '000000000000000000L0',
            'Comparator': 'GreaterThanOrEqualTo',
            'IntegerValues': [95],
            'RequiredToPreview': True,
        }],
        "hit-type-id": "3ISL4H6O6ITRSQNC3OSL4OKOO05ICC",
        "out": "temporal-output.csv",
    },
    "spatial": {
        "csv": "spatial-input.csv",
        "assignments": 1,
        "lifetime": 3 * 24 * 60 * 60, # 3 days
        "duration": 5 * 60, # 5 minutes
        "approve": 3 * 24 * 60 * 60, # 3 days
        "reward": "0.02",
        "title": "Locate Where Audio Originates in a Video",
        "keywords": "matching, video, audio",
        "desc": "Watch a 1 second video clip and indicate where the audio is coming from",
        "xml": "spatial-ui.xml",
        "worker": [{
            'QualificationTypeId': '000000000000000000L0',
            'Comparator': 'GreaterThanOrEqualTo',
            'IntegerValues': [80],
            'RequiredToPreview': True,
        }],
        "hit-type-id": "3M8DJV5FJWDQ93IDR6VEH2187CRFN0",
        "out":"spatial-output.csv",
    },
    # this is out-dated
    "captions": {
        "csv": "captions-input.csv",
        "assignments": 2,
        "lifetime": 7 * 24 * 60 * 60, # 7 days
        "duration": 60 * 60, # 1 hour
        "approve": 5 * 24 * 60 * 60, # 5 days
        "reward": "0.07",
        "title": "Annotate a video without visual context",
        "keywords": "classify, video, audio",
        "desc": "Listen to a blurred 10 to 30 second video and describe what happens in the audio scene",
        "xml": "audio-ui.xml",
        "worker": [{
            'QualificationTypeId': '000000000000000000L0',
            'Comparator': 'GreaterThanOrEqualTo',
            'IntegerValues': [95],
            'RequiredToPreview': True,
        }],
        "hit-type-id": "",
        "out":"audio-output.csv",
    },
}
| 38.056 | 165 | 0.61194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,390 | 0.712634 |
0d7b441950e43399337a0b17c73f11ba2c3bafd4 | 10,790 | py | Python | synapse-prometheus-connector/src/main.py | microsoft/azure-synapse-spark-metrics | a32ca9426973b34d0d7a9092a9c608fae6d7b0ff | [
"MIT"
] | 8 | 2021-02-26T11:26:55.000Z | 2022-03-31T16:16:17.000Z | synapse-prometheus-connector/src/main.py | microsoft/azure-synapse-spark-metrics | a32ca9426973b34d0d7a9092a9c608fae6d7b0ff | [
"MIT"
] | 2 | 2021-11-11T08:43:19.000Z | 2021-11-30T09:11:31.000Z | synapse-prometheus-connector/src/main.py | microsoft/azure-synapse-spark-metrics | a32ca9426973b34d0d7a9092a9c608fae6d7b0ff | [
"MIT"
] | 4 | 2021-09-13T12:34:56.000Z | 2022-02-15T03:57:13.000Z | # coding=utf-8
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import os
import signal
import time
import traceback
import requests
import access_token
import config
import metrics
import model
import spark_pools
def write_string_to_path(path, filename, content):
    """Write *content* as UTF-8 text to *filename* inside *path*, creating dirs."""
    os.makedirs(path, exist_ok=True)
    destination = os.path.join(path, filename)
    with open(destination, 'w', encoding='utf-8') as out:
        out.write(content)
def generate_spark_application_scrape_configs(application_list, workspace_name, synapse_host, api_version):
    """Build Prometheus static_configs targeting each running Spark app's
    executor-metrics endpoint via the Synapse Livy API.

    One PrometheusStaticConfig is produced per (application, metrics path);
    labels carry the workspace/pool/livy/application identity, and the
    special __metrics_path__/__scheme__ labels steer the Prometheus scrape.
    """
    # Livy REST path to a specific application; metrics paths are appended.
    livy_path_template = f'/livyApi/versions/{api_version}' + '/sparkpools/{spark_pool_name}/sessions/{livy_id}/applications/{application_id}'
    metrics_paths = [
        '/metrics/executors/prometheus',
    ]
    static_configs = []
    for app in application_list:
        livy_path = livy_path_template.format(spark_pool_name=app.spark_pool_name, livy_id=app.livy_id, application_id=app.spark_application_id)
        for metrics_path in metrics_paths:
            static_configs.append(model.PrometheusStaticConfig(
                targets=[ synapse_host ],
                labels={
                    'synapse_api_version': str(api_version),
                    'workspace_name': str(workspace_name),
                    'spark_pool_name': str(app.spark_pool_name),
                    'livy_id': str(app.livy_id),
                    'application_id': str(app.spark_application_id),
                    'name': str(app.name),
                    '__metrics_path__': str(livy_path + metrics_path),
                    '__param_format': 'html',
                    '__scheme__': 'https',
                }
            ))
    return static_configs
def get_spark_applications(synapse_host, synapse_api_version, bearer_token):
    """Fetch submitting/in-progress Spark applications from the Synapse
    monitoring API and return them as a list of model objects.

    Applications without a Spark application id are skipped. On a non-200
    response the body is printed and an HTTPError is raised.
    """
    path = '/monitoring/workloadTypes/spark/applications'
    url = f'https://{synapse_host}{path}'
    params = {
        'api-version': synapse_api_version,
        'skip': 0,
        # Only applications that can still be scraped.
        'filter': "(state eq 'submitting') or (state eq 'inprogress')",
    }
    headers = {
        'Authorization': f'Bearer {bearer_token}'
    }
    response = requests.get(url, params=params, headers=headers, timeout=15)
    if response.status_code == 200:
        apps_info = response.json()
        applications = apps_info.get('sparkJobs')
        application_list = []
        if applications:
            for _app in applications:
                app = model.spark_application_from_dict(_app)
                # Skip entries that have no Spark application id yet.
                if not app.spark_application_id:
                    continue
                application_list.append(app)
        return application_list
    print(response.json())
    response.raise_for_status()
def token_refresh_by_workspace(workspace_config, workspace_context):
    """Refresh the workspace bearer token when the refresh interval elapsed.

    The token and refresh timestamp are stored in *workspace_context*;
    failures are counted in metrics and logged, never raised (best effort).
    """
    if not workspace_context or time.time() - workspace_context.get('token_refresh_time', 0) >= workspace_config.token_refresh_interval_sec:
        metrics.token_refresh_count.labels(workspace_name=workspace_config.workspace_name).inc()
        try:
            print('refreshing token...')
            bearer_token = access_token.get_access_token(
                workspace_config.service_principal_name,
                workspace_config.service_principal_password,
                workspace_config.tenant_id,
                workspace_config.resource_uri)
            workspace_context['token_refresh_time'] = int(time.time())
            workspace_context['bearer_token'] = bearer_token
            print('token refreshed.')
            metrics.token_refresh_last_time.labels(workspace_name=workspace_config.workspace_name).set(int(time.time()))
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; keep the deliberate best-effort behaviour
            # but only for ordinary exceptions.
            metrics.token_refresh_failed_count.labels(workspace_name=workspace_config.workspace_name).inc()
            traceback.print_exc()
def spark_application_discovery_by_workspace(workspace_config, workspace_context):
    """Discover running Spark applications for a workspace and publish
    Prometheus scrape configs plus per-pool/per-application metrics.

    Runs at most once per spark_application_discovery_interval_sec; requires
    a bearer token already present in *workspace_context*. Failures are
    counted and logged, never raised (best effort).
    """
    if time.time() - workspace_context.get('application_discovery_time', 0) >= workspace_config.spark_application_discovery_interval_sec:
        metrics.application_discovery_count.labels(workspace_name=workspace_config.workspace_name).inc()
        try:
            print('spark application discovery...')
            bearer_token = workspace_context.get('bearer_token')
            if not bearer_token:
                # Token not refreshed yet — try again on a later tick.
                return
            synapse_host = workspace_config.synapse_host()
            synapse_api_version = workspace_config.synapse_api_version
            workspace_name = workspace_config.workspace_name
            with metrics.application_discovery_duration_histogram.labels(workspace_name).time():
                application_list = get_spark_applications(synapse_host, synapse_api_version, bearer_token)
            workspace_scrape_configs = generate_spark_application_scrape_configs(application_list, workspace_name, synapse_host, synapse_api_version)
            if workspace_config.service_discovery_output_folder:
                # Write the token and file_sd targets for Prometheus to pick up.
                folder = os.path.join(workspace_config.service_discovery_output_folder, f'workspace/{workspace_name}/')
                write_string_to_path(folder, 'bearer_token', bearer_token)
                write_string_to_path(folder, 'application_discovery.json', model.to_json(workspace_scrape_configs))
            workspace_context['workspace_scrape_configs'] = workspace_scrape_configs
            workspace_context['application_list'] = application_list
            workspace_context['application_discovery_time'] = int(time.time())
            print(f'spark application discovery, found targets: {len(application_list)}.')
            # spark pool metrics: count applications per pool.
            spark_pool_applications = {}
            for app in application_list:
                spark_pool_applications.setdefault(app.spark_pool_name, 0)
                spark_pool_applications[app.spark_pool_name] += 1
                print(f'{app.spark_pool_name}/sessions/{app.livy_id}/applications/{app.spark_application_id}\tstate:{app.state}')
            for spark_pool_name, application_count in spark_pool_applications.items():
                metrics.application_discovery_target.labels(workspace_name=workspace_name, spark_pool_name=spark_pool_name).set(application_count)
            # spark application metrics: clear previous label sets so gauges
            # for finished applications disappear (reaches into the private
            # prometheus_client _metrics dict — fragile but intentional).
            metrics.application_info._metrics = {}
            metrics.application_submit_time._metrics = {}
            metrics.application_queue_duration._metrics = {}
            metrics.application_running_duration._metrics = {}
            for app in application_list:
                app_base_labels = dict(workspace_name=workspace_name, spark_pool_name=app.spark_pool_name, name=app.name,
                                       application_id=app.spark_application_id, livy_id=app.livy_id)
                metrics.application_info.labels(subscription_id=workspace_config.subscription_id,
                                                resource_group=workspace_config.resource_group,
                                                tenant_id=workspace_config.tenant_id,
                                                **app_base_labels).set(1)
                metrics.application_submit_time.labels(**app_base_labels).set(app.submit_time_seconds)
                metrics.application_queue_duration.labels(**app_base_labels).set(app.queued_duration_seconds)
                metrics.application_running_duration.labels(**app_base_labels).set(app.running_duration_seconds)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            metrics.application_discovery_failed_count.labels(workspace_name=workspace_config.workspace_name).inc()
            traceback.print_exc()
def spark_pool_metrics_by_workspace(workspace_config, workspace_context):
    """Publish spark_pool_info metadata gauges for the workspace's pools.

    Uses a separate ARM (azure management) token with its own refresh cycle,
    and polls pool metadata at most every 300 seconds. No-op unless
    enable_spark_pools_metadata_metrics is set. Failures are logged, never
    raised (best effort).
    """
    if not workspace_config.enable_spark_pools_metadata_metrics:
        return
    # Refresh the ARM token when missing or older than the refresh interval.
    if not workspace_context or time.time() - workspace_context.get('spark_pool_metrics_token_refresh_time', 0) >= workspace_config.token_refresh_interval_sec:
        try:
            print('refreshing token for spark pool metrics...')
            bearer_token = access_token.get_access_token(
                workspace_config.service_principal_name,
                workspace_config.service_principal_password,
                workspace_config.tenant_id,
                workspace_config.azure_management_resource_uri)
            workspace_context['spark_pool_metrics_token_refresh_time'] = int(time.time())
            workspace_context['spark_pool_metrics_bearer_token'] = bearer_token
            print('token refreshed for spark pool metrics.')
        except Exception:
            # Was a bare `except:`; narrowed to ordinary exceptions.
            traceback.print_exc()
    bearer_token = workspace_context.get('spark_pool_metrics_bearer_token')
    if bearer_token and time.time() - workspace_context.get('spark_pool_metrics_time', 0) >= 300:
        workspace_context['spark_pool_metrics_time'] = int(time.time())
        try:
            spark_pools_info = spark_pools.get_spark_pools(
                workspace_config.subscription_id,
                workspace_config.resource_group,
                workspace_config.workspace_name,
                bearer_token)
            for sp in spark_pools_info:
                # All metadata is carried in labels; the gauge value is just 1.
                metrics.spark_pool_info.labels(
                    workspace_name=workspace_config.workspace_name,
                    spark_pool_name=sp.name,
                    location=sp.location,
                    spark_version=sp.spark_version,
                    node_count=str(sp.node_count),
                    node_size=str(sp.node_size),
                    provisioning_state=str(sp.provisioning_state),
                    auto_scale_enabled=str(sp.auto_scale_enabled),
                    node_cpu_cores=str(sp.node_cpu_cores),
                    node_memory_size=str(sp.node_memory_size),
                ).set(1)
        except Exception:
            # Was a bare `except:`; narrowed to ordinary exceptions.
            traceback.print_exc()
class GracefulShutdown:
    """Flips ``shutdown`` to True when SIGINT or SIGTERM is received."""
    shutdown = False

    def __init__(self):
        # Install the same handler for both termination signals.
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.set_shutdown)

    def set_shutdown(self, signum, frame):
        self.shutdown = True
def main():
    """Run the connector loop: refresh tokens, discover applications, and
    publish pool metrics for every configured workspace until SIGINT/SIGTERM.
    """
    graceful_shutdown = GracefulShutdown()
    cfg = config.read_config(filename='config/config.yaml')
    print('started, config loaded.')
    # Per-workspace mutable state (tokens, timestamps, discovered targets).
    workspace_contexts = {}
    while not graceful_shutdown.shutdown:
        for workspace_config in cfg.workspaces:
            workspace_context = workspace_contexts.setdefault(workspace_config.workspace_name, {})
            try:
                token_refresh_by_workspace(workspace_config, workspace_context)
                spark_application_discovery_by_workspace(workspace_config, workspace_context)
                spark_pool_metrics_by_workspace(workspace_config, workspace_context)
            except Exception:
                # Was a bare `except:`; a bare clause would also swallow
                # SystemExit/KeyboardInterrupt. One workspace failing must
                # not stop the loop for the others.
                traceback.print_exc()
        time.sleep(1)


if __name__ == "__main__":
    main()
| 46.508621 | 159 | 0.668397 | 266 | 0.024652 | 0 | 0 | 0 | 0 | 0 | 0 | 1,459 | 0.135218 |
0d7c6c0d4a76f2eeb76fb63de618f9a0da93b8d7 | 5,465 | py | Python | stanCode_projects/pedestrian_removing/stanCodoshop.py | shihjames/sc-projects | 3c09ae12489edb2ba55335ea192a8206e9a45e84 | [
"MIT"
] | null | null | null | stanCode_projects/pedestrian_removing/stanCodoshop.py | shihjames/sc-projects | 3c09ae12489edb2ba55335ea192a8206e9a45e84 | [
"MIT"
] | null | null | null | stanCode_projects/pedestrian_removing/stanCodoshop.py | shihjames/sc-projects | 3c09ae12489edb2ba55335ea192a8206e9a45e84 | [
"MIT"
] | null | null | null | """
File: stanCodoshop.py
Name: James Shih
----------------------------------------------
SC101_Assignment3
Adapted from Nick Parlante's
Ghost assignment by Jerry Liao.
-----------------------------------------------
Remove people or abnormal objects in a certain photo.
"""
import os
import sys
import time
from simpleimage import SimpleImage
def get_pixel_dist(pixel, red, green, blue):
    """
    Returns the color distance between pixel and mean RGB value
    Input:
        pixel (Pixel): pixel with RGB values to be compared
        red (int): average red value across all images
        green (int): average green value across all images
        blue (int): average blue value across all images
    Returns:
        dist (float): Euclidean color distance between the pixel and the averages
    """
    d_red = red - pixel.red
    d_green = green - pixel.green
    d_blue = blue - pixel.blue
    return (d_red ** 2 + d_green ** 2 + d_blue ** 2) ** 0.5
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, blue, and green values
    Input:
        pixels (List[Pixel]): list of pixels to be averaged
    Returns:
        rgb (List[int]): [red, green, blue] integer averages (floor division)
    """
    count = len(pixels)
    total_red = sum(p.red for p in pixels)
    total_green = sum(p.green for p in pixels)
    total_blue = sum(p.blue for p in pixels)
    return [total_red // count, total_green // count, total_blue // count]
def get_best_pixel(pixels):
    """
    Given a list of pixels, returns the pixel with the smallest
    distance from the average red, green, and blue values across all pixels.
    Ties keep the earliest pixel in the list.
    Input:
        pixels (List[Pixel]): list of pixels to be averaged and compared
    Returns:
        best (Pixel): pixel closest to RGB averages
    """
    # Compute the average once; the original called get_average() three
    # times, scanning the pixel list three extra times per coordinate.
    red_avg, green_avg, blue_avg = get_average(pixels)
    # Seed with the first pixel; the original seeded `best` with a float
    # distance (wrong type), which only worked because the first loop
    # iteration always overwrote it.
    best = pixels[0]
    best_dist = get_pixel_dist(best, red_avg, green_avg, blue_avg)
    for pixel in pixels[1:]:
        dist = get_pixel_dist(pixel, red_avg, green_avg, blue_avg)
        if dist < best_dist:
            best_dist = dist
            best = pixel
    return best
def solve(images):
    """
    Given a list of image objects, compute and display a Ghost solution image
    based on these images. There will be at least 3 images and they will all
    be the same size.
    Input:
        images (List[SimpleImage]): list of images to be processed
    """
    width = images[0].width
    height = images[0].height
    result = SimpleImage.blank(width, height)
    # For each coordinate, pick the pixel (across all images) closest to the
    # average color at that coordinate; this removes transient objects.
    # (Removed: leftover debug timing prints and a commented-out "Method 2".)
    for x in range(width):
        for y in range(height):
            pixels = [image.get_pixel(x, y) for image in images]
            result.set_pixel(x, y, get_best_pixel(pixels))
    print("Displaying image!")
    result.show()
def jpgs_in_dir(direc):
    """
    (provided, DO NOT MODIFY)
    Given the name of a directory, returns a list of the .jpg filenames
    within it.
    Input:
        direc (string): name of directory
    Returns:
        filenames(List[string]): names of jpg files in directory
    """
    filenames = []
    for filename in os.listdir(direc):
        if filename.endswith('.jpg'):
            filenames.append(os.path.join(direc, filename))
    return filenames
def load_images(direc):
    """
    (provided, DO NOT MODIFY)
    Given a directory name, reads all the .jpg files within it into memory and
    returns them in a list. Prints the filenames out as it goes.
    Input:
        direc (string): name of directory
    Returns:
        images (List[SimpleImage]): list of images in directory
    """
    images = []
    jpgs = jpgs_in_dir(direc)
    for filename in jpgs:
        print("Loading", filename)
        image = SimpleImage(filename)
        images.append(image)
    return images
def main():
    """Entry point: load all images from the directory given on the CLI and solve."""
    # (provided, DO NOT MODIFY)
    args = sys.argv[1:]
    # We just take 1 argument, the folder containing all the images.
    # The load_images() capability is provided above.
    images = load_images(args[0])
    solve(images)
if __name__ == '__main__':
    main()
| 29.224599 | 106 | 0.6086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,304 | 0.604575 |
0d7c9dd39ea4011bea154b13be9de5e46cbc2b5f | 1,254 | py | Python | src/search/views/author.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/search/views/author.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/search/views/author.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | from rest_framework import viewsets
from elasticsearch_dsl import Search
from elasticsearch_dsl.connections import connections
from search.filters import ElasticsearchFuzzyFilter
from search.documents import AuthorDocument
from search.serializers import AuthorDocumentSerializer
from utils.permissions import ReadOnly
class AuthorDocumentView(viewsets.ReadOnlyModelViewSet):
    """Read-only DRF viewset backed by the Elasticsearch AuthorDocument index,
    with fuzzy search over first and last names."""
    serializer_class = AuthorDocumentSerializer
    document = AuthorDocument
    permission_classes = [ReadOnly]
    filter_backends = [ElasticsearchFuzzyFilter]
    search_fields = ['first_name', 'last_name']
    def __init__(self, *args, **kwargs):
        assert self.document is not None
        # Resolve the ES connection/index/mapping from the document class so
        # the filter backend can build queries against the right index.
        self.client = connections.get_connection(
            self.document._get_using()
        )
        self.index = self.document._index._name
        self.mapping = self.document._doc_type.mapping.properties.name
        self.search = Search(
            using=self.client,
            index=self.index,
            doc_type=self.document._doc_type.name
        )
        super(AuthorDocumentView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        """Return an ES Search queryset, tagged with the Django model for DRF."""
        queryset = self.search.query()
        queryset.model = self.document.Django.model
        return queryset
| 32.153846 | 70 | 0.720893 | 932 | 0.743222 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.018341 |
0d7e76df168940114e6ba8aba86bf80b7241eafe | 4,329 | py | Python | MPNet/data_loader.py | sbaktha/MPNet | 7780a00bb5bca2fc5fdfcc5d50b8dcc7c0120b20 | [
"MIT"
] | null | null | null | MPNet/data_loader.py | sbaktha/MPNet | 7780a00bb5bca2fc5fdfcc5d50b8dcc7c0120b20 | [
"MIT"
] | null | null | null | MPNet/data_loader.py | sbaktha/MPNet | 7780a00bb5bca2fc5fdfcc5d50b8dcc7c0120b20 | [
"MIT"
] | null | null | null | import torch
import torch.utils.data as data
import os
import pickle
import numpy as np
import nltk
from PIL import Image
import os.path
import random
from torch.autograd import Variable
import torch.nn as nn
import math
# Environment Encoder
class Encoder(nn.Module):
    """MLP that compresses a flattened 2800-dim point cloud into a 28-dim code."""

    def __init__(self):
        super(Encoder, self).__init__()
        # Build the same Linear/PReLU stack as the original inline Sequential;
        # module order (and hence state_dict keys encoder.0 ... encoder.6) is
        # unchanged, so cae_encoder.pkl checkpoints remain loadable.
        widths = (2800, 512, 256, 128)
        stages = []
        for n_in, n_out in zip(widths, widths[1:]):
            stages.append(nn.Linear(n_in, n_out))
            stages.append(nn.PReLU())
        stages.append(nn.Linear(128, 28))
        self.encoder = nn.Sequential(*stages)

    def forward(self, x):
        return self.encoder(x)
#N=number of environments; NP=Number of Paths
def load_dataset(N=100, NP=4000):
    """Build (input, target) training pairs for the MPNet planner.

    N  -- number of environments to load.
    NP -- number of paths per environment.

    Each input row is [28-d obstacle encoding, current x/y, goal x/y]; the
    target is the next waypoint of the demonstrated path.  Reads the
    pre-trained CAE encoder weights and the dataset files from disk and
    moves encoder inputs to the GPU.
    """
    Q = Encoder()
    Q.load_state_dict(torch.load('../models/cae_encoder.pkl'))
    if torch.cuda.is_available():
        Q.cuda()
    obs_rep = np.zeros((N, 28), dtype=np.float32)
    for i in range(0, N):
        # load obstacle point cloud
        temp = np.fromfile('../../dataset/obs_cloud/obc' + str(i) + '.dat')
        # // : reshape requires integer dimensions (len(temp)/2 is a float on Python 3)
        temp = temp.reshape(len(temp) // 2, 2)
        obstacles = np.zeros((1, 2800), dtype=np.float32)
        obstacles[0] = temp.flatten()
        inp = torch.from_numpy(obstacles)
        inp = Variable(inp).cuda()
        output = Q(inp)
        output = output.data.cpu()
        obs_rep[i] = output.numpy()
    ## calculating length of the longest trajectory
    max_length = 0
    # NOTE(review): int8 caps recorded path lengths at 127 waypoints -- confirm dataset bounds.
    path_lengths = np.zeros((N, NP), dtype=np.int8)
    for i in range(0, N):
        for j in range(0, NP):
            fname = '../../dataset/e' + str(i) + '/path' + str(j) + '.dat'
            if os.path.isfile(fname):
                path = np.fromfile(fname)
                path = path.reshape(len(path) // 2, 2)
                path_lengths[i][j] = len(path)
                if len(path) > max_length:
                    max_length = len(path)
    paths = np.zeros((N, NP, max_length, 2), dtype=np.float32)  ## padded paths
    for i in range(0, N):
        for j in range(0, NP):
            fname = '../../dataset/e' + str(i) + '/path' + str(j) + '.dat'
            if os.path.isfile(fname):
                path = np.fromfile(fname)
                path = path.reshape(len(path) // 2, 2)
                for k in range(0, len(path)):
                    paths[i][j][k] = path[k]
    dataset = []
    targets = []
    for i in range(0, N):
        for j in range(0, NP):
            if path_lengths[i][j] > 0:
                for m in range(0, path_lengths[i][j] - 1):
                    data = np.zeros(32, dtype=np.float32)
                    for k in range(0, 28):
                        data[k] = obs_rep[i][k]
                    data[28] = paths[i][j][m][0]
                    data[29] = paths[i][j][m][1]
                    data[30] = paths[i][j][path_lengths[i][j] - 1][0]
                    data[31] = paths[i][j][path_lengths[i][j] - 1][1]
                    targets.append(paths[i][j][m + 1])
                    dataset.append(data)
    # zip() is a one-shot iterator on Python 3; materialize it so shuffle works.
    data = list(zip(dataset, targets))
    random.shuffle(data)
    dataset, targets = zip(*data)
    return np.asarray(dataset), np.asarray(targets)
#N=number of environments; NP=Number of Paths; s=starting environment no.; sp=starting_path_no
#Unseen_environments==> N=10, NP=2000,s=100, sp=0
#seen_environments==> N=100, NP=200,s=0, sp=4000
def load_test_dataset(N=100, NP=200, s=0, sp=4000):
    """Load obstacle encodings and padded demonstration paths for evaluation.

    N / NP -- number of environments / paths per environment.
    s / sp -- starting environment and path offsets into the dataset.
    Unseen environments: N=10, NP=2000, s=100, sp=0.
    Seen environments:   N=100, NP=200,  s=0,   sp=4000.

    Returns (obc, obs_rep, paths, path_lengths).
    """
    obc = np.zeros((N, 7, 2), dtype=np.float32)
    temp = np.fromfile('../../dataset/obs.dat')
    # // : reshape requires integer dimensions (len(temp)/2 is a float on Python 3)
    obs = temp.reshape(len(temp) // 2, 2)
    temp = np.fromfile('../../dataset/obs_perm2.dat', np.int32)
    perm = temp.reshape(77520, 7)
    ## loading obstacles
    for i in range(0, N):
        for j in range(0, 7):
            for k in range(0, 2):
                obc[i][j][k] = obs[perm[i + s][j]][k]
    Q = Encoder()
    Q.load_state_dict(torch.load('../models/cae_encoder.pkl'))
    if torch.cuda.is_available():
        Q.cuda()
    obs_rep = np.zeros((N, 28), dtype=np.float32)
    k = 0
    for i in range(s, s + N):
        temp = np.fromfile('../../dataset/obs_cloud/obc' + str(i) + '.dat')
        temp = temp.reshape(len(temp) // 2, 2)
        obstacles = np.zeros((1, 2800), dtype=np.float32)
        obstacles[0] = temp.flatten()
        inp = torch.from_numpy(obstacles)
        inp = Variable(inp).cuda()
        output = Q(inp)
        output = output.data.cpu()
        obs_rep[k] = output.numpy()
        k = k + 1
    ## calculating length of the longest trajectory
    max_length = 0
    # NOTE(review): int8 caps recorded path lengths at 127 waypoints -- confirm dataset bounds.
    path_lengths = np.zeros((N, NP), dtype=np.int8)
    for i in range(0, N):
        for j in range(0, NP):
            fname = '../../dataset/e' + str(i + s) + '/path' + str(j + sp) + '.dat'
            if os.path.isfile(fname):
                path = np.fromfile(fname)
                path = path.reshape(len(path) // 2, 2)
                path_lengths[i][j] = len(path)
                if len(path) > max_length:
                    max_length = len(path)
    paths = np.zeros((N, NP, max_length, 2), dtype=np.float32)  ## padded paths
    for i in range(0, N):
        for j in range(0, NP):
            fname = '../../dataset/e' + str(i + s) + '/path' + str(j + sp) + '.dat'
            if os.path.isfile(fname):
                path = np.fromfile(fname)
                path = path.reshape(len(path) // 2, 2)
                for k in range(0, len(path)):
                    paths[i][j][k] = path[k]
    return obc, obs_rep, paths, path_lengths
| 26.078313 | 144 | 0.662509 | 282 | 0.065142 | 0 | 0 | 0 | 0 | 0 | 0 | 723 | 0.167013 |
0d7f5efa7e68562621936b11080cd338789bdad4 | 1,584 | py | Python | qclib/backend/state.py | dylanljones/qclib | 91e63b642f57fee6eff0b3ff1d696880a66509c3 | [
"MIT"
] | 2 | 2021-10-31T11:14:51.000Z | 2021-11-17T13:39:50.000Z | qclib/backend/state.py | dylanljones/qclib | 91e63b642f57fee6eff0b3ff1d696880a66509c3 | [
"MIT"
] | null | null | null | qclib/backend/state.py | dylanljones/qclib | 91e63b642f57fee6eff0b3ff1d696880a66509c3 | [
"MIT"
] | 1 | 2021-10-31T11:15:00.000Z | 2021-10-31T11:15:00.000Z | # coding: utf-8
#
# This code is part of qclib.
#
# Copyright (c) 2021, Dylan Jones
import numpy as np
from ..math import apply_statevec, apply_density, density_matrix
from .measure import measure_qubit, measure_qubit_rho
class DensityMatrix:
    """Mutable wrapper around a density matrix stored as a NumPy array.

    Supports len/iteration/indexing over the underlying array, in-place
    application of operators, and single-qubit measurement.
    """

    def __init__(self, mat):
        # Coerce any array-like input to an ndarray once, up front.
        self._data = np.asarray(mat)

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        yield from self._data

    def __getitem__(self, item):
        return self._data[item]

    def __str__(self):
        return f"{self._data}"

    def copy(self):
        """Return an independent copy of this density matrix."""
        return type(self)(self._data.copy())

    def apply(self, operator):
        """Apply *operator* to the density matrix in place."""
        self._data = apply_density(self._data, operator)

    def measure(self, qubit, eigvals=None, eigvecs=None):
        """Measure *qubit*, collapse the state in place and return the outcome."""
        outcome, collapsed = measure_qubit_rho(self._data, qubit, eigvals, eigvecs)
        self._data = collapsed
        return outcome
class StateVector:
    """Mutable wrapper around a pure-state vector stored as a NumPy array."""

    def __init__(self, vec):
        # Coerce any array-like input to an ndarray once, up front.
        self._data = np.asarray(vec)

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        yield from self._data

    def __getitem__(self, item):
        return self._data[item]

    def __str__(self):
        return f"{self._data}"

    def copy(self):
        """Return an independent copy of this state vector."""
        return type(self)(self._data.copy())

    def apply(self, operator):
        """Apply *operator* to the state vector in place."""
        self._data = apply_statevec(self._data, operator)

    def measure(self, qubit, eigvals=None, eigvecs=None):
        """Measure *qubit*, collapse the state in place and return the outcome."""
        outcome, collapsed = measure_qubit(self._data, qubit, eigvals, eigvecs)
        self._data = collapsed
        return outcome

    def to_density(self):
        """Return the DensityMatrix built from this state vector."""
        return DensityMatrix(density_matrix(self._data))
| 22.956522 | 80 | 0.65404 | 1,355 | 0.855429 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.049874 |
0d7fab914540aa7a0c18cf59c2ed8e272d40f98a | 3,981 | py | Python | Upwelling_project_noCFM.py | gmarmin10/Theoretical_Coastal_Model | c045fd16b44ec4fa3e1a641a323f45b2daeef433 | [
"MIT"
] | null | null | null | Upwelling_project_noCFM.py | gmarmin10/Theoretical_Coastal_Model | c045fd16b44ec4fa3e1a641a323f45b2daeef433 | [
"MIT"
] | null | null | null | Upwelling_project_noCFM.py | gmarmin10/Theoretical_Coastal_Model | c045fd16b44ec4fa3e1a641a323f45b2daeef433 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# #### Modeling the elemental stoichiometry of phytoplankton and surrounding surface waters in and upwelling or estuarine system
# >Steps to complete project:
# >1. Translate matlab physical model into python
# >2. Substitute Dynamic CFM into model for eco component
# >3. Analyze the results
#
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math as m
from math import pi
import time
import pylab as lab
# In[2]:
# --- Physical advection model of nutrient/biomass stoichiometry along a 1-D coast ---
#create the time grid
sperd=60*60*24 #seconds per day
spery=365*sperd #seconds per year
nyr=1 #number of years to simulate. This can be changed by the user
T=spery*nyr #length of time in seconds to simulate
dt=spery/720 #time step
t=np.arange(0,T+dt,dt) #create a time array
Nt=T/dt+1 #number of time grid points
Nt=int(Nt)
# In[3]:
#create the spatial grid
Lx=5e5 #length in meters
dx=1e3 #grid spacing
Nx=(Lx/dx)+1 #number of grid points
Nx=int(Nx)
xx=np.arange(0,Lx+dx,dx) #create a space array
# In[4]:
#wind forcing
U0=0.1 #velocity (m/s) subject to change
# In[5]:
#initial conditions of nutrient variables NO3 (Nnut) and PO4 (Pnut)
Pnut=2*np.ones(Nx) #creating a ones array the size of the number of spatial grid points
Pnut_i=Pnut[0] #initial value of phosphorus at the coastal boundary
Rup_Po=10*Pnut_i/spery #baseline P uptake rate (will be changed with CFM-Phyto)
Nnut=Pnut*15 #assume NO3 concentration higher than PO4. Change based on field observations
Nnut_i=Pnut_i*15 #initial value of NO3 available at the coastal boundary
Rup_No=10*Nnut_i/spery #baseline N uptake rate (will be changed with CFM-Phyto)
# In[6]:
#initial condition of biomass variables- to be replaced with CFM later
Pbio=0.01*Pnut
Pbio_i=0.01*Pnut_i #phytoplankton P at coast (boundary condition)
Nbio=0.01*Nnut
Nbio_i=0.01*Nnut_i #phytoplankton N at coast (boundary condition)
print(np.size(Nbio))
# In[7]:
#initial biological parameters
Kp=0.1 #half-saturation constant for Pnut
Kn=1 #half-saturation constant for Nnut
mu= 1/sperd #growth rate per sec
phi=0.5 #fraction of uptake remineralized locally
Snp=16 #redfield N:P ratio of phytoplankton
m2=mu*0.2 #quadratic mortality
# In[8]:
# oscillatory forcing parameters
Period=1 #period of oscillation in forcing (velocity) (yr)
w=(2*pi)/Period #frequency of oscillation
A0=0.5 #amplitude of oscillation
nn=0 #year counter
# In[9]:
it_Nx=np.arange(0,Nx-1,1)
it_Nt=np.arange(0,Nt+1,1)
f=[]
Ua=[]
# Main time-stepping loop: forcing, biological rates, then upwind advection.
for n in it_Nt:
    #vary the circulation rates
    # NOTE(review): this inner loop overwrites f each iteration, so only the
    # value at the final time t[-1] survives; it also does not depend on n.
    # Presumably f should be evaluated at the current step -- confirm intent.
    for y in t:
        f=A0*(m.sin(w*y/spery))
        #fn.append(f)
    U0_array=np.full_like(f,U0)  # NOTE(review): U0_array is never used afterwards
    Ua=U0*f
    U=U0+Ua
    #calculate the biological rates-to be replaced by CFM
    RgrowN=mu*Nbio*(Nnut/(Nnut+Kn))  # Monod growth on NO3
    RmortN=m2*Nbio**2  # quadratic mortality
    RbioN=RgrowN-RmortN
    RnutN=-RgrowN+phi*RmortN  # uptake sink plus local remineralization source
    RbioP=RbioN/Snp  # P rates follow N via fixed Redfield ratio
    RnutP=RnutN/Snp
    #update the distribution: Advection scheme
    # Implicit upwind update, sweeping from the coastal boundary outward.
    for i in it_Nx:
        Pnut[i+1]=((dt/dx)*U*Pnut[i]+Pnut[i+1]+RnutP[i]*dt)/(1+dt/dx*U)
        Nnut[i+1]=((dt/dx)*U*Nnut[i]+Nnut[i+1]+RnutN[i]*dt)/(1+dt/dx*U)
        Pbio[i+1]=((dt/dx)*U*Pbio[i]+Pbio[i+1]+RbioP[i]*dt)/(1+dt/dx*U)
        Nbio[i+1]=((dt/dx)*U*Nbio[i]+Nbio[i+1]+RbioN[i]*dt)/(1+dt/dx*U)
print((Pnut))
# In[33]:
#some plotting
# 2x2 panel of final along-shore profiles; x converted from m to km.
ax=plt.figure(1)
plt.subplot(2,2,1)
x=np.arange(0,Lx+dx,dx)
x=x*1e-3
plt.plot(x,Pnut,marker='o',color='orange')
plt.xlabel('horizontal distance (km)')
plt.ylabel('PO4 (uM)')
plt.subplot(2,2,2)
plt.plot(x,Nnut,marker='o',color='green')
plt.xlabel('horizontal distance (km)')
plt.ylabel('NO3 (uM)')
plt.subplot(2,2,3)
plt.plot(x,Pbio,marker='o',color='red')
plt.xlabel('horizontal distance (km)')
plt.ylabel('Phyto P (uM)')
plt.subplot(2,2,4)
plt.plot(x,Nbio,marker='o',color='blue')
plt.xlabel('horizontal distance (km)')
plt.ylabel('Phyto N (uM)')
plt.tight_layout()
plt.savefig('Nutrient_Concentrations.png')
plt.show()
# In[ ]:
| 23.838323 | 128 | 0.678222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,057 | 0.516704 |
0d802fca09e29a84af3006203d6210a4292b4238 | 2,534 | py | Python | run.py | UNIFUZZ/getcvss | 360de42ef09d7e21ef7c539d48ac083d10e8e215 | [
"MIT"
] | 2 | 2020-08-16T12:06:17.000Z | 2021-01-05T05:35:54.000Z | run.py | UNIFUZZ/getcvss | 360de42ef09d7e21ef7c539d48ac083d10e8e215 | [
"MIT"
] | null | null | null | run.py | UNIFUZZ/getcvss | 360de42ef09d7e21ef7c539d48ac083d10e8e215 | [
"MIT"
] | 2 | 2020-08-16T11:07:29.000Z | 2022-01-04T02:18:50.000Z | import requests
sess = requests.session()
import gzip
import json
import time
import os
def downloadyear(year):
    """Fetch the gzipped NVD CVE feed for *year* and return the decoded JSON text.

    *year* may be a four-digit year or the string "recent".
    """
    print("fetching year", year)
    feed_url = f"https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-{year}.json.gz"
    response = sess.get(feed_url, stream=True)
    # Decompress straight from the raw response stream.
    return gzip.open(response.raw).read().decode()
def getdata(year):
    """Parse one NVD feed into {cve_id: [cwe, v3 score, v2 score, v3 vector, v2 vector, date]}.

    Entries without a CVSS v3 (or v2) assessment get score -1 and an empty
    vector string.
    """
    # {"id": ["CWE1/CWE2", "CVSSV3 score", "CVSSV2 score", "vector V3", "vector V2"]}
    # example: "CVE-2011-1474": ["CWE-400/CWE-835", 5.5, 4.9, "CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H", "AV:L/AC:L/Au:N/C:N/I:N/A:C"]
    data = json.loads(downloadyear(year))
    res = {}
    for item in data["CVE_Items"]:
        # 'id' shadows the builtin -- use a descriptive local name instead.
        cve_id = item["cve"]["CVE_data_meta"]["ID"]
        cwes = [i["value"] for i in item["cve"]["problemtype"]["problemtype_data"][0]["description"]]
        cwe = "/".join(cwes)
        try:
            metric_v3 = item["impact"]["baseMetricV3"]["cvssV3"]
            cvssv3_score, vector_v3 = metric_v3["baseScore"], metric_v3["vectorString"]
        except KeyError:
            # No CVSS v3 assessment for this entry.
            cvssv3_score, vector_v3 = -1, ""
        try:
            metric_v2 = item["impact"]["baseMetricV2"]["cvssV2"]
            cvssv2_score, vector_v2 = metric_v2["baseScore"], metric_v2["vectorString"]
        except KeyError:
            # No CVSS v2 assessment for this entry.
            cvssv2_score, vector_v2 = -1, ""
        # Keep only the date part of e.g. "2020-01-01T10:15Z".
        date = item["publishedDate"].split("T")[0]
        res[cve_id] = [cwe, cvssv3_score, cvssv2_score, vector_v3, vector_v2, date]
    return res
def fullupdate():
    """Aggregate CVE data from every yearly NVD feed (2002..now) plus 'recent'."""
    current_year = int(time.strftime("%Y"))
    merged = {}
    # The 'recent' feed comes last so it overrides stale yearly entries.
    for feed in [*range(2002, current_year + 1), "recent"]:
        merged.update(getdata(feed))
    return merged
def writetofile(filepath, data):
    """Write the CVE dict to *filepath* as CSV lines, sorted by CVE year then number."""
    def cve_sort_key(entry):
        # Entry key looks like "CVE-<year>-<number>".
        year, number = entry[0].split("-")[1:3]
        return (int(year), int(number))

    with open(filepath, "w") as fp:
        for cve_id, fields in sorted(data.items(), key=cve_sort_key):
            fp.write(",".join(str(value) for value in [cve_id] + fields) + "\n")
def readfromfile(filepath):
    """Read a CSV produced by writetofile() back into {cve_id: [cwe, v3, v2, vec3, vec2]}.

    writetofile() emits 7 comma-separated fields (id, cwe, scores, vectors,
    publish date); older files may have only 6.  The original strict 6-way
    unpack crashed with ValueError on the 7-field format, so we take the
    first six fields and ignore any extras (the date is not returned, which
    keeps the established return shape).
    """
    data = {}
    for _line in open(filepath):
        fields = _line.strip().split(",")
        if len(fields) < 6:
            # Skip blank/malformed lines instead of crashing mid-file.
            continue
        cve_id, cwe, cvssv3_score, cvssv2_score, vector_v3, vector_v2 = fields[:6]
        data[cve_id] = [cwe, float(cvssv3_score), float(cvssv2_score), vector_v3, vector_v2]
    return data
if __name__ == "__main__":
    # Entry point: refresh the full CVE dump and persist it as CSV.
    #print(getdata("recent"))
    #print(os.path.getmtime("/tmp/cvssdata/cvss.csv"), time.time()-os.path.getmtime("/tmp/cvssdata/cvss.csv"))
    data = fullupdate()
    # NOTE(review): output path is hard-coded; /tmp/cvssdata must already exist.
    writetofile("/tmp/cvssdata/cvss.csv", data)
    # TODO: add meta data comparison to avoid full update
| 40.870968 | 150 | 0.598658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 839 | 0.331097 |
0d8131562321f737bfe94d6f5f98ee26c890434c | 2,974 | py | Python | src/enumerator.py | darkarnium/perimeterator | 8c694267d92ca1d28fc1494cd9394af34271ed39 | [
"MIT"
] | 56 | 2019-03-20T01:44:04.000Z | 2022-02-16T13:36:39.000Z | src/enumerator.py | darkarnium/perimeterator | 8c694267d92ca1d28fc1494cd9394af34271ed39 | [
"MIT"
] | 1 | 2020-07-08T20:30:23.000Z | 2020-11-07T15:41:25.000Z | src/enumerator.py | darkarnium/perimeterator | 8c694267d92ca1d28fc1494cd9394af34271ed39 | [
"MIT"
] | 9 | 2019-10-09T18:54:52.000Z | 2021-12-28T15:27:58.000Z | #!/usr/bin/env python3
''' Perimeterator Enumerator.
This wrapper is intended to allow for simplified AWS based deployment of the
Perimeterator enumerator. This allows for a cost effective method of
execution, as the Perimeterator poller component only needs to execute on a
defined schedule in order to detect changes.
'''
import os
import logging
import perimeterator
# TODO: This should likely be configurable.
# Enumerator handler modules to run, one per AWS resource type
# (perimeterator.enumerator.<name>.Enumerator is looked up by this name).
MODULES = [
    'rds',
    'ec2',
    'elb',
    'elbv2',
    'es',
]
def lambda_handler(event, context):
    ''' An AWS Lambda wrapper for the Perimeterator enumerator.

    Enumerates externally-visible resources for every configured region and
    module, and dispatches the discovered addresses to an SQS queue for the
    scanner component.  *event* and *context* are the standard Lambda
    arguments and are not used.
    '''
    # Strip off any existing handlers that may have been installed by AWS.
    logger = logging.getLogger()
    for handler in logger.handlers:
        logger.removeHandler(handler)
    # Reconfigure the root logger the way we want it.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(process)d - [%(levelname)s] %(message)s'
    )
    # Get the account id for the current AWS account.
    account = perimeterator.helper.aws_account_id()
    logger.info("Running in AWS account %s", account)
    # Get configurable options from environment variables.
    # ENUMERATOR_REGIONS is a comma-separated region list (default us-west-2).
    regions = os.getenv("ENUMERATOR_REGIONS", "us-west-2").split(",")
    sqs_queue = os.getenv("ENUMERATOR_SQS_QUEUE", None)
    logger.info("Configured results SQS queue is %s", sqs_queue)
    logger.info(
        "Configured regions for resource enumeration are %s",
        ", ".join(regions)
    )
    # Setup the SQS dispatcher for submission of addresses to scanners.
    queue = perimeterator.dispatcher.sqs.Dispatcher(queue=sqs_queue)
    # Process regions one at a time, enumerating addresses for all configured
    # resources in the given region. Currently, it's not possible to only
    # enumerate different resources types by region. Maybe later! :)
    for region in regions:
        logger.info("Attempting to enumerate resources in %s", region)
        for module in MODULES:
            logger.info("Attempting to enumerate %s resources", module)
            try:
                # Ensure a handler exists for this type of resource.
                hndl = getattr(perimeterator.enumerator, module).Enumerator(
                    region=region
                )
            except AttributeError as err:
                # Missing handler is logged and skipped, not fatal.
                logger.error(
                    "Handler for %s resources not found, skipping: %s",
                    module,
                    err
                )
                continue
            # Get all addresses and dispatch to SQS for processing.
            logger.info(
                "Submitting %s resources in %s for processing",
                module,
                region
            )
            queue.dispatch(account, hndl.get())
if __name__ == '__main__':
    ''' Allow the script to be invoked outside of Lambda. '''
    # Empty dicts stand in for the Lambda event/context, which are unused.
    lambda_handler(
        dict(), # No real 'event' data.
        dict()  # No real 'context' data.
    )
| 33.41573 | 77 | 0.631137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,579 | 0.530935 |
0d819476af5932130a9089599a42a757e9fa6fab | 3,991 | py | Python | script/diffsel_script_utils.py | vlanore/COPTR | babe25070ddfa67dd04e0f96a143ce539cc27fbb | [
"CECILL-B"
] | null | null | null | script/diffsel_script_utils.py | vlanore/COPTR | babe25070ddfa67dd04e0f96a143ce539cc27fbb | [
"CECILL-B"
] | null | null | null | script/diffsel_script_utils.py | vlanore/COPTR | babe25070ddfa67dd04e0f96a143ce539cc27fbb | [
"CECILL-B"
] | null | null | null | # Copyright or Copr. Centre National de la Recherche Scientifique (CNRS) (2017/11/27)
# Contributors:
# - Vincent Lanore <vincent.lanore@gmail.com>
# This software is a computer program whose purpose is to provide small tools and scripts related to phylogeny and bayesian
# inference.
# This software is governed by the CeCILL-B license under French law and abiding by the rules of distribution of free software.
# You can use, modify and/ or redistribute the software under the terms of the CeCILL-B license as circulated by CEA, CNRS and
# INRIA at the following URL "http://www.cecill.info".
# As a counterpart to the access to the source code and rights to copy, modify and redistribute granted by the license, users
# are provided only with a limited warranty and the software's author, the holder of the economic rights, and the successive
# licensors have only limited liability.
# In this respect, the user's attention is drawn to the risks associated with loading, using, modifying and/or developing or
# reproducing the software by the user in light of its specific status of free software, that may mean that it is complicated
# to manipulate, and that also therefore means that it is reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the software's suitability as regards their requirements
# in conditions enabling the security of their systems and/or data to be ensured and, more generally, to use and operate it in
# the same conditions as regards security.
# The fact that you are presently reading this means that you have had knowledge of the CeCILL-B license and that you accept
# its terms.
import sys
import random
# String handling functions
def strip(str):
    """Drop a leading '#' (keeping the rest verbatim); otherwise trim whitespace.

    Fix: the original indexed str[0] unconditionally and raised IndexError on
    an empty string; startswith() handles "" safely and returns "".
    NOTE: the parameter name shadows the builtin; kept for call compatibility.
    """
    if str.startswith('#'):
        return str[1:]
    else:
        return str.strip()
# Color-related functions
if sys.stdout.isatty():
    class bcolors:
        """ANSI escape codes, active when stdout is an interactive terminal."""
        HEADER = '\033[95m'
        OKBLUE = '\033[34m'
        OKGREEN = '\033[32m'
        YELLOW = '\033[33m'
        WARNING = '\033[93m'
        # NOTE(review): CYAN uses the same code as YELLOW; true cyan is
        # \033[36m -- confirm whether this is intentional before changing.
        CYAN = '\033[33m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'
else:
    class bcolors:
        """No-op color codes for non-interactive output (pipes, files)."""
        HEADER = ''
        OKBLUE = ''
        OKGREEN = ''
        # Bug fix: YELLOW was missing here, so yellow() raised AttributeError
        # whenever output was redirected.
        YELLOW = ''
        WARNING = ''
        CYAN = ''
        FAIL = ''
        ENDC = ''
        BOLD = ''
        UNDERLINE = ''

# Small helpers wrapping text in the color codes above.
def boldred(string):
    return bcolors.FAIL+bcolors.BOLD+string+bcolors.ENDC

def red(string):
    return bcolors.FAIL+string+bcolors.ENDC

def yellow(string):
    return bcolors.YELLOW+string+bcolors.ENDC

def boldcyan(string):
    return bcolors.CYAN+bcolors.BOLD+string+bcolors.ENDC

def param(myparam):
    return bcolors.OKBLUE+str(myparam)+bcolors.ENDC

def data(myparam):
    return bcolors.CYAN+str(myparam)+bcolors.ENDC

def step(string):
    return bcolors.BOLD+bcolors.HEADER+string+bcolors.ENDC

def boldgreen(string):
    return bcolors.BOLD+bcolors.OKGREEN+string+bcolors.ENDC

def green(string):
    return bcolors.OKGREEN+string+bcolors.ENDC

def good(string):
    return "-- ("+bcolors.OKGREEN+"Good"+bcolors.ENDC+") "+string

def bad(string):
    return "-- ("+bcolors.FAIL+"Bad"+bcolors.ENDC+") "+string

def success(string):
    return "-- ["+boldgreen("SUCCESS")+"] "+string

def failure(string):
    return "-- ["+boldred("FAILURE")+"] "+string

def ask_input(string):
    return "-- ["+boldcyan("INPUT")+"] "+str(string)
# Codon functions
# DNA alphabet used by all codon helpers below.
bases = ["A", "C", "G", "T"]

def rand_codon():
    """Return a uniformly random 3-letter codon."""
    # Three independent draws, same RNG call sequence as three choice() calls.
    return "".join(random.choice(bases) for _ in range(3))

def selected_codon():
    """Return one of the asparagine codons (AAT/AAC), chosen at random."""
    asn_codons = ["AAT", "AAC"]
    return random.choice(asn_codons)

def mutate(codon, proba=100):
    """With probability *proba* percent, substitute one random base of *codon*."""
    if random.randint(1, 100) > proba:
        # No mutation this time: return the codon untouched.
        return codon
    mutated = list(codon)
    mutated[random.randint(0, 2)] = random.choice(bases)
    print("Decided to mutate codon "+codon+" to "+"".join(mutated)+" with probability "+str(proba))
    return "".join(mutated)
| 32.983471 | 127 | 0.687798 | 479 | 0.12002 | 0 | 0 | 0 | 0 | 0 | 0 | 2,033 | 0.509396 |
0d81c066a40ec6d9a8fc2da5c8835fbc0f207108 | 6,871 | py | Python | test/test_PointSource/test_point_source.py | guoxiaowhu/lenstronomy | dcdfc61ce5351ac94565228c822f1c94392c1ad6 | [
"MIT"
] | 1 | 2018-11-08T12:33:26.000Z | 2018-11-08T12:33:26.000Z | test/test_PointSource/test_point_source.py | guoxiaowhu/lenstronomy | dcdfc61ce5351ac94565228c822f1c94392c1ad6 | [
"MIT"
] | null | null | null | test/test_PointSource/test_point_source.py | guoxiaowhu/lenstronomy | dcdfc61ce5351ac94565228c822f1c94392c1ad6 | [
"MIT"
] | null | null | null | import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
import lenstronomy.Util.param_util as param_util
class TestPointSource(object):
    """Tests for PointSource with per-image (non-fixed) magnifications."""

    def setup(self):
        # Build an SPEP lens, solve for the image positions of a source at
        # (0.01, -0.01), and wire up a PointSource with three model types.
        lensModel = LensModel(lens_model_list=['SPEP'])
        solver = LensEquationSolver(lensModel=lensModel)
        e1, e2 = param_util.phi_q2_ellipticity(0, 0.7)
        self.kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2, 'gamma': 2}]
        self.sourcePos_x, self.sourcePos_y = 0.01, -0.01
        self.x_pos, self.y_pos = solver.image_position_from_source(sourcePos_x=self.sourcePos_x,
                                                                   sourcePos_y=self.sourcePos_y, kwargs_lens=self.kwargs_lens)
        self.PointSource = PointSource(point_source_type_list=['LENSED_POSITION', 'UNLENSED', 'SOURCE_POSITION'],
                                       lensModel=lensModel, fixed_magnification_list=[False]*4, additional_images_list=[False]*4)
        # One kwargs dict per point-source model (plus a trailing empty one).
        self.kwargs_ps = [{'ra_image': self.x_pos, 'dec_image': self.y_pos, 'point_amp': np.ones_like(self.x_pos)},
                          {'ra_image': [1.], 'dec_image': [1.], 'point_amp': [10]},
                          {'ra_source': self.sourcePos_x, 'dec_source': self.sourcePos_y, 'point_amp': np.ones_like(self.x_pos)}, {}]

    def test_image_position(self):
        # Image positions should match the pre-solved positions per model type.
        x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_image_list[0][0], self.x_pos[0], decimal=8)
        npt.assert_almost_equal(x_image_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_image_list[2][0], self.x_pos[0], decimal=8)

    def test_source_position(self):
        # Ray-traced source positions should recover the configured source.
        x_source_list, y_source_list = self.PointSource.source_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_source_list[0], self.sourcePos_x, decimal=8)
        npt.assert_almost_equal(x_source_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_source_list[2], self.sourcePos_x, decimal=8)

    def test_num_basis(self):
        # With free magnifications each image contributes one basis function.
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert num_basis == 9

    def test_linear_response_set(self):
        # Linear response must expose one entry per basis function.
        ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=False, k=None)
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert n == num_basis
        assert ra_pos[0][0] == self.x_pos[0]

    def test_point_source_list(self):
        # Flattened per-image lists over all models.
        ra_list, dec_list, amp_list = self.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens)
        assert ra_list[0] == self.x_pos[0]
        assert len(ra_list) == 9

    def test_point_source_amplitude(self):
        # One source amplitude per point-source model.
        amp_list = self.PointSource.source_amplitude(self.kwargs_ps, self.kwargs_lens)
        assert len(amp_list) == 3

    def test_set_save_cache(self):
        # Toggling the cache flag must propagate to the underlying models.
        self.PointSource.set_save_cache(True)
        assert self.PointSource._point_source_list[0]._save_cache == True
        self.PointSource.set_save_cache(False)
        assert self.PointSource._point_source_list[0]._save_cache == False

    def test_update_lens_model(self):
        # After swapping in a SIS lens, image positions follow the new model.
        lensModel = LensModel(lens_model_list=['SIS'])
        self.PointSource.update_lens_model(lens_model_class=lensModel)
        kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}]
        x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps,
                                                                     kwargs_lens=kwargs_lens)
        npt.assert_almost_equal(x_image_list[0][0], -0.82654997748011705 , decimal=8)
class TestPointSource_fixed_mag(object):
    """Tests for PointSource with fixed magnification per source."""

    def setup(self):
        # Same SPEP lens setup as TestPointSource, but with
        # fixed_magnification_list=[True]*4 and source amplitudes instead of
        # per-image amplitudes.
        lensModel = LensModel(lens_model_list=['SPEP'])
        solver = LensEquationSolver(lensModel=lensModel)
        e1, e2 = param_util.phi_q2_ellipticity(0, 0.7)
        self.kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2, 'gamma': 2}]
        self.sourcePos_x, self.sourcePos_y = 0.01, -0.01
        self.x_pos, self.y_pos = solver.image_position_from_source(sourcePos_x=self.sourcePos_x,
                                                                   sourcePos_y=self.sourcePos_y, kwargs_lens=self.kwargs_lens)
        self.PointSource = PointSource(point_source_type_list=['LENSED_POSITION', 'UNLENSED', 'SOURCE_POSITION'],
                                       lensModel=lensModel, fixed_magnification_list=[True]*4, additional_images_list=[False]*4)
        self.kwargs_ps = [{'ra_image': self.x_pos, 'dec_image': self.y_pos, 'source_amp': 1},
                          {'ra_image': [1.], 'dec_image': [1.], 'point_amp': [10]},
                          {'ra_source': self.sourcePos_x, 'dec_source': self.sourcePos_y, 'source_amp': 1.}, {}]

    def test_image_position(self):
        # Image positions are unaffected by the magnification mode.
        x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_image_list[0][0], self.x_pos[0], decimal=8)
        npt.assert_almost_equal(x_image_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_image_list[2][0], self.x_pos[0], decimal=8)

    def test_source_position(self):
        x_source_list, y_source_list = self.PointSource.source_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens)
        npt.assert_almost_equal(x_source_list[0], self.sourcePos_x, decimal=8)
        npt.assert_almost_equal(x_source_list[1], 1, decimal=8)
        npt.assert_almost_equal(x_source_list[2], self.sourcePos_x, decimal=8)

    def test_num_basis(self):
        # With fixed magnification, each source is a single basis function.
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert num_basis == 3

    def test_linear_response_set(self):
        ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=False, k=None)
        num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens)
        assert n == num_basis
        assert ra_pos[0][0] == self.x_pos[0]
        assert ra_pos[1][0] == 1
        npt.assert_almost_equal(ra_pos[2][0], self.x_pos[0], decimal=8)

    def test_point_source_list(self):
        ra_list, dec_list, amp_list = self.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens)
        assert ra_list[0] == self.x_pos[0]
        assert len(ra_list) == 9

    def test_check_image_positions(self):
        # NOTE(review): local name 'bool' shadows the builtin.
        bool = self.PointSource.check_image_positions(self.kwargs_ps, self.kwargs_lens, tolerance=0.001)
        assert bool == True
if __name__ == '__main__':
    # Allow running this test module directly via pytest's programmatic entry.
    pytest.main()
| 54.531746 | 140 | 0.679377 | 6,510 | 0.94746 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.062582 |
0d81f1dd86c97fac6a68af0051fa814935517691 | 3,493 | py | Python | invenio_rdm_pure/utils.py | utnapischtim/invenio-rdm-pure | 895addfb374dca640adc42ce68ab54ddfc8d412a | [
"MIT"
] | null | null | null | invenio_rdm_pure/utils.py | utnapischtim/invenio-rdm-pure | 895addfb374dca640adc42ce68ab54ddfc8d412a | [
"MIT"
] | 19 | 2020-10-20T09:38:09.000Z | 2021-04-01T09:13:59.000Z | invenio_rdm_pure/utils.py | utnapischtim/invenio-rdm-pure | 895addfb374dca640adc42ce68ab54ddfc8d412a | [
"MIT"
] | 2 | 2020-09-18T06:45:15.000Z | 2021-03-21T20:15:37.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Technische Universität Graz
#
# invenio-rdm-pure is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Utility methods."""
import smtplib
from datetime import datetime
from os.path import dirname, isabs, isfile, join
from pathlib import Path
from typing import List
from flask import current_app
from flask_security.utils import hash_password
from invenio_db import db
def get_user_id(user_email: str, user_password: str):
    """Get the userId of the user.

    In case the user doesn't exist yet,
    create it with given credentials.

    Returns the user's id, or implicitly None when the Flask-Security
    datastore extension is not configured.
    """
    datastore = current_app.extensions["security"].datastore
    if datastore is not None:
        user = datastore.get_user(user_email)
        if not user:
            # User not found: create an active account with the hashed
            # password and persist it immediately.
            user = datastore.create_user(
                email=user_email,
                password=hash_password(user_password),
                active=True,
            )
            db.session.commit()
        return user.id
def make_user_admin(self, id_or_email: str) -> None:
    """Gives the user with given id or email administrator rights.

    NOTE(review): module-level function with an unused ``self`` parameter --
    presumably copied from a class; confirm intended call signature.
    Everything after the early return is deliberately dead code, kept as a
    sketch until the referenced helpers exist (see FIXMEs below).
    """
    return None  # FIXME: Method stub'd until auxiliary methods are implemented.

    datastore = current_app.extensions["security"].datastore
    if datastore is not None:
        invenio_pure_user = datastore.get_user(
            id_or_email
        )  # FIXME: Not implemented yet.
        admin_role = datastore.find_role("admin")  # FIXME: Not implemented yet.
        datastore.add_role_to_user(invenio_pure_user, admin_role)
def load_file_as_string(path):
    """Open a file and return the content as a UTF-8 decoded string.

    Relative paths are resolved against this module's directory; a missing
    file yields the empty string.
    """
    target = Path(path)
    if not target.is_absolute():
        target = Path(__file__).parent / target
    if not target.is_file():
        return ""
    return target.read_bytes().decode("utf-8")
def get_dates_in_span(
    start: datetime.date, stop: datetime.date, step: int
) -> List[datetime.date]:
    """Returns an ascending list of dates with given step between the two endpoints of the span.

    A negative step requires start >= stop (the walk runs backwards and the
    result is reversed into ascending order); a positive step requires
    start <= stop.  Mismatched direction or step == 0 yields []; equal
    endpoints yield [start] regardless of step.

    Fix: the module imports only the ``datetime`` class, so the original
    ``datetime.timedelta(step)`` raised AttributeError at runtime.
    """
    from datetime import timedelta  # local import: module imports only the datetime class

    dates = []
    if start == stop:
        return [start]
    elif step == 0:
        return []
    elif step < 0:
        if start < stop:
            return []
        else:
            while start >= stop:
                dates.append(start)
                start += timedelta(days=step)
            dates.reverse()
    elif step > 0:
        if stop < start:
            return []
        else:
            while start <= stop:
                dates.append(start)
                start += timedelta(days=step)
    return dates
def send_email(
    uuid: str,
    file_name: str,
    email_sender: str,
    email_sender_password: str,
    email_receiver: str,
):
    """Send an email asking Pure admins to delete *file_name* from record *uuid*.

    Connects to Gmail over SMTP/STARTTLS using the sender's credentials.
    NOTE(review): the message lacks the blank line separating the Subject
    header from the body (and any spacing after the subject), so mail
    clients will likely render it oddly -- confirm intended format.
    """
    email_smtp_server = "smtp.gmail.com"
    email_smtp_port = 587
    email_subject = "Delete Pure File"
    email_message = (
        """Subject: """
        + email_subject
        + """Please remove from pure uuid {} the file {}."""
    )
    # create SMTP session
    session = smtplib.SMTP(email_smtp_server, email_smtp_port)
    # start TLS for security
    session.starttls()
    # Authentication
    session.login(email_sender, email_sender_password)
    # sending the mail
    message = email_message.format(uuid, file_name)
    session.sendmail(email_sender, email_receiver, message)
    # terminating the session
    session.quit()
| 28.398374 | 99 | 0.634412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 971 | 0.277905 |
0d8360504869ccd639eced6db8d480f95abb8776 | 37 | py | Python | examples/project-sourcecode/c.py | wheatdog/guildai | 817cf179d0b6910d3d4fca522045a8139aef6c9e | [
"Apache-2.0"
] | 694 | 2018-11-30T01:06:30.000Z | 2022-03-31T14:46:26.000Z | examples/project-sourcecode/c.py | wheatdog/guildai | 817cf179d0b6910d3d4fca522045a8139aef6c9e | [
"Apache-2.0"
] | 323 | 2018-11-05T17:44:34.000Z | 2022-03-31T16:56:41.000Z | examples/project-sourcecode/c.py | wheatdog/guildai | 817cf179d0b6910d3d4fca522045a8139aef6c9e | [
"Apache-2.0"
] | 68 | 2019-04-01T04:24:47.000Z | 2022-02-24T17:22:04.000Z | from subproject import d
print("c")
| 9.25 | 24 | 0.72973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.081081 |
0d836b79c1c35926c2be545a9cfa96aaad92778b | 1,992 | py | Python | cookiecutterassert/rules/run_script.py | yangzii0920/cookiecutterassert | d690bb06844821334e7b2b0e6361fb30556d718b | [
"Apache-2.0"
] | 3 | 2020-09-24T12:43:42.000Z | 2022-02-10T13:04:28.000Z | cookiecutterassert/rules/run_script.py | yangzii0920/cookiecutterassert | d690bb06844821334e7b2b0e6361fb30556d718b | [
"Apache-2.0"
] | 5 | 2020-11-05T22:04:07.000Z | 2021-07-07T15:45:38.000Z | cookiecutterassert/rules/run_script.py | cookiecutterassert/cooiecutterassert | 87a2b05c45c0cb30abfc11f944a6cb12a2863a09 | [
"Apache-2.0"
] | 4 | 2020-10-06T13:55:39.000Z | 2021-11-23T15:38:00.000Z | # Copyright 2020 Ford Motor Company
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import subprocess
from cookiecutterassert import messager
class RunScriptRule:
    """Assertion rule that runs a shell script inside a folder of the
    generated cookiecutter output and passes when the script exits 0."""

    def __init__(self, options, testFolder, runFolder, script):
        self.script = script
        self.runFolder = runFolder
        self.testFolder = testFolder
        self.options = options

    def execute(self, outputFolder):
        """Run the configured script with cwd = outputFolder/runFolder.

        Returns True on a zero exit code; otherwise logs an error via the
        messager and returns False.
        """
        cwd = str(os.path.join(outputFolder, self.runFolder))
        process = subprocess.Popen(self.script, cwd=cwd, shell=True)
        process.wait()
        if process.returncode != 0:
            errorMessage = "assertion runScript {} {} failed. with non-zero return code [{}]".format(
                self.runFolder, self.script, process.returncode)
            messager.printError(errorMessage)
            return False
        return True

    def __eq__(self, obj):
        if not isinstance(obj, RunScriptRule):
            return False
        mine = (self.script, self.runFolder, self.testFolder, self.options)
        theirs = (obj.script, obj.runFolder, obj.testFolder, obj.options)
        return mine == theirs

    def __ne__(self, obj):
        return not self == obj

    def __str__(self):
        return (f"{type(self).__name__}: [testFolder={self.testFolder}, "
                f"runFolder={self.runFolder}, script={self.script}, "
                f"options={self.options}]")

    def __repr__(self):
        return self.__str__()
return self.__str__() | 31.125 | 166 | 0.674197 | 1,313 | 0.659137 | 0 | 0 | 0 | 0 | 0 | 0 | 706 | 0.354418 |
0d861b47d79b6a499c086d94f4c974f6d7dabf56 | 37,790 | py | Python | neutron/services/loadbalancer/plugin.py | CingHu/neutron-ustack | a1da17d0d63b3342a48c35da37984d6386ee1016 | [
"Apache-2.0"
] | null | null | null | neutron/services/loadbalancer/plugin.py | CingHu/neutron-ustack | a1da17d0d63b3342a48c35da37984d6386ee1016 | [
"Apache-2.0"
] | null | null | null | neutron/services/loadbalancer/plugin.py | CingHu/neutron-ustack | a1da17d0d63b3342a48c35da37984d6386ee1016 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
from neutron import manager
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron import context as ncontext
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db.loadbalancer import loadbalancer_dbv2 as ldbv2
from neutron.db.loadbalancer import models
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.extensions import loadbalancerv2
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
from neutron.services.loadbalancer import data_models
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
from neutron.db import common_db_mixin as base_db
from neutron.services.loadbalancer import constants as lb_const
from neutron.agent.linux import utils
import contextlib
import tempfile
LOG = logging.getLogger(__name__)
class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2,
                           agent_scheduler.LbaasAgentSchedulerDbMixin,
                           base_db.CommonDbMixin):
    """Implementation of the Neutron Loadbalancer Service Plugin.

    This class manages the workflow of LBaaS request/response.
    Most DB related works are implemented in class
    loadbalancer_dbv2.LoadBalancerPluginDbv2.
    """
    supported_extension_aliases = ["lbaasv2",
                                   "lbaas_agent_scheduler",
                                   "service-type"]
    # lbaas agent notifiers to handle agent update operations;
    # can be updated by plugin drivers while loading;
    # will be extracted by neutron manager when loading service plugins;
    agent_notifiers = {}

    def __init__(self):
        """Initialization for the loadbalancer service plugin."""
        self.db = ldbv2.LoadBalancerPluginDbv2()
        self.service_type_manager = st_db.ServiceTypeManager.get_instance()
        self._load_drivers()
        self.start_periodic_agent_status_check()
        #self.deploy_existing_instances();

    @property
    def _core_plugin(self):
        # Resolved lazily so the core plugin is looked up only when used.
        return manager.NeutronManager.get_plugin()

    def _load_drivers(self):
        """Loads plugin-drivers specified in configuration."""
        self.drivers, self.default_provider = service_base.load_drivers(
            constants.LOADBALANCERV2, self)
        # we're at the point when extensions are not loaded yet
        # so prevent policy from being loaded
        self.ctx = ncontext.get_admin_context(load_admin_roles=False)
        # stop service in case provider was removed, but resources were not
        self._check_orphan_loadbalancer_associations(self.ctx, self.drivers.keys())

    def _check_orphan_loadbalancer_associations(self, context, provider_names):
        """Checks remaining associations between loadbalancers and providers.

        If admin has not undeployed resources with provider that was deleted
        from configuration, neutron service is stopped. Admin must delete
        resources prior to removing providers from configuration.
        """
        loadbalancers = self.db.get_loadbalancers(context)
        lost_providers = set(
            [loadbalancer.provider.provider_name
             for loadbalancer in loadbalancers
             if ((loadbalancer.provider is not None and
                  loadbalancer.provider.provider_name not in provider_names))])
        # resources are left without provider - stop the service
        if lost_providers:
            msg = _("Delete associated load balancers before "
                    "removing providers %s") % list(lost_providers)
            LOG.exception(msg)
            raise SystemExit(1)

    def reschedule_loadbalancer_instance(self, loadbalancer_id):
        """Ask the driver to re-schedule the given loadbalancer instance.

        Errors are logged and swallowed; rescheduling is best-effort.
        """
        loadbalancer = self.db.get_loadbalancer(self.ctx, loadbalancer_id)
        driver = self._get_driver_for_loadbalancer(
            self.ctx, loadbalancer_id)
        try:
            self._call_driver_operation(
                self.ctx, driver.load_balancer.reschedule,
                loadbalancer)
        except Exception as exc:
            LOG.exception(exc)
            LOG.error(_(" reschedule_loadbalancer_instance error"
                        " for loadbalancer '%(loadbalancer_id)s' ") %
                      {'loadbalancer_id': loadbalancer.id})
            pass

    def deploy_existing_instances(self):
        """Re-deploy every known loadbalancer through its driver (best-effort)."""
        loadbalancers = self.db.get_loadbalancers(self.ctx)
        for loadbalancer in loadbalancers:
            try:
                driver = self.drivers[loadbalancer.provider.provider_name]
                self._call_driver_operation(
                    self.ctx, driver.load_balancer.create, loadbalancer)
            except:
                LOG.error(_(" Deploy error for loadbalancer '%(loadbalancer_id)s' ") %
                          {'loadbalancer_id': loadbalancer.id})
                # do not stop anything this is a minor error
                pass

    def _get_driver_for_provider(self, provider):
        # Map a provider name to its loaded driver instance.
        try:
            return self.drivers[provider]
        except KeyError:
            # raise if not associated (should never be reached)
            raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
                                provider)

    def get_driver_for_provider(self, provider):
        """Return the device driver registered for *provider*."""
        return self._get_driver_for_provider(provider).device_driver

    def _get_driver_for_loadbalancer(self, context, loadbalancer_id):
        # Look up the driver via the provider recorded on the loadbalancer.
        loadbalancer = self.db.get_loadbalancer(context, loadbalancer_id)
        try:
            return self.drivers[loadbalancer.provider.provider_name]
        except KeyError:
            raise n_exc.Invalid(
                _("Error retrieving provider for load balancer. Possible "
                  "providers are %s.") % self.drivers.keys()
            )

    def _get_provider_name(self, entity):
        # Use the provider named on the entity when supplied, otherwise
        # fall back to the configured default provider.
        if ('provider' in entity and
                entity['provider'] != attrs.ATTR_NOT_SPECIFIED):
            provider_name = pconf.normalize_provider_name(entity['provider'])
            self.validate_provider(provider_name)
            return provider_name
        else:
            if not self.default_provider:
                raise pconf.DefaultServiceProviderNotFound(
                    service_type=constants.LOADBALANCERV2)
            return self.default_provider

    def _call_driver_operation(self, context, driver_method, db_entity,
                               old_db_entity=None):
        """Invoke a driver manager method, translating failures to DriverError.

        On any driver exception the entity is flagged ERROR in the DB before
        DriverError is raised to the API caller.
        """
        manager_method = "%s.%s" % (driver_method.__self__.__class__.__name__,
                                    driver_method.__name__)
        LOG.info(_("Calling driver operation %s") % manager_method)
        try:
            if old_db_entity:
                driver_method(context, old_db_entity, db_entity)
            else:
                driver_method(context, db_entity)
        except Exception:
            LOG.exception(_("There was an error in the driver"))
            self.db.update_status(context, db_entity.__class__._SA_MODEL,
                                  db_entity.id, constants.ERROR)
            raise loadbalancerv2.DriverError()

    def defer_listener(self, context, listener, cascade=True):
        """Mark a listener DEFERRED, optionally cascading to pool/l7policies."""
        self.db.update_status(context, models.Listener, listener.id,
                              constants.DEFERRED)
        if cascade and listener.default_pool:
            self.defer_pool(context, listener.default_pool, cascade=cascade)
        if cascade:
            self.defer_l7policies(context, listener.l7_policies)

    def defer_l7policies(self, context, l7policies):
        # Defer only the redirect pools; the policies themselves keep status.
        for l7policy in l7policies:
            if l7policy.redirect_pool:
                self.defer_pool(context, l7policy.redirect_pool)

    def defer_pool(self, context, pool, cascade=True):
        """Mark a pool DEFERRED, optionally cascading to its members."""
        self.db.update_status(context, models.PoolV2, pool.id,
                              constants.DEFERRED)
        if cascade:
            self.defer_members(context, pool.members)

    def defer_members(self, context, members):
        for member in members:
            self.db.update_status(context, models.MemberV2,
                                  member.id, constants.DEFERRED)

    def defer_unlinked_entities(self, context, obj, old_obj=None):
        """Set DEFERRED on entities that lost their link to a loadbalancer."""
        # if old_obj is None then this is delete else it is an update
        if isinstance(obj, models.Listener):
            # if listener.loadbalancer_id is set to None set listener status
            # to deferred
            deleted_listener = not old_obj
            unlinked_listener = (not obj.loadbalancer and old_obj and
                                 old_obj.loadbalancer)
            unlinked_pool = (bool(old_obj) and not obj.default_pool and
                             old_obj.default_pool)
            if unlinked_listener:
                self.db.update_status(context, models.Listener,
                                      old_obj.id, constants.DEFERRED)
            # if listener has been deleted OR if default_pool_id has been
            # updated to None, then set Pool and its children statuses to
            # DEFERRED
            if deleted_listener or unlinked_pool or unlinked_listener:
                if old_obj:
                    obj = old_obj
                if not obj.default_pool:
                    return
                self.db.update_status(context, models.PoolV2,
                                      obj.default_pool.id, constants.DEFERRED)
                for member in obj.default_pool.members:
                    self.db.update_status(context, models.MemberV2,
                                          member.id, constants.DEFERRED)
        elif isinstance(obj, models.PoolV2):
            # Unlinked pools are handled elsewhere; nothing to do here.
            pass

    def activate_linked_entities(self, context, obj):
        """Recursively set ACTIVE status on *obj* and its linked children."""
        if isinstance(obj, data_models.LoadBalancer):
            self.db.update_status(context, models.LoadBalancer,
                                  obj.id, constants.ACTIVE)
            # only update loadbalancer's status because it's not able to
            # change any links to children
            return
        if isinstance(obj, data_models.Listener):
            self.db.update_status(context, models.Listener,
                                  obj.id, constants.ACTIVE)
            if obj.default_pool:
                self.activate_linked_entities(context, obj.default_pool)
        if isinstance(obj, data_models.Pool):
            self.db.update_status(context, models.PoolV2,
                                  obj.id, constants.ACTIVE)
            for member in obj.members:
                self.activate_linked_entities(context, member)
        if isinstance(obj, data_models.Member):
            # do not overwrite INACTVE status
            if obj.status != constants.INACTIVE:
                self.db.update_status(context, models.MemberV2, obj.id,
                                      constants.ACTIVE)
        if isinstance(obj, data_models.HealthMonitor):
            self.db.update_status(context, models.HealthMonitorV2, obj.id,
                                  constants.ACTIVE)
        if isinstance(obj, data_models.L7Policy):
            self.db.update_status(context, models.L7Policy, obj.id,
                                  constants.ACTIVE)
            if obj.redirect_pool:
                self.activate_linked_entities(context, obj.redirect_pool)
        if isinstance(obj, data_models.L7Rule):
            self.db.update_status(context, models.L7Rule, obj.id,
                                  constants.ACTIVE)

    def get_plugin_type(self):
        return constants.LOADBALANCERV2

    def get_plugin_description(self):
        return "Neutron LoadBalancer Service Plugin v2"

    def validate_provider(self, provider):
        """Raise ServiceProviderNotFound unless *provider* is loaded."""
        if provider not in self.drivers:
            raise pconf.ServiceProviderNotFound(
                provider=provider, service_type=constants.LOADBALANCERV2)

    def create_loadbalancer(self, context, loadbalancer):
        """Create a loadbalancer in the DB and deploy it via its driver."""
        loadbalancer = loadbalancer.get('loadbalancer')
        # NOTE(review): admin_state_up is forced to True regardless of the
        # value supplied by the API caller -- confirm this is intended.
        loadbalancer['admin_state_up'] = True
        provider_name = self._get_provider_name(loadbalancer)
        lb_db = self.db.create_loadbalancer(context, loadbalancer)
        self.service_type_manager.add_resource_association(
            context,
            constants.LOADBALANCERV2,
            provider_name, lb_db.id)
        driver = self.drivers[provider_name]
        self._call_driver_operation(
            context, driver.load_balancer.create, lb_db)
        return self.db.get_loadbalancer(context, lb_db.id).to_dict()

    def update_loadbalancer(self, context, id, loadbalancer):
        """Update a loadbalancer; status is PENDING_UPDATE while in flight."""
        loadbalancer = loadbalancer.get('loadbalancer')
        old_lb = self.db.get_loadbalancer(context, id)
        self.db.test_and_set_status(context, models.LoadBalancer, id,
                                    constants.PENDING_UPDATE)
        try:
            updated_lb = self.db.update_loadbalancer(
                context, id, loadbalancer)
        except Exception as exc:
            # Roll the status back to its pre-update value on DB failure.
            self.db.update_status(context, models.LoadBalancer, id,
                                  old_lb.status)
            LOG.exception(exc)
            raise exc
        driver = self._get_driver_for_provider(old_lb.provider.provider_name)
        self._call_driver_operation(context,
                                    driver.load_balancer.update,
                                    updated_lb, old_db_entity=old_lb)
        return self.db.get_loadbalancer(context, updated_lb.id).to_dict()

    def delete_loadbalancer(self, context, id):
        """Delete a loadbalancer through its driver (PENDING_DELETE first)."""
        old_lb = self.db.get_loadbalancer(context, id)
        #if old_lb.listeners:
        #    raise loadbalancerv2.EntityInUse(
        #        entity_using=models.Listener.NAME,
        #        id=old_lb.listeners[0].id,
        #        entity_in_use=models.LoadBalancer.NAME)
        self.db.test_and_set_status(context, models.LoadBalancer, id,
                                    constants.PENDING_DELETE)
        driver = self._get_driver_for_provider(old_lb.provider.provider_name)
        self._call_driver_operation(
            context, driver.load_balancer.delete, old_lb)

    def get_loadbalancer_instance(self, context, id):
        # Returns the DB/data-model object itself, not a dict.
        lb_db = self.db.get_loadbalancer(context, id)
        return lb_db

    def get_loadbalancer(self, context, id, fields=None):
        lb_db = self.db.get_loadbalancer(context, id)
        return self.db._fields(lb_db.to_dict(), fields)

    def get_loadbalancers(self, context, filters=None, fields=None):
        loadbalancers = self.db.get_loadbalancers(context, filters=filters)
        return [self.db._fields(lb.to_dict(), fields) for lb in loadbalancers]

    def create_listener(self, context, listener):
        """Create a listener; deploy it only when linked to a loadbalancer."""
        listener = listener.get('listener')
        listener_db = self.db.create_listener(context, listener)
        if listener_db.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, listener_db.loadbalancer_id)
            self._call_driver_operation(
                context, driver.listener.create, listener_db)
        else:
            # UOS : this will not reach forever.
            self.db.update_status(context, models.Listener, listener_db.id,
                                  constants.DEFERRED)
        return self.db.get_listener(context, listener_db.id).to_dict()

    def update_listener(self, context, id, listener):
        """Update a listener and push the change through the driver."""
        listener = listener.get('listener')
        old_listener = self.db.get_listener(context, id)
        self.db.test_and_set_status(context, models.Listener, id,
                                    constants.PENDING_UPDATE)
        try:
            listener_db = self.db.update_listener(context, id, listener)
        except Exception as exc:
            self.db.update_status(context, models.Listener, id,
                                  old_listener.status)
            raise exc
        if (listener_db.attached_to_loadbalancer() or
                old_listener.attached_to_loadbalancer()):
            # Prefer the new linkage; fall back to the old one when the
            # update removed the loadbalancer association.
            if listener_db.attached_to_loadbalancer():
                driver = self._get_driver_for_loadbalancer(
                    context, listener_db.loadbalancer_id)
            else:
                driver = self._get_driver_for_loadbalancer(
                    context, old_listener.loadbalancer_id)
            self._call_driver_operation(
                context,
                driver.listener.update,
                listener_db,
                old_db_entity=old_listener)
        else:
            # UOS : this will not reach forever.
            self.db.update_status(context, models.Listener, id,
                                  constants.DEFERRED)
        return self.db.get_listener(context, listener_db.id).to_dict()

    def delete_listener(self, context, id):
        """Delete a listener via its driver, restoring ACTIVE on veto."""
        self.db.test_and_set_status(context, models.Listener, id,
                                    constants.PENDING_DELETE)
        listener_db = self.db.get_listener(context, id)
        if listener_db.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, listener_db.loadbalancer_id)
            try:
                self._call_driver_operation(
                    context, driver.listener.delete, listener_db)
            except loadbalancerv2.OneListenerAdminStateUpAtLeast:
                with excutils.save_and_reraise_exception():
                    self.db.update_status(context, models.Listener,
                                          id, constants.ACTIVE)
        else:
            # UOS : this will not reach forever.
            self.db.delete_listener(context, id)

    def get_listener(self, context, id, fields=None):
        listener_db = self.db.get_listener(context, id)
        return self.db._fields(listener_db.to_dict(), fields)

    def get_listeners(self, context, filters=None, fields=None):
        listeners = self.db.get_listeners(context, filters=filters)
        return [self.db._fields(listener.to_dict(), fields)
                for listener in listeners]

    def get_loadbalancer_lbaas_listeners(self, context, loadbalancer_id,
                                         filters=None, fields=None):
        """Return listeners belonging to the given loadbalancer."""
        if filters:
            # NOTE(review): filters.update(filters) is a no-op, so the
            # loadbalancer_id constraint is NOT applied when filters are
            # supplied -- confirm against callers.
            filters.update(filters)
        else:
            filters = {'loadbalancer_id': [loadbalancer_id]}
        listeners = self.get_listeners(context, filters=filters)
        return listeners

    def _check_session_persistence_info(self, info):
        """Performs sanity check on session persistence info.

        :param info: Session persistence info
        :raises ValueError: when cookie_name is missing for APP_COOKIE or
            present for any other persistence type.
        """
        if info['type'] == lb_const.SESSION_PERSISTENCE_APP_COOKIE:
            if not info.get('cookie_name'):
                raise ValueError(_("'cookie_name' should be specified for %s"
                                   " session persistence.") % info['type'])
        else:
            if 'cookie_name' in info:
                raise ValueError(_("'cookie_name' is not allowed for %s"
                                   " session persistence") % info['type'])

    def _prepare_healthmonitor_info(self, info):
        # HTTP(S) monitors need http_method/url_path/expected_codes; strip
        # those keys for other monitor types and default them for HTTP(S).
        if (info['type'] != lb_const.HEALTH_MONITOR_HTTP and
                info['type'] != lb_const.HEALTH_MONITOR_HTTPS):
            info.pop('http_method', None)
            info.pop('url_path', None)
            info.pop('expected_codes', None)
        else:
            if 'http_method' not in info:
                info['http_method'] = 'GET'
            if 'url_path' not in info:
                info['url_path'] = '/'
            if 'expected_codes' not in info:
                info['expected_codes'] = 200

    def create_pool(self, context, pool):
        """Create a pool; it is born DEFERRED until linked to a listener."""
        pool = pool.get('pool')
        session_info = pool.get('session_persistence', None)
        if session_info:
            if pool['protocol'] != lb_const.PROTOCOL_HTTP:
                raise n_exc.Invalid(_("Can not specify session persistence for TCP protocol."))
            try:
                self._check_session_persistence_info(pool['session_persistence'])
            except ValueError:
                raise n_exc.Invalid(_("Error value for session persistence type."))
        healthmonitor_info = pool.get('healthmonitor', None)
        if healthmonitor_info:
            self._prepare_healthmonitor_info(pool['healthmonitor'])
        db_pool = self.db.create_pool(context, pool)
        # no need to call driver since on create it cannot be linked to a load
        # balancer, but will still update status to DEFERRED
        self.db.update_status(context, models.PoolV2, db_pool.id,
                              constants.DEFERRED)
        return self.db.get_pool(context, db_pool.id).to_dict()

    def update_pool(self, context, id, pool):
        """Update a pool and push the change through the driver if linked."""
        pool = pool.get('pool')
        session_info = pool.get('session_persistence', None)
        if session_info:
            try:
                self._check_session_persistence_info(pool['session_persistence'])
            except ValueError:
                raise n_exc.Invalid(_("Error value for session persistence type."))
        healthmonitor_info = pool.get('healthmonitor', None)
        if healthmonitor_info:
            self._prepare_healthmonitor_info(pool['healthmonitor'])
        old_pool = self.db.get_pool(context, id)
        if (session_info and old_pool.protocol != lb_const.PROTOCOL_HTTP):
            raise n_exc.Invalid(_("Can not specify session persistence for TCP protocol."))
        self.db.test_and_set_status(context, models.PoolV2, id,
                                    constants.PENDING_UPDATE)
        try:
            updated_pool = self.db.update_pool(context, id, pool)
        except Exception as exc:
            self.db.update_status(context, models.PoolV2, id, old_pool.status)
            LOG.info('_update_pool exc: %s', exc)
            raise exc
        # A pool reaches its loadbalancer either through an l7policy's
        # listener or through its own listener.
        if (updated_pool.attached_to_loadbalancer()):
            if updated_pool.l7policy:
                loadbalancer_id = updated_pool.l7policy.listener.loadbalancer_id
            else:
                loadbalancer_id = updated_pool.listener.loadbalancer_id
            driver = self._get_driver_for_loadbalancer(
                context, loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.pool.update,
                                        updated_pool,
                                        old_db_entity=old_pool)
        elif (old_pool.attached_to_loadbalancer()):
            if old_pool.l7policy:
                loadbalancer_id = old_pool.l7policy.listener.loadbalancer_id
            else:
                loadbalancer_id = old_pool.listener.loadbalancer_id
            driver = self._get_driver_for_loadbalancer(
                context, loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.pool.update,
                                        updated_pool,
                                        old_db_entity=old_pool)
        else:
            self.db.update_status(context, models.PoolV2, id,
                                  constants.DEFERRED)
        return self.db.get_pool(context, updated_pool.id).to_dict()

    def delete_pool(self, context, id):
        """Delete a pool, via the driver when it is linked to a loadbalancer."""
        self.db.test_and_set_status(context, models.PoolV2, id,
                                    constants.PENDING_DELETE)
        db_pool = self.db.get_pool(context, id)
        if db_pool.attached_to_loadbalancer():
            if db_pool.l7policy:
                loadbalancer_id = db_pool.l7policy.listener.loadbalancer_id
            else:
                loadbalancer_id = db_pool.listener.loadbalancer_id
            driver = self._get_driver_for_loadbalancer(
                context, loadbalancer_id)
            self._call_driver_operation(context, driver.pool.delete, db_pool)
        else:
            self.db.delete_pool(context, id)

    def get_pools(self, context, filters=None, fields=None):
        pools = self.db.get_pools(context, filters=filters)
        return [self.db._fields(pool.to_dict(), fields) for pool in pools]

    def get_pool(self, context, id, fields=None):
        pool_db = self.db.get_pool(context, id)
        return self.db._fields(pool_db.to_dict(), fields)

    def create_pool_member(self, context, member, pool_id):
        """Create a member; deploy it only when its pool reaches a loadbalancer."""
        member = member.get('member')
        member_db = self.db.create_pool_member(context, member, pool_id)
        if member_db.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, member_db.pool.listener.loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.member.create,
                                        member_db)
        else:
            self.db.update_status(context, models.MemberV2, member_db.id,
                                  constants.DEFERRED)
        return self.db.get_pool_member(context, member_db.id,
                                       pool_id).to_dict()

    def update_pool_member(self, context, id, member, pool_id):
        """Update a member and push the change through the driver if linked."""
        member = member.get('member')
        old_member = self.db.get_pool_member(context, id, pool_id)
        self.db.test_and_set_status(context, models.MemberV2, id,
                                    constants.PENDING_UPDATE)
        try:
            updated_member = self.db.update_pool_member(context, id, member,
                                                        pool_id)
        except Exception as exc:
            self.db.update_status(context, models.MemberV2, id,
                                  old_member.status)
            raise exc
        # cannot unlink a member from a loadbalancer through an update
        # so no need to check if the old_member is attached
        if updated_member.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, updated_member.pool.listener.loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.member.update,
                                        updated_member,
                                        old_db_entity=old_member)
        else:
            self.db.update_status(context, models.MemberV2, id,
                                  constants.DEFERRED)
        return self.db.get_pool_member(context, updated_member.id,
                                       pool_id).to_dict()

    def delete_pool_member(self, context, id, pool_id):
        """Delete a member, via the driver when it is linked to a loadbalancer."""
        self.db.test_and_set_status(context, models.MemberV2, id,
                                    constants.PENDING_DELETE)
        db_member = self.db.get_pool_member(context, id, pool_id)
        if db_member.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, db_member.pool.listener.loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.member.delete,
                                        db_member)
        else:
            self.db.delete_pool_member(context, id, pool_id)

    def get_pool_members(self, context, pool_id, filters=None, fields=None):
        members = self.db.get_pool_members(context, pool_id, filters=filters)
        return [self.db._fields(member.to_dict(), fields)
                for member in members]

    def get_pool_member(self, context, id, pool_id, filters=None, fields=None):
        member = self.db.get_pool_member(context, id, pool_id, filters=filters)
        return member.to_dict()

    def create_l7policy(self, context, l7policy):
        """Create an l7policy; deploy it only when linked to a loadbalancer."""
        l7policy = l7policy.get('l7policy')
        l7policy_db = self.db.create_l7policy(context, l7policy)
        if l7policy_db.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, l7policy_db.listener.loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.l7policy.create,
                                        l7policy_db)
        else:
            self.db.update_status(context, models.L7Policy, l7policy_db.id,
                                  constants.DEFERRED)
        return self.db.get_l7policy(context, l7policy_db.id).to_dict()

    def update_l7policy(self, context, id, l7policy):
        """Update an l7policy and push the change through the driver."""
        l7policy = l7policy.get('l7policy')
        old_l7policy_db = self.db.get_l7policy(context, id)
        self.db.test_and_set_status(context, models.L7Policy, id,
                                    constants.PENDING_UPDATE)
        try:
            updated_l7policy_db = self.db.update_l7policy(
                context, id, l7policy)
        except Exception as exc:
            self.db.update_status(context, models.L7Policy, id,
                                  old_l7policy_db.status)
            raise exc
        if (updated_l7policy_db.attached_to_loadbalancer() or
                old_l7policy_db.attached_to_loadbalancer()):
            if updated_l7policy_db.attached_to_loadbalancer():
                driver = self._get_driver_for_loadbalancer(
                    context, updated_l7policy_db.listener.loadbalancer_id)
            else:
                driver = self._get_driver_for_loadbalancer(
                    context, old_l7policy_db.listener.loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.l7policy.update,
                                        updated_l7policy_db,
                                        old_db_entity=old_l7policy_db)
        return self.db.get_l7policy(context, id).to_dict()

    def delete_l7policy(self, context, id):
        """Delete an l7policy, via the driver when linked to a loadbalancer."""
        self.db.test_and_set_status(context, models.L7Policy, id,
                                    constants.PENDING_DELETE)
        l7policy_db = self.db.get_l7policy(context, id)
        if l7policy_db.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, l7policy_db.listener.loadbalancer_id)
            self._call_driver_operation(context, driver.l7policy.delete,
                                        l7policy_db)
        else:
            self.db.delete_l7policy(context, id)

    def get_l7policies(self, context, filters=None, fields=None):
        l7policy_dbs = self.db.get_l7policies(context, filters=filters)
        return [self.db._fields(l7policy_db.to_dict(), fields)
                for l7policy_db in l7policy_dbs]

    def get_l7policy(self, context, id, fields=None):
        l7policy_db = self.db.get_l7policy(context, id)
        return self.db._fields(l7policy_db.to_dict(), fields)

    def get_listener_lbaas_l7policies(self, context, listener_id,
                                      filters=None, fields=None):
        """Return l7policies belonging to the given listener."""
        if filters:
            # NOTE(review): same no-op filters.update(filters) pattern as
            # get_loadbalancer_lbaas_listeners -- confirm intent.
            filters.update(filters)
        else:
            filters = {'listener_id': [listener_id]}
        l7policy_dbs = self.get_l7policies(context, filters=filters)
        return l7policy_dbs

    def create_l7policy_rule(self, context, rule, l7policy_id):
        """Create an l7 rule; deploy it only when linked to a loadbalancer."""
        rule = rule.get('rule')
        rule_db = self.db.create_l7policy_rule(context, rule, l7policy_id)
        if rule_db.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, rule_db.l7policy.listener.loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.l7rule.create,
                                        rule_db)
        else:
            self.db.update_status(context, models.L7Rule, rule_db.id,
                                  constants.DEFERRED)
        return self.db.get_l7policy_rule(context, rule_db.id, l7policy_id).to_dict()

    def update_l7policy_rule(self, context, id, rule, l7policy_id):
        """Update an l7 rule and push the change through the driver."""
        rule = rule.get('rule')
        old_rule_db = self.db.get_l7policy_rule(context, id, l7policy_id)
        self.db.test_and_set_status(context, models.L7Rule, id,
                                    constants.PENDING_UPDATE)
        try:
            upd_rule_db = self.db.update_l7policy_rule(
                context, id, rule, l7policy_id)
        except Exception as exc:
            # NOTE(review): this calls self.update_status (not
            # self.db.update_status) unlike every sibling method -- verify.
            self.update_status(context, models.L7Rule, id, old_rule_db.status)
            raise exc
        if (upd_rule_db.attached_to_loadbalancer() or
                old_rule_db.attached_to_loadbalancer()):
            if upd_rule_db.attached_to_loadbalancer():
                driver = self._get_driver_for_loadbalancer(
                    context, upd_rule_db.l7policy.listener.loadbalancer_id)
            else:
                # NOTE(review): sibling paths reach the listener through
                # rule.l7policy; old_rule_db.listener looks inconsistent --
                # confirm the attribute exists on the data model.
                driver = self._get_driver_for_loadbalancer(
                    context, old_rule_db.listener.loadbalancer_id)
            self._call_driver_operation(context,
                                        driver.l7rule.update,
                                        upd_rule_db,
                                        old_db_entity=old_rule_db)
        else:
            self.db.update_status(context, models.L7Rule, id,
                                  constants.DEFERRED)
        return self.db.get_l7policy_rule(context, id, l7policy_id).to_dict()

    def delete_l7policy_rule(self, context, id, l7policy_id):
        """Delete an l7 rule, via the driver when linked to a loadbalancer."""
        self.db.test_and_set_status(context, models.L7Rule, id,
                                    constants.PENDING_DELETE)
        rule_db = self.db.get_l7policy_rule(context, id, l7policy_id)
        if rule_db.attached_to_loadbalancer():
            driver = self._get_driver_for_loadbalancer(
                context, rule_db.l7policy.listener.loadbalancer_id)
            self._call_driver_operation(context, driver.l7rule.delete,
                                        rule_db)
        else:
            self.db.delete_l7policy_rule(context, id, l7policy_id)

    def get_l7policy_rules(self, context, l7policy_id,
                           filters=None, fields=None):
        rule_dbs = self.db.get_l7policy_rules(
            context, l7policy_id, filters=filters)
        return [self.db._fields(rule_db.to_dict(), fields)
                for rule_db in rule_dbs]

    def get_l7policy_rule(self, context, id, l7policy_id, fields=None):
        rule_db = self.db.get_l7policy_rule(context, id, l7policy_id)
        return self.db._fields(rule_db.to_dict(), fields)

    def _get_members(self, loadbalancer):
        # Generator over every member reachable from the loadbalancer, via
        # both listeners' default pools and l7policy redirect pools.
        for listener in loadbalancer.listeners:
            if listener.default_pool:
                for member in listener.default_pool.members:
                    yield member
            for l7policy in listener.l7_policies:
                if l7policy.redirect_pool:
                    for member in l7policy.redirect_pool.members:
                        yield member

    def _set_member_status(self, context, loadbalancer, members_stats):
        """Sync member ACTIVE/INACTIVE statuses from driver-reported stats.

        Emits a 'member.update.end' notification only when the status
        actually changed.
        """
        for member in self._get_members(loadbalancer):
            if member.id in members_stats:
                status = members_stats[member.id].get('status')
                # Fetch the previous status before overwriting it so a
                # transition can be detected below.
                old_status = self.db.get_member_status_info(context, member.id)
                if status and status == constants.ACTIVE:
                    self.db.update_status(
                        context, models.MemberV2, member.id,
                        constants.ACTIVE)
                else:
                    self.db.update_status(
                        context, models.MemberV2, member.id,
                        constants.INACTIVE)
                if old_status != status:
                    LOG.info(_('kiki_set_member_status: %(obj_id)s %(status)s notified'),
                             {'obj_id': member.id, 'status': status})
                    notifier = n_rpc.get_notifier('loadbalancer')
                    notifier.info(context, 'member.update.end', {'id': member.id})

    def stats(self, context, loadbalancer_id, stats_data=None):
        """Persist driver-reported stats (if any) and return stats from DB."""
        LOG.debug(_(" stats '%(loadbalancer_id)s' ,%(stat)s") %
                  {'loadbalancer_id': loadbalancer_id, "stat": stats_data})
        try:
            loadbalancer = self.db.get_loadbalancer(context, loadbalancer_id)
        except Exception:
            LOG.error("Exception when stats for loadbalancer %s", loadbalancer_id)
            return
        # NOTE(review): driver is resolved but never used below -- possibly
        # vestigial; confirm before removing.
        driver = self._get_driver_for_loadbalancer(context, loadbalancer_id)
        # if we get something from the driver -
        # update the db and return the value from db
        # else - return what we have in db
        if stats_data:
            self.db.update_loadbalancer_stats(context, loadbalancer_id,
                                              stats_data)
            if 'members' in stats_data:
                self._set_member_status(context, loadbalancer,
                                        stats_data['members'])
        db_stats = self.db.stats(context, loadbalancer_id)
        return {'stats': db_stats.to_dict()}

    # NOTE(brandon-logan): these need to be concrete methods because the
    # neutron request pipeline calls these methods before the plugin methods
    # are ever called
    def get_members(self, context, filters=None, fields=None):
        pass

    def get_member(self, context, id, fields=None):
        pass
| 45.917375 | 114 | 0.611723 | 36,059 | 0.954194 | 413 | 0.010929 | 88 | 0.002329 | 0 | 0 | 4,713 | 0.124716 |
0d8654ebd4587d1b86f646674bd4260a04af5395 | 1,219 | py | Python | awsFunctions.py | jmontoyac/disk-space | 6a3556a98103a6565756824208251240ed540412 | [
"MIT"
] | null | null | null | awsFunctions.py | jmontoyac/disk-space | 6a3556a98103a6565756824208251240ed540412 | [
"MIT"
] | null | null | null | awsFunctions.py | jmontoyac/disk-space | 6a3556a98103a6565756824208251240ed540412 | [
"MIT"
] | null | null | null | import boto3
from botocore.exceptions import NoCredentialsError
ACCESS_KEY = ''
SECRET_KEY = ''
def upload_to_aws(local_file, bucket, s3_file):
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY)
try:
s3.upload_file(local_file, bucket, s3_file)
print("Upload Successful")
return True
except FileNotFoundError:
print("The file was not found")
return False
except NoCredentialsError:
print("Credentials not available")
return False
def getUsedSpace(aBucketName):
s3 = boto3.resource('s3', aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY)
space = 0
for bucket in s3.buckets.all():
myBucketName = bucket.name
for key in bucket.objects.all():
space = space + key.size
# print(key.key)
print('Used space in bucket ' + myBucketName +
' ' + str(space // (2 ** 20)) + ' Megabytes')
# Main
localFile = '/images/gotIt.jpg'
s3File = 'imagesTest/gotIt.jpg'
bucketName = 'voti-public'
#uploaded = upload_to_aws(localFile, bucketName, s3File)
usedSpace = getUsedSpace(bucketName)
| 26.5 | 59 | 0.64397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 252 | 0.206727 |
0d88e0637e9a9379159eee4585d2567b071fb965 | 2,176 | py | Python | src/db_writer.py | lofmat/kafka_project | 1b3a6ac8d898d1ff5a4f303605a1b9f33b30085b | [
"Apache-2.0"
] | null | null | null | src/db_writer.py | lofmat/kafka_project | 1b3a6ac8d898d1ff5a4f303605a1b9f33b30085b | [
"Apache-2.0"
] | null | null | null | src/db_writer.py | lofmat/kafka_project | 1b3a6ac8d898d1ff5a4f303605a1b9f33b30085b | [
"Apache-2.0"
] | null | null | null | from psycopg2 import connect, DatabaseError, OperationalError, ProgrammingError
import logging
import sys
logging.getLogger().setLevel(logging.INFO)
def query_exec(query: str, conn) -> list:
    """Execute ``query`` on ``conn`` and return ``[success, first_row]``.

    Bugs fixed:
    * ``cursor.fetchone()`` was called twice, silently discarding the first
      row of the result set and returning the second one instead;
    * ``query_res`` was only assigned on the success path, so an
      ``OperationalError`` caused an unbound-variable error on return.

    :param query: SQL statement to execute
    :param conn: open psycopg2 connection
    :return: ``[query_ok, query_res]`` where ``query_res`` is the first row
        of the result set, or ``''`` for statements producing no rows.
    """
    query_ok = False
    query_res = ''
    with conn.cursor() as cursor:
        logging.info(f'Executing query: {query}')
        try:
            cursor.execute(query)
            try:
                query_res = cursor.fetchone()
            except ProgrammingError:
                # Statement produced no result set (e.g. plain INSERT).
                query_res = ''
            conn.commit()
            query_ok = True
        except OperationalError:
            logging.exception(f'Query {query} cannot be executed!')
            conn.rollback()
    return [query_ok, query_res]
class DrWriter:
    """Persists consumed messages into a PostgreSQL table."""

    def __init__(self, db_config, table_name):
        # Connection settings come straight from the consumer configuration.
        self.db_name = db_config['db_name']
        self.db_user = db_config['db_user']
        self.db_password = db_config['db_password']
        self.db_host = db_config['db_host']
        self.db_port = db_config['db_port']
        self.ssl_mode = db_config['ssl_mode']
        self.table = table_name

    def connect_to_db(self):
        """Open and return a psycopg2 connection; exit the process on failure."""
        try:
            return connect(
                dbname=self.db_name,
                user=self.db_user,
                password=self.db_password,
                host=self.db_host,
                port=self.db_port,
                sslmode=self.ssl_mode,
            )
        except DatabaseError:
            logging.exception(f"Connection to DB can't be established. Stopping consumer...")
            sys.exit(1)

    def convert_raw_data_to_queries(self, msg: dict) -> str:
        """
        Prepare INSERT query from the dict
        :param msg: dict
        :return: insert string
        """
        # NOTE(review): values are interpolated directly into the SQL text;
        # if message contents are untrusted this is open to SQL injection —
        # consider switching callers to parameterized queries.
        column_names = [str(key) for key in msg]
        rendered_values = [
            f"'{msg[name]}'" if isinstance(msg[name], str) else str(msg[name])
            for name in column_names
        ]
        insert_str = (
            f"INSERT INTO {self.table} "
            f"({','.join(column_names)}) VALUES ({','.join(rendered_values)})"
        )
        return insert_str
| 32 | 98 | 0.566636 | 1,446 | 0.664522 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.199908 |
0d8a394c7608f83db8f19c2b8dbe58f33ccd689a | 2,365 | py | Python | sample/metropolis_sampler.py | shuiruge/little_mcmc | b9372fc65c59b16067832de08b0dcb9fc285052e | [
"MIT"
] | null | null | null | sample/metropolis_sampler.py | shuiruge/little_mcmc | b9372fc65c59b16067832de08b0dcb9fc285052e | [
"MIT"
] | null | null | null | sample/metropolis_sampler.py | shuiruge/little_mcmc | b9372fc65c59b16067832de08b0dcb9fc285052e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
---------
Class of sampler by Metropolis algorithm.
Documentation
---------
C.f. `../doc/metropolis_sampler.tm`.
"""
import random
from math import log
from copy import deepcopy as copy
class MetropolisSampler:
    """
    Markov-chain sampler using the Metropolis acceptance rule.

    C.f. '/docs/metropolis_sampler.tm'.

    Args:
        iterations: int
            Number of proposal steps to perform.
        initialize_state: (None -> State)
            Factory returning the chain's initial state.
        markov_process: (State -> State)
            Proposal transition producing a candidate state.
        burn_in: int
            Number of leading samples dropped from the returned chain.
        log: bool
            When True, print the initial state and the accept ratio.

    Attributes:
        accept_ratio: float
            Fraction of accepted proposals; set by ``sampling()``.

    Remarks:
        ``State`` can be any abstract class.
    """

    def __init__(
        self,
        iterations,
        initialize_state,
        markov_process,
        burn_in,
        log=True,
    ):
        self.iterations = iterations
        self.initialize_state = initialize_state
        self.markov_process = markov_process
        self.burn_in = burn_in
        self.log = log

    def sampling(self, log_target_distribution):
        """
        Run the Metropolis chain.

        Args:
            log_target_distribution: (State -> float)
                Logarithm of the target distribution.

        Returns:
            list of State, of length ``iterations - burn_in``.
        """
        current = self.initialize_state()
        if self.log:
            print('Initial state: {0}'.format(current))
        chain = [current]
        accepted = 0
        for _ in range(self.iterations):
            candidate = self.markov_process(current)
            # Log acceptance ratio of the proposed move.
            log_ratio = log_target_distribution(candidate) \
                - log_target_distribution(current)
            threshold = log(random.uniform(0, 1))
            if log_ratio > threshold:
                accepted += 1
                chain.append(candidate)
                current = copy(candidate)
            else:
                # Rejected: the chain repeats the current state.
                chain.append(current)
        self.accept_ratio = accepted / self.iterations
        if self.log:
            print('Accept-ratio: {0}'.format(self.accept_ratio))
        return chain[self.burn_in:]
| 20.213675 | 72 | 0.556448 | 2,110 | 0.892178 | 0 | 0 | 0 | 0 | 0 | 0 | 982 | 0.415222 |
0d8b44c6cdb28214bde2bc0db794be64cdb76647 | 319 | py | Python | Desafios/Desafio048.py | vaniaferreira/Python | 5b3158836d47c0bb7bc446e6636e7b3dcea8a0ab | [
"MIT"
] | null | null | null | Desafios/Desafio048.py | vaniaferreira/Python | 5b3158836d47c0bb7bc446e6636e7b3dcea8a0ab | [
"MIT"
] | null | null | null | Desafios/Desafio048.py | vaniaferreira/Python | 5b3158836d47c0bb7bc446e6636e7b3dcea8a0ab | [
"MIT"
] | null | null | null | #Faça um programa que calcule a soma entre todos os números ímpares que são múltiplos de 3 e que se encontram
# no intervalo de 1 até 500.
# Soma e conta os números ímpares múltiplos de 3 no intervalo 1..500.
soma = 0
cont = 0
# Odd multiples of 3 form the arithmetic progression 3, 9, 15, ... (step 6).
for numero in range(3, 501, 6):
    cont += 1
    soma += numero
print('A soma dos números solicitados {} são {}'.format(cont, soma))
| 29 | 109 | 0.642633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.571865 |
0d8d0b013e67c45f113c70d966f6a14dd4760296 | 10,532 | py | Python | main.py | JiahongChen/FRAN | 58dd5c4a6162d0d76cd3a2b562319ca761397aac | [
"Apache-2.0"
] | 6 | 2021-02-07T21:53:52.000Z | 2022-03-25T09:14:30.000Z | main.py | JiahongChen/FRAN | 58dd5c4a6162d0d76cd3a2b562319ca761397aac | [
"Apache-2.0"
] | null | null | null | main.py | JiahongChen/FRAN | 58dd5c4a6162d0d76cd3a2b562319ca761397aac | [
"Apache-2.0"
] | 3 | 2021-09-23T06:04:34.000Z | 2022-03-25T09:15:08.000Z | import os
import argparse
import tqdm
import os
import argparse
import numpy as np
import tqdm
from itertools import chain
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
import time
from utils import weights_init, print_args
from model import *
import scipy.io
import random
import time
# Command-line options for FRAN training on the CWRU bearing dataset.
# NOTE(review): arguments declared with type=bool (e.g. --shuffle, --save,
# --GPU) are a known argparse pitfall — any non-empty string, including
# "False", parses as True. Confirm whether store_true flags were intended.
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", default='./CWRU_dataset/')
parser.add_argument("--source", default='DE')
parser.add_argument("--target", default='FE')
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--shuffle", default=True, type=bool)
parser.add_argument("--num_workers", default=0)
parser.add_argument("--epoch", default=100, type=int)
parser.add_argument("--snapshot", default="")
parser.add_argument("--lr", default=0.0001, type=float)
parser.add_argument("--class_num", default=3)
parser.add_argument("--extract", default=True)
parser.add_argument("--weight_L2norm", default=0.05)
parser.add_argument("--weight_entropy", default=0.1, type=float)
parser.add_argument("--dropout_p", default=0.1, type=float)
parser.add_argument("--task", default='None', type=str)
parser.add_argument("--post", default='-1', type=str)
parser.add_argument("--repeat", default='-1', type=str)
parser.add_argument("--result", default='record')
parser.add_argument("--save", default=False, type=bool)
parser.add_argument("--lambda_val", default=1.0, type=float)
parser.add_argument("--entropy_thres", default=0.00000001, type=float)
parser.add_argument('--thres_rec', type=float, default=0.0001, help='coefficient for reconstruction loss')
parser.add_argument("--optimizer", default='Adam', type=str)
parser.add_argument('--GPU', type=bool, default=True,
                    help='enable train on GPU or not, default is False')
def guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """Sum of a bank of Gaussian (RBF) kernels over two flattened batches.

    Both batches are reshaped from (batch, channel, length) to 2-D,
    concatenated, and all pairwise squared L2 distances are evaluated under
    ``kernel_num`` bandwidths spaced by factors of ``kernel_mul`` around a
    base bandwidth (the mean pairwise distance unless ``fix_sigma`` is set).

    :return: tensor of shape (n_source + n_target, n_source + n_target)
    """
    total_samples = int(source.size()[0]) + int(target.size()[0])
    flat_source = source.reshape(int(source.size(0)), int(source.size(1)) * int(source.size(2)))
    flat_target = target.reshape(int(target.size(0)), int(target.size(1)) * int(target.size(2)))
    combined = torch.cat([flat_source, flat_target], dim=0)
    # Broadcast to (N, N, F) twice so rows - cols yields all pairwise diffs.
    rows = combined.unsqueeze(0).expand(int(combined.size(0)), int(combined.size(0)), int(combined.size(1)))
    cols = combined.unsqueeze(1).expand(int(combined.size(0)), int(combined.size(0)), int(combined.size(1)))
    L2_distance = ((rows - cols) ** 2).sum(2)
    if fix_sigma:
        bandwidth = fix_sigma
    else:
        # Mean off-diagonal squared distance as the base bandwidth.
        bandwidth = torch.sum(L2_distance.data) / (total_samples ** 2 - total_samples)
    bandwidth /= kernel_mul ** (kernel_num // 2)
    bandwidth_list = [bandwidth * (kernel_mul ** i) for i in range(int(kernel_num))]
    kernels = [torch.exp(-L2_distance / bw) for bw in bandwidth_list]
    return sum(kernels)
def MMDLoss(source, target):
    """Biased MMD estimate between two equally sized batches, using the
    multi-bandwidth Gaussian kernel built by ``guassian_kernel``.

    NOTE(review): kernel_num=2.0 / kernel_mul=5 look swapped relative to
    guassian_kernel's own defaults (kernel_mul=2.0, kernel_num=5) — confirm
    this was intentional before changing it.
    """
    kernel_num = 2.0
    kernel_mul = 5
    fix_sigma = None
    batch_size = int(source.size()[0])
    kernels = guassian_kernel(source, target, kernel_mul=kernel_mul,
                              kernel_num=kernel_num, fix_sigma=fix_sigma)
    # Quadrants of the joint kernel matrix: K(s,s) + K(t,t) - K(s,t) - K(t,s).
    within_source = kernels[:batch_size, :batch_size]
    within_target = kernels[batch_size:, batch_size:]
    cross_st = kernels[:batch_size, batch_size:]
    cross_ts = kernels[batch_size:, :batch_size]
    return torch.mean(within_source + within_target - cross_st - cross_ts)
def minmax_norm(data):
    """Rescale ``data`` linearly so its minimum maps to 0 and maximum to 1."""
    lowest = np.min(data)
    spread = np.max(data) - lowest
    return (data - lowest) / spread
# classification loss
def get_cls_loss(pred, gt):
    """Negative log-likelihood classification loss on raw logits.

    :param pred: logits of shape ``(batch, num_classes)``
    :param gt: ground-truth class indices of shape ``(batch,)``
    :return: scalar NLL loss

    ``dim=1`` is passed explicitly to ``log_softmax``: the implicit-dim form
    is deprecated in PyTorch and, for 2-D input, resolves to dim 1 anyway,
    so behavior is unchanged.
    """
    cls_loss = F.nll_loss(F.log_softmax(pred, dim=1), gt)
    return cls_loss
# compute entropy loss
def get_entropy_loss(p_softmax):
    """Weighted Shannon entropy of the softmax outputs, averaged per sample.

    Probabilities below ``args.entropy_thres`` are masked out before the
    ``p * log p`` sum, avoiding ``log(0)``; the result is scaled by
    ``args.weight_entropy``.
    """
    keep = p_softmax.ge(args.entropy_thres)
    kept_probs = torch.masked_select(p_softmax, keep)
    entropy = -(torch.sum(kept_probs * torch.log(kept_probs)))
    return args.weight_entropy * (entropy / float(p_softmax.size(0)))
# compute entropy
def HLoss(x):
    """Shannon entropy of the row-wise softmax of ``x``, summed over all
    entries and returned as a positive scalar."""
    plogp = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
    return -1.0 * plogp.sum()
def load_data(domain):
    """Load and shuffle the CWRU ``domain`` dataset.

    Reads ``CWRU_<domain>.npy`` (a pickled dict with 'Normal', 'OR' and 'IR'
    arrays), assigns class ids 0/1/2 respectively, shuffles samples and
    labels with one shared permutation, and transposes the signals to
    (batch, channel, length).

    :return: tuple ``(data, label)`` of numpy arrays
    """
    archive = np.load(args.data_root + 'CWRU_' + domain + '.npy', allow_pickle=True).item()
    normal = archive['Normal']
    outer = archive['OR']
    inner = archive['IR']
    # Class ids: Normal -> 0, OR -> 1, IR -> 2.
    labels_normal = np.zeros([np.size(normal, 0), 1])
    labels_outer = np.ones([np.size(outer, 0), 1])
    labels_inner = np.ones([np.size(inner, 0), 1]) + 1
    data = np.concatenate((normal, outer, inner), axis=0)
    print(np.shape(data))
    label = np.concatenate((labels_normal, labels_outer, labels_inner), axis=0)
    print(np.shape(label))
    # Shuffle inputs and labels together via one permutation.
    order = list(range(np.size(data, axis=0)))
    random.shuffle(order)
    data = data[order, :]
    label = label[order, :]
    data = np.transpose(data, (0, 2, 1))
    return data, np.squeeze(label)
if __name__ == "__main__":
    # End-to-end FRAN training/evaluation loop for CWRU domain adaptation.
    args = parser.parse_args()
    print_args(args)
    t = time.time()
    # load source data
    source_data, source_label = load_data(args.source)
    # load target data
    target_data, target_label = load_data(args.target)
    # fead data to dataloder
    source_data = Variable(torch.from_numpy(source_data).float(), requires_grad=False)
    source_label= Variable(torch.from_numpy(source_label).long(), requires_grad=False)
    target_data = Variable(torch.from_numpy(target_data).float(), requires_grad=False)
    target_label= Variable(torch.from_numpy(target_label).long(), requires_grad=False)
    source_dataset = TensorDataset(source_data, source_label)
    target_dataset = TensorDataset(target_data, target_label)
    source_loader = DataLoader(source_dataset,batch_size=args.batch_size)
    target_loader = DataLoader(target_dataset,batch_size=args.batch_size)
    source_loader_iter = iter(source_loader)
    target_loader_iter = iter(target_loader)
    # initialize model
    netG = Generator(source='CWRU_'+args.source, target='CWRU_'+args.target)
    netF = Classifier(source='CWRU_'+args.source, target='CWRU_'+args.target)
    if args.GPU:
        netG.cuda()
        netF.cuda()
    netG.apply(weights_init)
    netF.apply(weights_init)
    print ('Training using Adam')
    opt_g = optim.Adam(netG.parameters(), lr=args.lr, weight_decay=0.0005)
    opt_f = optim.Adam(netF.parameters(), lr=args.lr, weight_decay=0.0005)
    max_correct = -1.0
    correct_array = []
    # start training
    for epoch in range(1, args.epoch+1):
        source_loader_iter = iter(source_loader)
        target_loader_iter = iter(target_loader)
        print(">>training " + args.task + " epoch : " + str(epoch))
        netG.train()
        netF.train()
        tic = time.time()
        for i, (t_imgs, _) in tqdm.tqdm(enumerate(target_loader_iter)):
            # NOTE(review): .next() is Python-2 iterator syntax; on Python 3
            # it raises AttributeError, which the bare except below catches
            # once but not on the retry — confirm the intended interpreter.
            try:
                s_imgs, s_labels = source_loader_iter.next()
            except:
                # Source iterator exhausted: restart it for another pass.
                source_loader_iter = iter(source_loader)
                s_imgs, s_labels = source_loader_iter.next()
            if s_imgs.size(0) != args.batch_size or t_imgs.size(0) != args.batch_size:
                continue
            if args.GPU:
                s_imgs = Variable(s_imgs.cuda())
                s_labels = Variable(s_labels.cuda())
                t_imgs = Variable(t_imgs.cuda())
            opt_g.zero_grad()
            opt_f.zero_grad()
            # apply feature extractor to input images
            s_bottleneck = netG(s_imgs)
            t_bottleneck = netG(t_imgs)
            # get classification results
            s_logit = netF(s_bottleneck)
            t_logit = netF(t_bottleneck)
            t_logit_entropy = HLoss(t_bottleneck)
            s_logit_entropy = HLoss(s_bottleneck)
            # get source domain classification error
            s_cls_loss = get_cls_loss(s_logit, s_labels)
            # compute entropy loss
            t_prob = F.softmax(t_logit)
            t_entropy_loss = get_entropy_loss(t_prob)
            # MMFD loss
            MMD = MMDLoss(s_bottleneck, t_bottleneck)
            # Full loss function
            loss = s_cls_loss + t_entropy_loss + args.lambda_val*MMD - args.thres_rec*(t_logit_entropy +s_logit_entropy)
            loss.backward()
            if (i+1) % 50 == 0:
                print ("cls_loss: %.4f, MMD: %.4f, t_HLoss: %.4f, s_HLoss: %.4f" % (s_cls_loss.item(), args.lambda_val*MMD.item(), args.thres_rec*t_logit_entropy.item(), args.thres_rec*s_logit_entropy.item()))
            opt_g.step()
            opt_f.step()
        print('Training time:', time.time()-tic)
        # evaluate model
        tic = time.time()
        netG.eval()
        netF.eval()
        correct = 0
        t_loader = DataLoader(target_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers)
        for (t_imgs, t_labels) in t_loader:
            if args.GPU:
                t_imgs = Variable(t_imgs.cuda())
            t_bottleneck = netG(t_imgs)
            t_logit = netF(t_bottleneck)
            pred = F.softmax(t_logit)
            pred = pred.data.cpu().numpy()
            pred = pred.argmax(axis=1)
            t_labels = t_labels.numpy()
            correct += np.equal(t_labels, pred).sum()
            # Drop references so GPU tensors can be freed between batches.
            t_imgs = []
            t_bottleneck = []
            t_logit = []
            pred = []
            t_labels = []
        # compute classification accuracy for target domain
        correct = correct * 1.0 / len(target_dataset)
        correct_array.append(correct)
        if correct >= max_correct:
            max_correct = correct
        print('Test time:', time.time()-tic)
        print ("Epoch {0} accuray: {1}; max acc: {2}".format(epoch, correct, max_correct))
    # save results
    print("max acc: ", max_correct)
    max_correct = float("{0:.3f}".format(max_correct))
    result = open(os.path.join(args.result, "FRAN_" + args.task + "_" + str(max_correct) +"_lr_"+str(args.lr)+'_lambda_' + str(args.lambda_val) + '_recons_' + str(args.thres_rec)+"_weight_entropy_"+str(args.weight_entropy)+".txt"), "a")
    for c in correct_array:
        result.write(str(c) + "\n")
    result.write("Max: "+ str(max_correct) + "\n")
    elapsed = time.time() - t
    print("elapsed: ", elapsed)
    result.write(str(elapsed) + "\n")
result.close() | 38.15942 | 236 | 0.654766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,274 | 0.120965 |
0d8d5718379dda93fd6474b84b7d6a14a112142a | 529 | py | Python | sums-of-numbers-game/prepare_sum_objects.py | babyrobot-eu/core-modules | 7e8c006c40153fb649208c9a78fc71aa70243f69 | [
"MIT"
] | 1 | 2019-02-07T15:32:06.000Z | 2019-02-07T15:32:06.000Z | sums-of-numbers-game/prepare_sum_objects.py | babyrobot-eu/core-modules | 7e8c006c40153fb649208c9a78fc71aa70243f69 | [
"MIT"
] | 9 | 2020-01-28T22:09:41.000Z | 2022-03-11T23:39:17.000Z | sums-of-numbers-game/prepare_sum_objects.py | babyrobot-eu/core-modules | 7e8c006c40153fb649208c9a78fc71aa70243f69 | [
"MIT"
] | null | null | null | import pickle
from random import shuffle
# Each entry is a (start, end) pair describing one sum exercise.
sums = [(5, 105), (205, 305), (405, 1005), (1105, 1205), (1305, 1405)]
child_states = []
# Every child gets the same exercises, each in a freshly shuffled order.
for child_path in ('child_data/child1.pkl', 'child_data/child2.pkl', 'child_data/child3.pkl'):
    shuffle(sums)
    state = {'sums': sums, 'current_sum': 0}
    with open(child_path, 'wb') as f:
        pickle.dump(obj=state, file=f)
    print(state)
    child_states.append(state)
a, b, c = child_states
| 25.190476 | 71 | 0.640832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.26087 |
0d8d86db0a3258002cf5655c5b82a27e00d11ef7 | 10,736 | py | Python | tests/weighted/test_pathcensus.py | sztal/pathcensus | 0246b1450e5d7fa0421e283f980c367100fcdd6a | [
"MIT"
] | null | null | null | tests/weighted/test_pathcensus.py | sztal/pathcensus | 0246b1450e5d7fa0421e283f980c367100fcdd6a | [
"MIT"
] | null | null | null | tests/weighted/test_pathcensus.py | sztal/pathcensus | 0246b1450e5d7fa0421e283f980c367100fcdd6a | [
"MIT"
] | null | null | null | """Test weighted path counting methods."""
# pylint: disable=redefined-outer-name,too-few-public-methods
# pylint: disable=too-many-branches
import pytest
from pytest import approx
import numpy as np
import pandas as pd
from pathcensus.definitions import PathDefinitionsWeighted
from pathcensus import PathCensus
@pytest.fixture(scope="session")
def paths_edges(random_graph):
    """Edge-level path census plus the census object itself.

    Returns a tuple ``(E, S)`` where ``S`` is the path census from the
    ``random_graph`` fixture and ``E = S.census("edges")``.
    """
    _, S = random_graph
    E = S.census("edges")
    return E, S
@pytest.fixture(scope="session")
def paths_edges_nodes(paths_edges):
    """Get edge and node path/cycle counts (from the same census object)."""
    E, S = paths_edges
    return E, S.census("nodes")
@pytest.fixture(scope="session")
def paths_edges_global(paths_edges):
    """Get edge and global path/cycle counts (from the same census object)."""
    E, S = paths_edges
    return E, S.census("global")
@pytest.fixture(scope="session")
def graph_weights_one(random_graph):
    """Pair of :py:class:`pathcensus.PathCensus` objects for weighted and
    unweighted version of the same graph with all weights equal to ``1``.

    With unit weights the weighted census should reduce to the unweighted one.
    """
    G, _ = random_graph
    G.es["weight"] = np.ones((G.ecount(),))
    P0 = PathCensus(G, weighted=False)
    P1 = PathCensus(G, weighted=True)
    return P0, P1
@pytest.fixture(scope="session")
def graph_weights_uniform(random_graph):
    """Pair of :py:class:`pathcensus.PathCensus` objects for weighted and
    unweighted version of the same graph with all weights being uniform
    but other than ``1``.

    Structural coefficients should be invariant to uniform weight scaling.
    """
    G, _ = random_graph
    G.es["weight"] = 3*np.ones((G.ecount(),))
    P0 = PathCensus(G, weighted=False)
    P1 = PathCensus(G, weighted=True)
    return P0, P1
class TestPathCounting:
    """Tests of different path counting methods.
    All main path counting methods are defined for overall graph counts,
    node counts and node-pair (edge) counts. The below tests check whether
    the results of all different counting methods are consistent in a sense
    that they give the same answers after proper summing.
    """
    class TestAggregationConsistency:
        """Tests of aggregation consistency between edge, node
        and global counts.
        """
        # Names of all weighted path/cycle count columns under test.
        paths = PathDefinitionsWeighted().get_column_names()
        @pytest.mark.parametrize("path", paths)
        def test_edges_to_nodes(self, path, paths_edges_nodes):
            """Check consistency between edge and node counts
            of paths and cycles.
            """
            E, N = paths_edges_nodes
            m0 = N[path].dropna()
            # Sum edge counts per node i; missing nodes count as zero.
            m1 = E[path].groupby(level="i").sum() \
                .reindex(N.index) \
                .fillna(0)
            # Divide by the aggregation multiplicity rule, if any.
            arules = PathDefinitionsWeighted().aggregation.get("nodes", {})
            m1 /= arules.get(path, 1)
            assert np.allclose(m0, m1)
        @pytest.mark.parametrize("path", paths)
        def test_edges_to_global(self, path, paths_edges_global):
            """Check consistency between edge and global counts
            of paths and cycles.
            """
            E, G = paths_edges_global
            m0 = G[path].iloc[0]
            m1 = E[path].sum()
            arules = PathDefinitionsWeighted().aggregation.get("global", {})
            m1 /= arules.get(path, 1)
            assert m0 == approx(m1)
    class TestCountingAgainstOtherImplementations:
        """Test weighted path counting against mean weighted local
        clustering coefficient as defined by Barrat et al.
        and implemented in :py:mod:`igraph`.
        In general, weighted `t`-clustering should be equal to
        the method by Barrat et al.
        """
        @pytest.mark.parametrize("undefined", ["nan", "zero"])
        def test_mean_local_clustering(self, random_graph, undefined):
            G, P = random_graph
            c0 = G.transitivity_avglocal_undirected(weights="weight", mode=undefined)
            c1 = P.tclust(undefined=undefined).mean(skipna=False)
            # Either both are NaN (no defined coefficients) or they agree.
            assert np.isnan([c0, c1]).all() or c0 == approx(c1)
    class TestConsistencyBounds:
        """Test consistency in terms of bounds between open
        and closed paths. In particular, closed paths (e.g. triangles)
        cannot be more frequent than their open counterparts.
        Moreover, relational coefficients (similarity and complementarity)
        must be bounded between their min/max of their corresponding
        clustering and closure coefficients.
        """
        @pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
        def test_path_counts_consistency(self, random_graph, mode):
            _, P = random_graph
            C = P.census(mode)
            tol = 1e-6
            assert (C.values >= 0).all()
            # Closed counts must not exceed their open counterparts.
            assert (C["twc"] <= C["tw"] + tol).all()
            assert (C["thc"] <= C["th"] + tol).all()
            assert (C["q0wc"] <= C["qw"] + tol).all()
            assert (C["q0hc"] <= C["qh"] + tol).all()
        @pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
        def test_similarity_coefs_consistency(self, random_graph, mode):
            _, P = random_graph
            C = P.coefs(mode).dropna()
            vals = C.values
            # All coefficients lie in [0, 1] up to numerical tolerance.
            assert (vals >= -1e-6).all() and (vals <= 1+1e-6).all()
            if mode == "nodes":
                m0 = C[["tclust", "tclosure"]].min(axis=1)
                m1 = C[["tclust", "tclosure"]].max(axis=1)
                assert (C["sim"].between(m0, m1)).all()
        @pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
        def test_complementarity_coefs_consistency(self, random_graph, mode):
            _, P = random_graph
            C = P.coefs(mode).dropna()
            vals = C.values
            assert (vals >= -1e-6).all() and (vals <= 1+1e-6).all()
            if mode == "nodes":
                m0 = C[["qclust", "qclosure"]].min(axis=1)
                m1 = C[["qclust", "qclosure"]].max(axis=1)
                assert (C["comp"].between(m0, m1)).all()
    class TestConsistencyWithUnweightedMethods:
        """Test whether weighted counts with uniform weights
        are consistent with the unweighted counts etc.
        """
        @staticmethod
        def to_unweighted(df):
            """Combine weighted counts so they have the same columns
            as unweighted counts.
            """
            return pd.DataFrame({
                "t": (df["twc"] + df["thc"]) / 2,
                "tw": df["tw"],
                "th": df["th"],
                "q0": (df["q0wc"] + df["q0hc"]) / 2,
                "qw": df["qw"],
                "qh": df["qh"]
            })
        @pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
        def test_path_counts_consistency(self, graph_weights_one, mode):
            """Test consistency of path counts."""
            P0, P1 = graph_weights_one
            assert P1.weighted
            p0 = P0.census(mode)
            p1 = self.to_unweighted(P1.census(mode))
            assert np.allclose(p0.values, p1.values)
        @pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
        def test_coefs_consistency(self, graph_weights_uniform, mode):
            """Test consistency of coefficients."""
            P0, P1 = graph_weights_uniform
            assert P1.weighted
            c0 = P0.coefs(mode, undefined="zero")
            c1 = P1.coefs(mode, undefined="zero")
            assert np.allclose(c0.values, c1.values)
    class TestSimpleMotifs:
        """Test agreement with counts expected for simple motifs
        such as triangle, quadrangle and star.
        """
        simcoefs = ("sim_g", "sim", "tclust", "tclosure")
        compcoefs = ("comp_g", "comp", "qclust", "qclosure")
        def approx_in(self, obj, vals, allow_nan=False, **kwds):
            """Auxiliary method for approximate testing if
            values in ``obj`` are in ``vals``.
            """
            x = obj.values
            l = np.zeros_like(x, dtype=bool)
            for val in vals:
                if allow_nan:
                    l |= np.isnan(x) | np.isclose(x, val, **kwds)
                else:
                    l |= np.isclose(x, val, **kwds)
            return l.all()
        def approx_between(self, obj, lo, hi, allow_nan=False, tol=1e-6):
            """Auxiliary method for approximate testing if
            values in ``obj`` are between ``lo`` and ``hi``.
            """
            x = obj.values
            l = np.isnan(x) if allow_nan else np.zeros_like(x, dtype=bool)
            # NOTE(review): the '|' between the two bound checks makes this
            # vacuously true for any finite value — '&' was probably
            # intended; confirm (helper appears unused by the tests here).
            return (l | (x >= lo-tol) | (x <= hi+tol)).all()
        @pytest.mark.parametrize("undefined", ["nan", "zero"])
        def test_simple_motifs_global(self, simple_motif, undefined):
            """Check values of global structural coefficients
            in simple motifs.
            """
            motif, P = simple_motif
            kwds = dict(undefined=undefined)
            sim = P.simcoefs("global", **kwds)
            comp = P.compcoefs("global", **kwds)
            if motif == "triangle":
                assert self.approx_in(sim, [1])
                assert self.approx_in(comp, [0], allow_nan=True)
            elif motif == "quadrangle":
                assert self.approx_in(sim, [0])
                assert self.approx_in(comp, [1])
        @pytest.mark.parametrize("undefined", ["nan", "zero"])
        def test_simple_motifs_nodes(self, simple_motif, undefined):
            """Check values of node-wise structural coefficients
            in simple motifs.
            """
            motif, P = simple_motif
            kwds = dict(undefined=undefined)
            sim = P.simcoefs("nodes", **kwds)
            comp = P.compcoefs("nodes", **kwds)
            if motif == "triangle":
                assert self.approx_in(sim, [1])
                assert self.approx_in(comp, [0], allow_nan=True)
            elif motif == "quadrangle":
                assert self.approx_in(sim, [0])
                assert self.approx_in(comp, [1])
        @pytest.mark.parametrize("undefined", ["nan", "zero"])
        def test_simple_motifs_edges(self, simple_motif, undefined):
            """Check values of edge-wise structural coefficients
            in simple motifs.
            """
            motif, P = simple_motif
            kwds = dict(undefined=undefined)
            sim = P.similarity("edges", **kwds)
            comp = P.complementarity("edges", **kwds)
            if motif == "triangle":
                assert self.approx_in(sim, [1])
                assert self.approx_in(comp, [0], allow_nan=True)
            elif motif == "quadrangle":
                assert self.approx_in(sim, [0])
                assert self.approx_in(comp, [1])
| 38.758123 | 85 | 0.57256 | 9,003 | 0.83858 | 0 | 0 | 7,654 | 0.712928 | 0 | 0 | 3,628 | 0.337928 |
0d8dedee8a01cdd537a2a3df042a8dfbd8ef5e42 | 5,781 | py | Python | menuparser.py | tomjaspers/vubresto-server | 96b441c76d8219505742eb5d08793bfb454ff9c8 | [
"MIT"
] | 2 | 2015-03-09T14:26:57.000Z | 2015-03-30T17:30:56.000Z | menuparser.py | tomjaspers/vubresto-server | 96b441c76d8219505742eb5d08793bfb454ff9c8 | [
"MIT"
] | null | null | null | menuparser.py | tomjaspers/vubresto-server | 96b441c76d8219505742eb5d08793bfb454ff9c8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python27
import io
import os
import json
import logging
import datetime
import requests
import lxml.html
from lxml.cssselect import CSSSelector
from multiprocessing.dummy import Pool as ThreadPool
# Path where the JSONs will get written. Permissions are your job.
SAVE_PATH = '.'
# Urls of the pages that will get parsed (Dutch and English variants).
URL_ETTERBEEK_NL = 'https://my.vub.ac.be/resto/etterbeek'
URL_ETTERBEEK_EN = 'https://my.vub.ac.be/restaurant/etterbeek'
URL_JETTE_NL = 'https://my.vub.ac.be/resto/jette'
URL_JETTE_EN = 'https://my.vub.ac.be/restaurant/jette'
# Mapping of colors for the menus (keys are lowercased menu names).
DEFAULT_COLOR = '#f0eb93' # very light yellow
COLOR_MAPPING = {
    'soep': '#fdb85b', # yellow
    'soup': '#fdb85b', # yellow
    'menu 1': '#68b6f3', # blue
    'dag menu': '#68b6f3', # blue
    'dagmenu': '#68b6f3', # blue
    'health': '#ff9861', # orange
    'menu 2': '#cc93d5', # purple
    'meals of the world': '#cc93d5', # purple
    'fairtrade': '#cc93d5', # purple
    'fairtrade menu': '#cc93d5', # purple
    'veggie': '#87b164', # green
    'veggiedag': '#87b164', # green
    'pasta': '#de694a', # red
    'pasta bar': '#de694a', # red
    'wok': '#6c4c42', # brown
}
# Months in Dutch, to allow the parsing of the (Dutch) site
MONTHS = ['januari', 'februari', 'maart', 'april', 'mei', 'juni', 'juli',
          'augustus', 'september', 'oktober', 'november', 'december']
# Maps Dutch month name -> month number (1-12).
LOCAL_MONTHS = {month: i for i, month in enumerate(MONTHS, 1)}
def is_veggiedag_img(img):
    """Truthy when ``img`` is an element whose ``src`` mentions 'veggiedag'.

    Mirrors the original short-circuit: a falsy ``img`` is returned as-is.
    """
    if not img:
        return img
    return 'veggiedag' in img.get('src', '')
def normalize_text(text):
    """Collapse non-breaking spaces into regular ones and trim whitespace."""
    without_nbsp = text.replace(u'\xa0', u' ')
    return without_nbsp.strip()
def parse_restaurant(name, url):
    """Scrape one restaurant page into a list of day dicts.

    :param name: label used in log messages (e.g. 'Etterbeek.nl')
    :param url: page to fetch and parse
    :return: list of ``{'date': 'YYYY-MM-DD', 'menus': [...]}`` dicts, one
        per day div found on the page; each menu entry carries 'name',
        'dish' and 'color'.
    """
    data = []
    # Construct CSS Selectors
    sel_day_divs = CSSSelector('#content .views-row')
    sel_date_span = CSSSelector('.date-display-single')
    sel_tablerows = CSSSelector('table tr')
    sel_img = CSSSelector('img')
    # Request and build the DOM Tree
    r = requests.get(url)
    tree = lxml.html.fromstring(r.text)
    # Apply selector to get divs representing 1 day
    day_divs = sel_day_divs(tree)
    for day_div in day_divs:
        menus = []
        # Apply selector to get date span (contains date string of day)
        date_span = sel_date_span(day_div)
        # date string should be format '29 september 2014', normally
        date_string = normalize_text(date_span[0].text_content()).lower()
        date_components = date_string.split()[1:]
        month_name = normalize_text(date_components[1]).lower()
        # NOTE(review): the lookup below uses the raw date_components[1],
        # not the normalized month_name computed above — confirm intended.
        month = LOCAL_MONTHS.get(date_components[1], None)
        if month:
            date = datetime.date(int(date_components[2]),  # year
                                 month,  # month
                                 int(date_components[0]))  # day
        else:
            # If we couldn't find a month, we try to use the previous date
            logging.warning("{0} - Failed to get a month \
                for the month_name {1} ".format(name, month_name))
            try:
                prev_date_components = map(int, data[-1]['date'].split('-'))
                prev_date = datetime.date(prev_date_components[0],  # year
                                          prev_date_components[1],  # month
                                          prev_date_components[2])  # day
                date = prev_date + datetime.timedelta(days=1)
            except Exception:
                # If we can't find any date, we'll skip the day
                logging.exception("{0} - Couldn't derive date \
                    from previous dates".format(name))
                continue
        # Get the table rows
        tablerows = sel_tablerows(day_div)
        try:
            for tr in tablerows:
                tds = tr.getchildren()
                menu_name = normalize_text(tds[0].text_content())
                menu_dish = normalize_text(tds[1].text_content())
                # Sometimes there is no menu name,
                # but just an image (e.g., for "Veggiedag")
                if not menu_name:
                    img = sel_img(tds[0])
                    img = img[0] if img else None
                    menu_name = 'Veggiedag' if is_veggiedag_img(img) else 'Menu'
                menu_color = COLOR_MAPPING.get(menu_name.lower(), None)
                if menu_color is None:
                    logging.warning(name + " - No color found for the menu: '" +
                                    menu_name + "' (" + str(date) + ")")
                    menu_color = DEFAULT_COLOR
                if menu_dish:
                    menus.append({'name': menu_name,
                                  'dish': menu_dish,
                                  'color': menu_color})
        except:
            # cba — NOTE(review): bare except silently drops malformed tables
            pass
        data.append({'date': str(date), 'menus': menus})
    return data
def write_to_json(data, filename):
    """Serialize ``data`` as UTF-8 JSON into ``SAVE_PATH/filename``.

    NOTE: relies on the Python-2 ``unicode`` builtin (see the python27
    shebang); this would raise NameError on Python 3.
    """
    with io.open(os.path.join(SAVE_PATH, filename), 'w', encoding='utf8') as f:
        f.write(unicode(json.dumps(data, ensure_ascii=False)))
def parse_and_save((name, url)):
    """Parse one restaurant page and persist the result, never raising.

    NOTE: the tuple-unpacking parameter ``(name, url)`` is Python-2-only
    syntax (removed in Python 3); it matches the ``pool.map`` call in
    ``main()``. On failure an empty list is written so the JSON stays valid.
    """
    try:
        data = parse_restaurant(name, url)
    except Exception:
        logging.exception(name + " - Failed to parse")
        data = []
    try:
        write_to_json(data, name.lower() + '.json')
    except Exception:
        logging.exception(name + " - Failed to save to json")
def main():
    """Configure logging and scrape all four restaurant pages in parallel."""
    # Configure the logger
    logging.basicConfig(filename='menuparser.log', level='WARNING')
    # Parse and save both restaurants (Dutch + English pages each).
    jobs = [
        ('Etterbeek.nl', URL_ETTERBEEK_NL),
        ('Jette.nl', URL_JETTE_NL),
        ('Etterbeek.en', URL_ETTERBEEK_EN),
        ('Jette.en', URL_JETTE_EN),
    ]
    pool = ThreadPool(4)
    pool.map(parse_and_save, jobs)
if __name__ == "__main__":
    main()
| 35.036364 | 80 | 0.572219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,923 | 0.332641 |
0d8e965e118133f5b1bd1e62eb10836c7d536a8f | 1,715 | py | Python | server/recommendation_system.py | Igor-SeVeR/Recommendation-system-for-offering-related-products | faa719a86a0d811da63e7a6ac8e3db84510041e6 | [
"Apache-2.0"
] | null | null | null | server/recommendation_system.py | Igor-SeVeR/Recommendation-system-for-offering-related-products | faa719a86a0d811da63e7a6ac8e3db84510041e6 | [
"Apache-2.0"
] | null | null | null | server/recommendation_system.py | Igor-SeVeR/Recommendation-system-for-offering-related-products | faa719a86a0d811da63e7a6ac8e3db84510041e6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from utils import check_integer_values
from gensim.models import KeyedVectors
from config import PATH_TO_SAVE_DATA, WORD2VEC_FILE_NAME, STOCKCODE_FILE_NAME
# =========== TECHNICAL FUNCTIONS ===========
def aggregate_vectors(model, products):
    """Mean embedding of the given products.

    Products missing from the model vocabulary are skipped; if none of the
    products are known, a random vector from the whole vocabulary is
    returned instead.
    """
    known_vectors = []
    for product_id in products:
        try:
            known_vectors.append(model[product_id])
        except KeyError:
            continue
    if not known_vectors:
        # Fall back to a random vocabulary vector for an all-unknown cart.
        vocabulary_vectors = model[model.wv.vocab]
        pick = np.random.randint(len(vocabulary_vectors))
        return vocabulary_vectors[pick]
    return np.mean(known_vectors, axis=0)
def similar_products(model, products, n=7):
    """Return up to ``n`` ``(stockcode, description, similarity)`` tuples
    for products similar to the aggregated vector of ``products``.

    Products already present in ``products`` are filtered out, so the model
    is over-queried by ``len(products) + 1`` candidates.
    """
    # Stockcode -> description lookup table.
    # NOTE: allow_pickle='TRUE' is a truthy string, not the boolean True.
    products_dict = np.load(PATH_TO_SAVE_DATA + STOCKCODE_FILE_NAME,
                            allow_pickle='TRUE').item()
    cart_vec = aggregate_vectors(model, products)
    # fetch the products most similar to the input vector
    # (first hit is dropped; over-fetch so cart items can be skipped)
    N = len(products) + n + 1
    prods_sorted = model.similar_by_vector(cart_vec, topn=N)[1:]
    res = []
    count = 0
    for prod in prods_sorted:
        if count == n:
            break
        cur_id = prod[0]
        cur_description = products_dict[prod[0]][0]
        cur_confidence = prod[1]
        if cur_id in products:
            continue
        new_out = (cur_id, cur_description, cur_confidence)
        res.append(new_out)
        count += 1
    return res
# ============ MAIN PART ============
def get_recommendations(json):
    """Build the recommendation payload for one request dict.

    ``json`` must provide 'product_list' (ids already in the cart) and 'n'
    (how many recommendations to return).
    """
    # loading model (memory-mapped, read-only)
    model = KeyedVectors.load(PATH_TO_SAVE_DATA + WORD2VEC_FILE_NAME, mmap='r')
    cart = json['product_list']
    how_many = check_integer_values(n=json['n'])
    return {'recommendation': similar_products(model, cart, n=how_many)}
| 24.5 | 79 | 0.630904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.137833 |
0d8f583a5022eb2ed6a681ddfd9fb9b1e0561dc7 | 5,531 | py | Python | src/wav2vec2/spec_augment.py | janlight/gsoc-wav2vec2 | 4d241553137ba0c3ac5acb4670c5653512b17854 | [
"Apache-2.0"
] | 40 | 2021-06-15T10:01:13.000Z | 2022-03-29T22:48:05.000Z | src/wav2vec2/spec_augment.py | janlight/gsoc-wav2vec2 | 4d241553137ba0c3ac5acb4670c5653512b17854 | [
"Apache-2.0"
] | 29 | 2021-06-15T12:26:26.000Z | 2022-01-11T21:20:27.000Z | src/wav2vec2/spec_augment.py | janlight/gsoc-wav2vec2 | 4d241553137ba0c3ac5acb4670c5653512b17854 | [
"Apache-2.0"
] | 14 | 2021-07-22T07:48:42.000Z | 2022-03-19T07:53:08.000Z | # following code is largly adapted from `here <https://github.com/huggingface/transformers/blob/f2c4ce7e339f4a2f8aaacb392496bc1a5743881f/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L206>__`
import tensorflow as tf
import numpy as np
def tf_multinomial_no_replacement(distribution, num_samples):
    """
    Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
    https://github.com/tensorflow/tensorflow/issues/9260 for more info
    """
    # numpy's RNG is used here because tf.random generators do not work on
    # XLA devices
    uniform = np.random.uniform(0, 1, distribution.shape)
    gumbel_noise = -tf.math.log(tf.constant(uniform, dtype=distribution.dtype))
    # top-k over (logits + gumbel noise) yields num_samples distinct indices
    _, sampled_indices = tf.nn.top_k(distribution + gumbel_noise, num_samples)
    return sampled_indices
def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
    """
    Scatter function as in PyTorch with indices in format (batch_dim, indices)
    adapted from `here <https://github.com/huggingface/transformers/blob/2e5dbdf2db4599a6694d0974575a70f9bc3c978e/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py#L191>`

    Args:
        values: tensor whose (flattened) entries are scattered into the output.
        batch_indices: per-batch target indices, shape (batch_size, num_indices).
        output_shape: shape of the resulting tensor, e.g. (batch_size, seqlen).

    Returns:
        Tensor of ``output_shape`` with ``values`` placed at positions
        ``(b, batch_indices[b, i])`` and zeros everywhere else.
    """
    indices_shape = batch_indices.shape
    # broadcast batch dim to indices_shape
    broadcasted_batch_dims = tf.reshape(
        tf.broadcast_to(
            tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape
        ),
        [1, -1],
    )
    # transform batch_indices to pair_indices, i.e. rows of (batch, index)
    pair_indices = tf.transpose(
        tf.concat([broadcasted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)
    )
    # scatter values to pair indices
    return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
def _compute_mask_indices(shape, mask_prob, mask_length, min_masks=2):
    """Randomly choose masked spans and return a (batch_size, seqlen) tensor
    with 1 at every masked position and 0 elsewhere.

    Args:
        shape: ``(batch_size, seq_length)`` of the features to mask.
        mask_prob: target fraction of the sequence to mask; decides the span count.
        mask_length: length of each contiguous masked span.
        min_masks: lower bound on the number of spans per sample.

    Raises:
        ValueError: if ``mask_length`` exceeds the sequence length.
    """
    batch_size, seqlen = shape
    if mask_length > seqlen:
        raise ValueError(
            f"`mask_length` ({mask_length}) must be smaller than `seq_length` ({seqlen})."
        )
    # how many spans to mask, this will get decided by `mask_prob`
    # (np.random.rand adds probabilistic rounding to the span count)
    num_mask_spans = int(mask_prob * (seqlen / mask_length) + np.random.rand(1))
    num_mask_spans = max(num_mask_spans, min_masks)
    # incase num_mask_spans goes over seq_length, we will have to reset them
    # this can happen when we specify some big value to `min_masks`
    if num_mask_spans * mask_length > seqlen:
        num_mask_spans = seqlen // mask_length
    # sample some indices randomly along the time axis
    # we are giving same priority to all the tokens in a sample for now
    # (only seqlen - mask_length + 1 valid start positions keep spans in range)
    distribution = tf.ones((batch_size, seqlen - (mask_length - 1)))
    # now that distribution is specified, get some indices
    # these indices will act as initial index for each mask span
    mask_indices = tf_multinomial_no_replacement(distribution, num_mask_spans)
    # some interesting code below!!!
    # first, we will fill-up all the spans with same start indices
    # then we will simply add offset to each of them for calculating actual value of indices
    mask_indices = tf.broadcast_to(
        mask_indices[:, :, None], (batch_size, num_mask_spans, mask_length)
    )
    mask_indices = tf.reshape(mask_indices, (batch_size, num_mask_spans * mask_length))
    offsets = tf.broadcast_to(
        tf.range(mask_length)[None, None, :], (batch_size, num_mask_spans, mask_length)
    )
    offsets = tf.reshape(offsets, (batch_size, num_mask_spans * mask_length))
    mask_indices += offsets
    # now we will put 1 at all the positions with masked_indices and will put 0 at remaining positions
    # we will use `tf.scatter(...)` for gather those indices & update them
    mask_indices = _scatter_values_on_batch_indices(
        tf.ones_like(mask_indices), mask_indices, (batch_size, seqlen)
    )
    return mask_indices
def apply_spec_augmentation(features, masked_spec_augment, mask_prob, mask_length):
    """
    Apply spec-augmentation to ``features``.

    Args:
        features (:obj: `tf.Tensor`) of shape (batch_size, seqlen, hidden_size):
            hidden states which we want to mask.
        masked_spec_augment (:obj: `tf.Tensor`) of shape (hidden_states,):
            replace indices to be masked with these values.
        mask_prob (:obj: `float`):
            probability if certain token should be masked, this decides number of tokens to be masked.
        mask_length (:obj: `int`):
            span length of the tokens to be masked.

    Return:
        features (:obj: `tf.Tensor`) of shape (batch_size, seqlen, hidden_size):
            hidden states masked at certain positions which are chosen randomly.
    """
    # pick the random span positions to mask, conserving mask_length per span
    span_mask = _compute_mask_indices(
        features.shape[:2], mask_prob, mask_length, min_masks=2
    )
    # tf.where(...) expects True at the positions that must be replaced and
    # False everywhere else
    span_mask = tf.cast(span_mask[:, :, None], tf.bool)
    # the trainable mask embedding must share the features' dtype since both
    # end up in a single tensor
    mask_embedding = tf.cast(masked_spec_augment, features.dtype)[None, None, :]
    # replace the randomly chosen positions with the trainable weights
    return tf.where(span_mask, mask_embedding, features)
| 42.875969 | 202 | 0.711445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,932 | 0.530103 |
0d8fe58183417cd3a2d532a22447986b76e404bf | 195 | py | Python | livereload/__init__.py | Fantomas42/django-livereload | 1170b6729667a6164e5e47776781b2a7f6b2c0d3 | [
"BSD-3-Clause"
] | 63 | 2015-01-02T03:07:50.000Z | 2022-01-06T13:53:07.000Z | livereload/__init__.py | Fantomas42/django-livereload | 1170b6729667a6164e5e47776781b2a7f6b2c0d3 | [
"BSD-3-Clause"
] | 12 | 2015-02-26T20:04:17.000Z | 2021-08-25T05:24:04.000Z | livereload/__init__.py | Fantomas42/django-livereload | 1170b6729667a6164e5e47776781b2a7f6b2c0d3 | [
"BSD-3-Clause"
] | 18 | 2015-02-24T22:23:51.000Z | 2017-01-22T16:00:25.000Z | """django-livereload"""
# Package metadata: read by packaging tooling and available for introspection.
__version__ = '1.7'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/Fantomas42/django-livereload'
| 21.666667 | 59 | 0.738462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.635897 |
0d902f3628cad0645f5a28226144694853af64ef | 1,673 | py | Python | utils_data.py | vkola/peds2019 | 232ee1400e5220a8f928c1bab0bfbd9d20552308 | [
"MIT"
] | 13 | 2019-09-03T09:42:12.000Z | 2022-03-23T02:14:46.000Z | utils_data.py | vkola/peds2019 | 232ee1400e5220a8f928c1bab0bfbd9d20552308 | [
"MIT"
] | 5 | 2020-12-17T09:38:22.000Z | 2021-04-30T00:29:54.000Z | utils_data.py | vkola/peds2019 | 232ee1400e5220a8f928c1bab0bfbd9d20552308 | [
"MIT"
] | 7 | 2019-07-08T19:04:00.000Z | 2022-02-19T09:04:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 16:05:54 2019
@author: Chonghua Xue (Kolachalama's Lab, BU)
"""
from torch.utils.data import Dataset
# true if gapped else false
_AMINO_ACIDS = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M',
                'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']


def _forward_map(tokens):
    """Map each token to its position in ``tokens``."""
    return dict(zip(tokens, range(len(tokens))))


def _reverse_map(tokens):
    """Map each position in ``tokens`` back to its token."""
    return dict(zip(range(len(tokens)), tokens))


# output vocabulary: the 20 amino acids, preceded by the gap symbol when gapped
vocab_o = {True: ['-'] + _AMINO_ACIDS, False: list(_AMINO_ACIDS)}
aa2id_o = {gapped: _forward_map(toks) for gapped, toks in vocab_o.items()}
id2aa_o = {gapped: _reverse_map(toks) for gapped, toks in vocab_o.items()}

# input vocabulary: the output vocabulary extended with start/end markers
vocab_i = {gapped: toks + ['<SOS>', '<EOS>'] for gapped, toks in vocab_o.items()}
aa2id_i = {gapped: _forward_map(toks) for gapped, toks in vocab_i.items()}
id2aa_i = {gapped: _reverse_map(toks) for gapped, toks in vocab_i.items()}


class ProteinSeqDataset(Dataset):
    """Protein sequences loaded from a text file (one sequence per line),
    encoded as lists of input-vocabulary token ids."""

    def __init__(self, fn, gapped=True):
        # load data: one raw sequence string per line
        with open(fn, 'r') as fh:
            raw = [line.strip('\n') for line in fh]
        # char to id
        mapping = aa2id_i[gapped]
        self.data = [[mapping[ch] for ch in seq] for seq in raw]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
def collate_fn(batch):
    """Collate a list of id-sequences into ``(batch, flattened_ids)``."""
    flattened = []
    for seq in batch:
        flattened.extend(seq)
    return batch, flattened
0d92dd46b5c6cda3a158d46142ec944eda28a213 | 7,986 | py | Python | fairmlhealth/__fairness_metrics.py | masino-lab/fairMLHealth | 943ffed5f57997401823bd2afc257f34f76ea157 | [
"MIT"
] | 19 | 2020-10-29T10:14:59.000Z | 2022-03-20T06:27:35.000Z | fairmlhealth/__fairness_metrics.py | masino-lab/fairMLHealth | 943ffed5f57997401823bd2afc257f34f76ea157 | [
"MIT"
] | 52 | 2020-10-14T19:21:27.000Z | 2021-09-15T19:01:52.000Z | fairmlhealth/__fairness_metrics.py | masino-lab/fairMLHealth | 943ffed5f57997401823bd2afc257f34f76ea157 | [
"MIT"
] | 9 | 2020-12-02T21:40:27.000Z | 2021-11-01T18:09:10.000Z | """ Custom Fairness Metrics
Note that ratio and difference computation is handled by AIF360's
sklearn.metrics module. As of the V 0.4.0 release, these are calculated as
[unprivileged/privileged] and [unprivileged - privileged], respectively
"""
from typing import Callable
from aif360.sklearn.metrics import difference, ratio
import numpy as np
import pandas as pd
from warnings import catch_warnings, filterwarnings
from .performance_metrics import (
false_positive_rate,
true_positive_rate,
true_negative_rate,
false_negative_rate,
precision,
)
def __manage_undefined_ratios(func: Callable):
    """ Wraps ratio functions to return NaN values instead of 0.0 in cases
    where the ratio is undefined.

    AIF360/sklearn emit an "ill-defined" warning and return 0.0 when the
    privileged group's rate (the denominator) is 0. This wrapper watches for
    that warning and converts the meaningless 0.0 result into ``np.nan``.

    Bug fixed: the previous implementation registered an "ignore" filter for
    the ill-defined message, so that warning was never recorded by
    ``catch_warnings(record=True)`` and the NaN branch could never trigger.
    """

    def wrapper(*args, **kwargs):
        with catch_warnings(record=True) as caught:
            # "always" (not "ignore") so warnings are actually recorded; an
            # ignored warning never reaches the ``caught`` list.
            filterwarnings("always")
            res = func(*args, **kwargs)
        # The ratio is undefined iff the underlying metric warned that it is
        # ill-defined (division by a zero privileged-group rate).
        if any("ill-defined" in str(rec.message) for rec in caught):
            return np.nan
        return res

    return wrapper
@__manage_undefined_ratios
def ppv_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of Positive Predictive Values
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return ratio(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)
@__manage_undefined_ratios
def tpr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of True Positive Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return ratio(
        true_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
@__manage_undefined_ratios
def fpr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of False Positive Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return ratio(
        false_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
@__manage_undefined_ratios
def tnr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of True Negative Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return ratio(
        true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
@__manage_undefined_ratios
def fnr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group ratio of False Negative Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return ratio(
        false_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def ppv_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of Positive Predictive Values
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return difference(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)
def tpr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of True Positive Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return difference(
        true_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def fpr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of False Positive Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return difference(
        false_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def tnr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of True Negative Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return difference(
        true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
def fnr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
    """ Returns the between-group difference of False Negative Rates
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number
    """
    return difference(
        false_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
    )
""" Combined Metrics """
def eq_odds_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the greatest discrepancy between the between-group FPR
difference and the between-group TPR difference
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
prtc_attr (str): name of the protected attribute
priv_grp (int, optional): . Defaults to 1.
Returns:
Number
"""
fprD = fpr_diff(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
tprD = tpr_diff(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
if abs(fprD) > abs(tprD):
return fprD
else:
return tprD
def eq_odds_ratio(
    y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1
):
    """ Returns the greatest discrepancy between the between-group FPR
    ratio and the between-group TPR ratio
    Args:
        y_true (pd.Series): true target values
        y_pred (pd.Series): predicted target values
        pa_name (str): name of the protected attribute
        priv_grp (int, optional): value identifying the privileged group. Defaults to 1.
    Returns:
        Number: NaN when either underlying ratio is undefined.
    """
    fprR = fpr_ratio(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
    tprR = tpr_ratio(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
    if np.isnan(fprR) or np.isnan(tprR):
        return np.nan
    # compare each ratio's distance from parity (1.0); rounding to 6 places
    # guards the comparison against floating-point noise
    elif round(abs(fprR - 1), 6) > round(abs(tprR - 1), 6):
        return fprR
    else:
        return tprR
| 29.577778 | 88 | 0.660155 | 0 | 0 | 0 | 0 | 2,682 | 0.335838 | 0 | 0 | 4,388 | 0.549462 |
0d93c4592d73e829b821284fa1eb81e23780c084 | 6,815 | py | Python | tests/portstat/test_portstat.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | 2 | 2021-11-24T09:33:41.000Z | 2021-12-03T09:08:29.000Z | tests/portstat/test_portstat.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | null | null | null | tests/portstat/test_portstat.py | xwjiang2021/sonic-mgmt | 82c446b9fb016eb070af765aa9d9999e55b27342 | [
"Apache-2.0"
] | null | null | null |
import logging
import pytest
from tests.common.helpers.assertions import pytest_assert
from tests.common.portstat_utilities import parse_portstat
from tests.common.utilities import wait
logger = logging.getLogger('__name__')
pytestmark = [
pytest.mark.topology('any')
]
@pytest.fixture(scope='function', autouse=True)
def reset_portstat(duthosts, enum_rand_one_per_hwsku_frontend_hostname):
    """Delete all saved portstat tags on the DUT before and after each test."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    logger.info('Clear out all tags')
    duthost.command('portstat -D', become=True, module_ignore_errors=True)
    yield
    logger.info("Reset portstate ")
    duthost.command('portstat -D', become=True, module_ignore_errors=True)
def _counter_value(raw_count):
    """Convert a portstat counter string (e.g. '1,234' or 'N/A') to an int."""
    stripped = raw_count.replace(',', '')
    return 0 if stripped == 'N/A' else int(stripped)


@pytest.mark.parametrize('command', ['portstat -c', 'portstat --clear'])
def test_portstat_clear(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command):
    """Verify that clearing portstat resets the interface RX_OK/TX_OK counters."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    wait(30, 'Wait for DUT to receive/send some packets')
    before_portstat = parse_portstat(duthost.command('portstat')['stdout_lines'])
    pytest_assert(before_portstat, 'No parsed command output')
    duthost.command(command)
    wait(1, 'Wait for portstat counters to refresh')
    after_portstat = parse_portstat(duthost.command('portstat')['stdout_lines'])
    pytest_assert(after_portstat, 'No parsed command output')
    # Assert only when the pre-clear count is at least COUNT_THRES because the
    # DUT may send or receive some packets during the test after the port
    # counters are cleared.
    COUNT_THRES = 10
    for intf in before_portstat:
        rx_ok_before = _counter_value(before_portstat[intf]['rx_ok'])
        rx_ok_after = _counter_value(after_portstat[intf]['rx_ok'])
        tx_ok_before = _counter_value(before_portstat[intf]['tx_ok'])
        tx_ok_after = _counter_value(after_portstat[intf]['tx_ok'])
        if rx_ok_before >= COUNT_THRES:
            pytest_assert(rx_ok_before >= rx_ok_after,
                          'Value of RX_OK after clear should be lesser')
        if tx_ok_before >= COUNT_THRES:
            pytest_assert(tx_ok_before >= tx_ok_after,
                          'Value of TX_OK after clear should be lesser')
@pytest.mark.parametrize('command', ['portstat -D', 'portstat --delete-all'])
def test_portstat_delete_all(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command):
    """Verify that 'portstat -D/--delete-all' removes every saved tag file."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    stats_files = ('test_1', 'test_2', 'test_test')
    logger.info('Create several test stats files')
    for stats_file in stats_files:
        duthost.command('portstat -c -t {}'.format(stats_file))
    logger.info('Verify that the file names are in the /tmp directory')
    uid = duthost.command('id -u')['stdout'].strip()
    # NOTE(review): the format string has no {filename} placeholder, so the
    # 'filename' kwarg is unused and every loop iteration checks the same
    # path -- presumably the path should embed the tag name; confirm against
    # portstat's on-disk layout.
    for stats_file in stats_files:
        pytest_assert(duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
    logger.info('Run the command to be tested "{}"'.format(command))
    duthost.command(command)
    logger.info('Verify that the file names are not in the /tmp directory')
    # NOTE(review): same unused 'filename' kwarg as above.
    for stats_file in stats_files:
        pytest_assert(not duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
@pytest.mark.parametrize('command',
                         ['portstat -d -t', 'portstat -d --tag', 'portstat --delete -t', 'portstat --delete --tag'])
def test_portstat_delete_tag(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command):
    """Verify that deleting a single tag removes only that tag's stats file."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    stats_files = ('test_1', 'test_2', 'test_delete_me')
    file_to_delete = stats_files[2]
    files_not_deleted = stats_files[:2]
    logger.info('Create several test stats files')
    for stats_file in stats_files:
        duthost.command('portstat -c -t {}'.format(stats_file))
    logger.info('Verify that the file names are in the /tmp directory')
    uid = duthost.command('id -u')['stdout'].strip()
    # NOTE(review): the format string has no {filename} placeholder, so the
    # 'filename' kwarg is unused and all of these checks inspect the same
    # path -- presumably the path should embed the tag name; confirm against
    # portstat's on-disk layout.
    for stats_file in stats_files:
        pytest_assert(duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
    full_delete_command = command + ' ' + file_to_delete
    logger.info('Run the command to be tested "{}"'.format(full_delete_command))
    duthost.command(full_delete_command)
    logger.info('Verify that the deleted file name is not in the directory')
    pytest_assert(not duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
        .format(uid=uid, filename=file_to_delete))['stat']['exists'])
    logger.info('Verify that the remaining file names are in the directory')
    for stats_file in files_not_deleted:
        pytest_assert(duthost.stat(path='/tmp/portstat-{uid}/{uid}-(unknown)'\
            .format(uid=uid, filename=stats_file))['stat']['exists'])
@pytest.mark.parametrize('command', ['portstat -a', 'portstat --all'])
def test_portstat_display_all(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command):
    """Verify 'portstat -a' reports more columns than plain 'portstat'."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    base_stats = parse_portstat(duthost.command('portstat')['stdout_lines'])
    all_stats = parse_portstat(duthost.command(command)['stdout_lines'])
    pytest_assert(base_stats and all_stats, 'No parsed command output')
    logger.info('Verify the all number of columns is greater than the base number of columns')
    for intf in all_stats:
        pytest_assert(len(all_stats[intf]) > len(base_stats[intf]))
@pytest.mark.parametrize('command', ['portstat -p 1', 'portstat --period 1'])
def test_portstat_period(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command):
    """Verify that a 1-second period is reflected in the command banner."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    output = duthost.command(command)
    pytest_assert('The rates are calculated within 1 seconds period' in output['stdout_lines'][0])
@pytest.mark.parametrize('command', ['portstat -h', 'portstat --help', 'portstat', 'portstat -v',
                                     'portstat --version', 'portstat -j', 'portstat --json',
                                     'portstat -r', 'portstat --raw'])
def test_portstat_no_exceptions(duthosts, enum_rand_one_per_hwsku_frontend_hostname, command):
    """Smoke-test assorted portstat invocations for tracebacks/failures."""
    duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
    logger.info('Verify that the commands do not cause tracebacks')
    duthost.command(command)
0d94ba5e46412f0822ea3dc209d00e080804ba7d | 597 | py | Python | src/bxcommon/utils/crypto.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 12 | 2019-11-06T17:39:10.000Z | 2022-03-01T11:26:19.000Z | src/bxcommon/utils/crypto.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 8 | 2019-11-06T21:31:11.000Z | 2021-06-02T00:46:50.000Z | src/bxcommon/utils/crypto.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 5 | 2019-11-14T18:08:11.000Z | 2022-02-08T09:36:22.000Z | from hashlib import sha256
from nacl.secret import SecretBox
from nacl.utils import random
# Length of a SHA256 double hash
SHA256_HASH_LEN = 32
KEY_SIZE = SecretBox.KEY_SIZE
def double_sha256(content):
    """Return SHA256(SHA256(content)) as raw bytes."""
    return sha256(sha256(content).digest()).digest()


def bitcoin_hash(content):
    """Bitcoin-style hash: alias for the double SHA256 digest.

    Delegates to :func:`double_sha256` so the identical logic is not
    duplicated in two places.
    """
    return double_sha256(content)
def symmetric_encrypt(content, key=None):
    """Encrypt ``content`` with NaCl secret-key cryptography.

    A fresh random key is generated when ``key`` is falsy (None or empty).
    Returns the ``(key, ciphertext)`` pair.
    """
    key = key or random(KEY_SIZE)
    box = SecretBox(key)
    return key, box.encrypt(content)
def symmetric_decrypt(key, ciphertext):
    """Decrypt a NaCl SecretBox ciphertext with ``key``."""
    box = SecretBox(key)
    return box.decrypt(ciphertext)
| 21.321429 | 52 | 0.745394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.053601 |
0d966087cea0a7295db776cecd1d057649f3d673 | 829 | py | Python | src/PyMIPS/tests/memory_test.py | shenganzhang/Py-MI-PS | 2d22327c75bac1b58a4804a61e7a703ecc5ba978 | [
"MIT"
] | 3 | 2019-05-14T21:24:59.000Z | 2021-08-04T01:43:22.000Z | src/PyMIPS/tests/memory_test.py | shenganzhang/Py-MI-PS | 2d22327c75bac1b58a4804a61e7a703ecc5ba978 | [
"MIT"
] | null | null | null | src/PyMIPS/tests/memory_test.py | shenganzhang/Py-MI-PS | 2d22327c75bac1b58a4804a61e7a703ecc5ba978 | [
"MIT"
] | 2 | 2021-08-04T01:43:25.000Z | 2021-11-23T06:54:17.000Z | try:
from src.PyMIPS.Datastructure.memory import Memory
except:
from PyMIPS.Datastructure.memory import Memory
import unittest
class TestMemory(unittest.TestCase):
    """Unit tests for the PyMIPS word-addressed Memory store."""
    def test_storage(self):
        """Stored words can be read back from their addresses."""
        Memory.store_word(16, 2214)
        Memory.store_word(17, 2014)
        self.assertEqual(Memory.get_word(2214), 16)
        self.assertEqual(Memory.get_word(2014), 17)
    def test_bad_access(self):
        """Reading neighbouring/unaligned addresses must not raise."""
        # NOTE(review): no assertions here -- this only checks that the reads
        # complete without raising. TODO: assert the expected values once
        # Memory's unaligned-read contract is confirmed.
        Memory.store_word(16, 2000)
        Memory.get_word(2004)
        Memory.get_word(2001)
        Memory.get_word(2002)
        Memory.get_word(2003)
    def test_overwrite(self):
        """A store at an overlapping address clobbers the earlier word."""
        Memory.store_word(16, 2000)
        self.assertEqual(Memory.get_word(2000), 16)
        Memory.store_word(20, 2001)
        self.assertEqual(Memory.get_word(2001), 20)
        # storing at 2001 zeroes the word read at 2000 -- presumably because
        # words span multiple byte addresses; confirm against Memory's layout
        self.assertEqual(Memory.get_word(2000), 0)
0d97e6e62284344f9f71b3d49de7c9c09fe6e5dd | 4,122 | py | Python | experimentum/Storage/Migrations/Schema.py | PascalKleindienst/experimentum | 5d547e0e8135d4d7ffe42b0c8a57e70a7ac2af4b | [
"Apache-2.0"
] | null | null | null | experimentum/Storage/Migrations/Schema.py | PascalKleindienst/experimentum | 5d547e0e8135d4d7ffe42b0c8a57e70a7ac2af4b | [
"Apache-2.0"
] | 3 | 2019-04-17T08:07:12.000Z | 2019-04-28T15:24:18.000Z | experimentum/Storage/Migrations/Schema.py | PascalKleindienst/experimentum | 5d547e0e8135d4d7ffe42b0c8a57e70a7ac2af4b | [
"Apache-2.0"
] | null | null | null | """The :py:class:`.Schema` class provides a database agnostic way of manipulating tables.
Tables
======
Creating Tables
---------------
To create a new database table, the :py:meth:`~.Schema.create` method is used.
The :py:meth:`~.Schema.create` method accepts a table name as its argument and returns
a :py:class:`.Blueprint` instance that can be used to define the new table.
When creating the table, you may use any of the :py:class:`.Blueprint` column methods
to define the table's columns::
with self.schema.create('users') as table:
table.increments('id')
Checking Existence
------------------
To check if a table or column exist you can use the :py:meth:`~.Schema.has_table` or
:py:meth:`~.Schema.has_column` methods respectively::
if self.schema.has_table('users'):
# ...
if self.schema.has_column('users', 'email'):
# ...
Renaming / Dropping Tables
--------------------------
To rename an existing database table, use the :py:meth:`~.Schema.rename` method::
self.schema.rename('from', 'to')
To drop a table, you can use the :py:meth:`~.Schema.drop` or
:py:meth:`~.Schema.drop_if_exists` methods::
self.schema.drop('users')
self.schema.drop_if_exists('users')
"""
from experimentum.cli import print_failure
from contextlib import contextmanager
class Schema(object):
    """Database agnostic way of manipulating tables.
    The :py:class:`.Schema` class was inspired by the Laravel Schema Builder
    (https://laravel.com/docs/5.6/migrations#tables).
    Attributes:
        app (App): Main App Class.
        store (AbstractStore): Data Store.
    """
    def __init__(self, app):
        """Set app and store.
        Args:
            app (App): Main App Class.
        """
        self.app = app
        self.store = app.make('store')
    @contextmanager
    def create(self, name):
        """Create a new table blueprint.
        Args:
            name (str): Name of the table.
        Yields:
            Blueprint: New Instance of a table blueprint
        """
        try:
            blueprint = self.app.make('blueprint', name)
            blueprint.create()
            yield blueprint
        except Exception as exc:
            # Exceptions raised inside the caller's ``with`` body are thrown
            # in at the ``yield`` and land here too. print_failure is invoked
            # with exit code 1 -- presumably it terminates the process, so
            # _build below would not run on failure; confirm its behavior.
            print_failure('Error while creating blueprint: ' + str(exc), 1)
        self._build(blueprint)
    @contextmanager
    def table(self, name):
        """Create a blueprint for an existing table.
        Args:
            name (str): Name of the table
        Yields:
            Blueprint: New Instance of a table blueprint
        """
        try:
            blueprint = self.app.make('blueprint', name)
            yield blueprint
        except Exception as exc:
            # NOTE(review): if app.make itself raises, ``blueprint`` is never
            # bound and the _build call below would raise NameError -- only
            # safe if print_failure exits the process; confirm.
            print_failure('Error while creating blueprint: ' + str(exc), 1)
        self._build(blueprint)
    def rename(self, old, new):
        """Rename a table.
        Args:
            old (str): Old table name
            new (str): New table name
        """
        self.store.rename(old, new)
    def drop(self, name):
        """Drop a table.
        Args:
            name (str): Name of the table
        """
        self.store.drop(name)
    def drop_if_exists(self, name):
        """Drop a table if it exists.
        Args:
            name (str): Name of the table
        """
        self.store.drop_if_exists(name)
    def has_table(self, table):
        """Check if database has a specific table.
        Args:
            table (str): Table to check existance of
        """
        return self.store.has_table(table)
    def has_column(self, table, column):
        """Check if table has a specific column.
        Args:
            table (str): Table to check
            column (str): Column to check
        """
        return self.store.has_column(table, column)
    def _build(self, blueprint):
        """Build Schema based on the blueprint.
        Args:
            blueprint (Blueprint): Blueprint to build.
        """
        # dispatch on the action recorded by the blueprint ('create' when
        # Blueprint.create() was called, otherwise 'alter' for table edits)
        if blueprint.action == 'create':
            self.store.create(blueprint)
        elif blueprint.action == 'alter':
            self.store.alter(blueprint)
| 26.423077 | 89 | 0.586608 | 2,808 | 0.681223 | 937 | 0.227317 | 977 | 0.237021 | 0 | 0 | 2,770 | 0.672004 |
0d982ecf9da92c8bdff12bac020ce7bfc1ac4155 | 6,024 | py | Python | custom_components/kostal/sensor.py | zittix/kostalpiko-sensor-homeassistant | 3fd3531985cede3764992d1f380bf77608c0e79a | [
"MIT"
] | null | null | null | custom_components/kostal/sensor.py | zittix/kostalpiko-sensor-homeassistant | 3fd3531985cede3764992d1f380bf77608c0e79a | [
"MIT"
] | null | null | null | custom_components/kostal/sensor.py | zittix/kostalpiko-sensor-homeassistant | 3fd3531985cede3764992d1f380bf77608c0e79a | [
"MIT"
] | 1 | 2021-09-17T09:29:22.000Z | 2021-09-17T09:29:22.000Z | """The Kostal piko integration."""
import logging
import xmltodict
from datetime import timedelta
from homeassistant.const import (
CONF_USERNAME,
CONF_PASSWORD,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
)
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import SENSOR_TYPES, MIN_TIME_BETWEEN_UPDATES, DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up Kostal Piko sensors from a config entry."""
    # Fetch the data once up-front so every sensor starts with populated state
    data = PikoData(entry.data[CONF_HOST], entry.data[CONF_USERNAME],
                    entry.data[CONF_PASSWORD], hass)
    await data.async_update()
    sensors = [PikoInverter(data, sensor_type, entry.title)
               for sensor_type in entry.data[CONF_MONITORED_CONDITIONS]]
    async_add_entities(sensors)
class PikoInverter(SensorEntity):
    """Representation of a Piko inverter."""
    def __init__(self, piko_data, sensor_type, name):
        """Initialize the sensor.
        Args:
            piko_data (PikoData): shared data object polled for readings.
            sensor_type (str): key into SENSOR_TYPES selecting the metric.
            name (str): device name from the config entry title.
        """
        self.entity_description = SENSOR_TYPES[sensor_type]
        # NOTE(review): self.name is read before _attr_name is assigned --
        # presumably it resolves from entity_description at this point;
        # confirm against Home Assistant's Entity.name resolution order.
        self._attr_name = f"{self.name}"
        self._attr_unique_id = f"{piko_data.host}_{self.entity_description.key}"
        self._name = name
        self.type = sensor_type
        self.piko = piko_data
        self._state = None
        self.serial_number = None
        self.model = None
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def device_info(self):
        """Return information about the device."""
        return {
            "identifiers": {(DOMAIN, self.serial_number)},
            "name": self._name,
            "manufacturer": "Kostal",
            "model": self.model,
        }
    async def async_update(self):
        """Update data.

        Refreshes the shared PikoData object, then copies the reading that
        matches this sensor's type into ``_state``. The "No value available"
        returns below are discarded by the caller, leaving ``_state``
        unchanged when the reading is missing.
        """
        await self.piko.async_update()
        self.serial_number = self.piko.info['sn']
        self.model = self.piko.info['model']
        if self.type == "solar_generator_power":
            if "AC_Power" in self.piko.measurements:
                self._state = self.piko.measurements['AC_Power']
            else:
                return "No value available"
        elif self.type == "ac_voltage":
            if "AC_Voltage" in self.piko.measurements:
                self._state = self.piko.measurements['AC_Voltage']
            else:
                return "No value available"
        elif self.type == "ac_current":
            if "AC_Current" in self.piko.measurements:
                self._state = self.piko.measurements['AC_Current']
            else:
                return "No value available"
        elif self.type == "total_solar_power":
            if "Produced_Total" in self.piko.yields:
                self._state = self.piko.yields['Produced_Total']
            else:
                return "No value available"
class PikoData(Entity):
    """Representation of a Piko inverter."""
    def __init__(self, host, username, password, hass):
        """Initialize the data object.

        Note: ``username`` and ``password`` are currently unused; the
        inverter endpoint is fetched without authentication.
        """
        self.host = host
        self.hass = hass
        self.info = {}
        self.measurements = None
        self.yields = None
        self.session = async_get_clientsession(hass)
    async def retrieve(self):
        """Fetch /all.xml from the inverter and parse it into ``info``,
        ``measurements`` and ``yields``. On a non-200 response the previous
        values are left untouched."""
        async with self.session.get(self.host + '/all.xml') as resp:
            text = await resp.text()
            if resp.status != 200:
                _LOGGER.error("Error while fetching the data from kostal: %d %s", resp.status, text)
            else:
                obj = xmltodict.parse(text)
                self.info['model'] = obj["root"]["Device"]["@Name"]
                self.info['sn'] = obj["root"]["Device"]["@Serial"]
                self.measurements = {}
                self.yields = {}
                # entries without both @Value and @Type are skipped
                for i in obj["root"]["Device"]["Measurements"]["Measurement"]:
                    if '@Value' in i and '@Type' in i:
                        self.measurements[i["@Type"]] = float(i["@Value"])
                # <Measurement Value="241.4" Unit="V" Type="AC_Voltage"/>
                # <Measurement Value="0.876" Unit="A" Type="AC_Current"/>
                # <Measurement Value="206.7" Unit="W" Type="AC_Power"/>
                # <Measurement Value="205.8" Unit="W" Type="AC_Power_fast"/>
                # <Measurement Value="49.976" Unit="Hz" Type="AC_Frequency"/>
                # <Measurement Value="267.9" Unit="V" Type="DC_Voltage"/>
                # <Measurement Value="0.854" Unit="A" Type="DC_Current"/>
                # <Measurement Value="357.2" Unit="V" Type="LINK_Voltage"/>
                # <Measurement Unit="W" Type="GridPower"/>
                # <Measurement Unit="W" Type="GridConsumedPower"/>
                # <Measurement Unit="W" Type="GridInjectedPower"/>
                # <Measurement Unit="W" Type="OwnConsumedPower"/>
                # <Measurement Value="100.0" Unit="%" Type="Derating"/>
                # yields are keyed "<Type>_<Slot>", e.g. "Produced_Total"
                for i in obj["root"]["Device"]["Yields"]:
                    o = obj["root"]["Device"]["Yields"][i]
                    if '@Type' in o and '@Slot' in o:
                        self.yields[o["@Type"] + "_" + o["@Slot"]] = float(o["YieldValue"]["@Value"])
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Update inverter data."""
        # pylint: disable=protected-access
        await self.retrieve()
        _LOGGER.debug(self.measurements)
        _LOGGER.debug(self.yields)
        _LOGGER.debug(self.info)
if __name__ == "__main__":
import sys
data = PikoData(sys.argv[1], None, None, None)
print(data.measurements)
print(data.yields)
print(data.info) | 39.116883 | 103 | 0.569555 | 4,777 | 0.792995 | 0 | 0 | 684 | 0.113546 | 3,851 | 0.639276 | 1,779 | 0.295319 |
0d98cd3509f57f05513feadf15808accb6f7add9 | 601 | py | Python | kaskopy/items.py | aspirin1988/KASKO | 924278f67a303a861e2367412bbc1dc3df59c742 | [
"MIT"
] | null | null | null | kaskopy/items.py | aspirin1988/KASKO | 924278f67a303a861e2367412bbc1dc3df59c742 | [
"MIT"
] | null | null | null | kaskopy/items.py | aspirin1988/KASKO | 924278f67a303a861e2367412bbc1dc3df59c742 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from kaskopy.models import Car, RawData
class CarItem(scrapy.Item):
brand = scrapy.Field()
model = scrapy.Field()
year = scrapy.Field()
price = scrapy.Field()
def save(self):
kwargs = {
'mark': self['brand'],
'model': self['model'],
'year': self['year']
}
car, created = Car.get_or_create(**kwargs)
RawData.create(price=self['price'], car=car)
| 23.115385 | 52 | 0.592346 | 392 | 0.652246 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.317804 |
0d991e73f8352c51439f70b45abdfecc3e0a81fe | 26 | py | Python | 6 - Python/Introduction/2 - Raw Input.py | Terence-Guan/Python.HackerRank | 165a5f0e739c7678dfac7eae95443018e2167c3d | [
"MIT"
] | 88 | 2016-10-23T16:41:14.000Z | 2019-12-30T23:51:47.000Z | HackerRank/6 - Python/Introduction/2 - Raw Input.py | natalie-o-perret/coding-challenges | 9a242e0ec54488f59be82592822b31ff51af1633 | [
"MIT"
] | 1 | 2018-10-13T14:31:54.000Z | 2018-10-13T14:31:54.000Z | HackerRank/6 - Python/Introduction/2 - Raw Input.py | natalie-o-perret/coding-challenges | 9a242e0ec54488f59be82592822b31ff51af1633 | [
"MIT"
] | 82 | 2017-02-01T17:02:56.000Z | 2020-02-01T11:45:58.000Z | line = input()
print(line) | 13 | 14 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0d9aee5955af9ee204ebd39cc82058caf26d7cbf | 2,466 | py | Python | GameLogServer/GameLogServer/log_reader.py | Frankity/IW4M-Admin | 515443c84a574944e946d3691a4682d916fd582e | [
"MIT"
] | null | null | null | GameLogServer/GameLogServer/log_reader.py | Frankity/IW4M-Admin | 515443c84a574944e946d3691a4682d916fd582e | [
"MIT"
] | null | null | null | GameLogServer/GameLogServer/log_reader.py | Frankity/IW4M-Admin | 515443c84a574944e946d3691a4682d916fd582e | [
"MIT"
] | null | null | null | import re
import os
import time
class LogReader(object):
def __init__(self):
self.log_file_sizes = {}
# (if the file changes more than this, ignore ) - 1 MB
self.max_file_size_change = 1000000
# (if the time between checks is greater, ignore ) - 5 minutes
self.max_file_time_change = 1000
def read_file(self, path):
# prevent traversing directories
if re.search('r^.+\.\.\\.+$', path):
return False
# must be a valid log path and log file
if not re.search(r'^.+[\\|\/](userraw|mods)[\\|\/].+.log$', path):
return False
# set the initialze size to the current file size
file_size = 0
if path not in self.log_file_sizes:
self.log_file_sizes[path] = {
'length' : self.file_length(path),
'read': time.time()
}
return ''
# grab the previous values
last_length = self.log_file_sizes[path]['length']
last_read = self.log_file_sizes[path]['read']
# the file is being tracked already
new_file_size = self.file_length(path)
# the log size was unable to be read (probably the wrong path)
if new_file_size < 0:
return False
now = time.time()
file_size_difference = new_file_size - last_length
time_difference = now - last_read
# update the new size and actually read the data
self.log_file_sizes[path] = {
'length': new_file_size,
'read': now
}
# if it's been too long since we read and the amount changed is too great, discard it
# todo: do we really want old events? maybe make this an "or"
if file_size_difference > self.max_file_size_change and time_difference > self.max_file_time_change:
return ''
new_log_info = self.get_file_lines(path, file_size_difference)
return new_log_info
def get_file_lines(self, path, length):
try:
file_handle = open(path, 'rb')
file_handle.seek(-length, 2)
file_data = file_handle.read(length)
file_handle.close()
return file_data.decode('utf-8')
except:
return False
def file_length(self, path):
try:
return os.stat(path).st_size
except:
return -1
reader = LogReader()
| 32.447368 | 108 | 0.576642 | 2,410 | 0.977291 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.270479 |
0d9ef0b73b31da3c8aa84b2d46d763a801003401 | 1,121 | py | Python | python/defaultdict-tutorial/main.py | shollingsworth/HackerRank | 2f0e048044e643d6aa9d07c1898f3b00adf489b0 | [
"Apache-2.0"
] | null | null | null | python/defaultdict-tutorial/main.py | shollingsworth/HackerRank | 2f0e048044e643d6aa9d07c1898f3b00adf489b0 | [
"Apache-2.0"
] | null | null | null | python/defaultdict-tutorial/main.py | shollingsworth/HackerRank | 2f0e048044e643d6aa9d07c1898f3b00adf489b0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import __future__
import sys
import json
def banner():
ban = '====' * 30
print("{}\nSAMPLE INP:\n{}\n{}".format(ban,ban,open(ip, 'r').read()))
print("{}\nSAMPLE OUT:\n{}\n{}".format(ban,ban,open(op, 'r').read()))
print("{}\nSTART:\n{}".format(ban,ban))
sys.stdin = open(ip, 'r')
cnt = -1
def comp(inp,ln):
outl = output_arr[ln]
if str(inp) != outl:
raise Exception("Error input output: line {}, file: {}\ngot: {} expected: {}".format(ln,op,inp,outl))
ip = "./challenge_sample_input"
op = "./challenge_sample_output"
ip = "./input01.txt"
op = "./output01.txt"
output_arr = map(str,open(op,'r').read().split('\n'))
banner()
# https://www.hackerrank.com/challenges/defaultdict-tutorial/problem
import sys
from collections import defaultdict
n,m = map(int,raw_input().split(' '))
A = [raw_input() for _ in range(n)]
B = [raw_input() for _ in range(m)]
track = defaultdict(list)
for idx,v in enumerate(A):
track[v].append(idx + 1)
for i in B:
if i not in A:
print(-1)
else:
print(" ".join(map(str,track[i])))
| 25.477273 | 109 | 0.609277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.313113 |
0d9efb97f4ca82cd321aeee3291d3a4197c0e6af | 10,934 | py | Python | EpikCord/interactions.py | Conchbot-Development/EpikCord.py | d81f4075ee9621edec05c8ab0ccbb4a6c4ec46c6 | [
"MIT"
] | null | null | null | EpikCord/interactions.py | Conchbot-Development/EpikCord.py | d81f4075ee9621edec05c8ab0ccbb4a6c4ec46c6 | [
"MIT"
] | null | null | null | EpikCord/interactions.py | Conchbot-Development/EpikCord.py | d81f4075ee9621edec05c8ab0ccbb4a6c4ec46c6 | [
"MIT"
] | null | null | null | from .embed import Embed
from .file import Attachment
from .member import GuildMember, User
from .slash import SlashCommandOptionChoice, AnyOption
from .commands import MessageButton, MessageSelectMenu, MessageTextInput, MessageSelectMenuOption
from typing import Optional, List, Union
class BaseInteraction:
def __init__(self, client, data: dict):
self.id: str = data.get("id")
self.client = client
self.type: int = data.get("type")
self.application_id: int = data.get("application_id")
self.data: Optional[dict] = data.get("data")
self.guild_id: Optional[str] = data.get("guild_id")
self.channel_id: Optional[str] = data.get("channel_id")
self.member: Optional[GuildMember] = GuildMember(client, data.get("member")) if data.get("member") else None
self.user: Optional[User] = User(client, data.get("user")) if data.get("user") else None
self.token: str = data.get("token")
self.version: int = data.get("version")
self.locale: Optional[str] = data.get("locale")
self.guild_locale: Optional[str] = data.get("guild_locale")
def is_application_command(self):
return self.type == 2
def is_message_component(self):
return self.type == 3
def is_autocomplete(self):
return self.type == 4
def is_modal_submit(self):
return self.type == 5
async def reply(self, *, tts: bool = False, content: Optional[str] = None, embeds: Optional[List[Embed]] = None, allowed_mentions = None, flags: Optional[int] = None, components: Optional[List[Union[MessageButton, MessageSelectMenu, MessageTextInput]]] = None, attachments: Optional[List[Attachment]] = None) -> None:
message_data = {
"tts": tts
}
if content:
message_data["content"] = content
if embeds:
message_data["embeds"] = [embed.to_dict() for embed in embeds]
if allowed_mentions:
message_data["allowed_mentions"] = allowed_mentions.to_dict()
if flags:
message_data["flags"] = flags
if components:
message_data["components"] = [component.to_dict() for component in components]
if attachments:
message_data["attachments"] = [attachment.to_dict() for attachment in attachments]
payload = {
"type": 4,
"data": message_data
}
await self.client.http.post(f"/interactions/{self.id}/{self.token}/callback", json = payload)
async def defer(self):
payload = {
"type": 5
}
response = await self.client.http.post(f"/interactions/{self.id}/{self.token}/callback", json = payload)
return await response.json()
async def fetch_reply(self):
response = await self.client.http.get(f"/webhooks/{self.application_id}/{self.token}/messages/@original")
return await response.json()
async def edit_reply(self, *, tts: bool = False, content: Optional[str] = None, embeds: Optional[List[Embed]] = None, allowed_mentions = None, flags: Optional[int] = None, components: Optional[List[Union[MessageButton, MessageSelectMenu, MessageTextInput]]] = None, attachments: Optional[List[Attachment]] = None):
message_data = {
"tts": tts
}
if content:
message_data["content"] = content
if embeds:
message_data["embeds"] = [embed.to_dict() for embed in embeds]
if allowed_mentions:
message_data["allowed_mentions"] = allowed_mentions.to_dict()
if flags:
message_data["flags"] = flags
if components:
message_data["components"] = [component.to_dict() for component in components]
if attachments:
message_data["attachments"] = [attachment.to_dict() for attachment in attachments]
payload = {
'type' : 2,
'data' : message_data
}
response = await self.client.http.patch(f"/webhooks/{self.application_id}/{self.token}/messages/@original", json = payload)
return await response.json()
async def delete_reply(self):
response = await self.client.http.delete(f"/webhooks/{self.application_id}/{self.token}/messages/@original")
return await response.json()
async def followup(self, message_data: dict):
response = await self.client.http.post(f"/webhooks/{self.application_id}/{self.token}", data=message_data)
return await response.json()
async def fetch_followup_message(self, message_id: str):
response = await self.client.http.get(f"/webhooks/{self.application_id}/{self.token}/messages/{message_id}")
return await response.json()
async def edit_followup(self, message_id: str, message_data):
response = await self.client.http.patch(f"/webhooks/{self.application_id}/{self.token}/messages/{message_id}", data=message_data)
return await response.json()
async def delete_followup(self, message_id: str):
response = await self.client.http.delete(f"/webhooks/{self.application_id}/{self.token}/messages/{message_id}")
return await response.json()
class ModalSubmitInteraction(BaseInteraction):
def __init__(self, client, data: dict):
super().__init__(client, data)
self.components: List[Union[MessageButton, MessageSelectMenu, MessageTextInput]] = []
for component in data.get("components"):
if component.get("type") == 2:
self.components.append(MessageButton(component))
elif component.get("type") == 3:
self.components.append(MessageSelectMenu(component))
elif component.get("type") == 4:
self.components.append(MessageTextInput(component))
class ApplicationCommandOption:
def __init__(self, data: dict):
self.command_name: str = data.get("name")
self.command_type: int = data.get("type")
self.value: Optional[Union[str, int, float]] = data.get("value")
self.focused: Optional[bool] = data.get("focused")
class AutoCompleteInteraction(BaseInteraction):
def __init__(self, client, data: dict):
super().__init__(client, data)
self.options: List[ApplicationCommandOption] = [ApplicationCommandOption(option) for option in data.get("options", [])]
async def reply(self, choices: List[SlashCommandOptionChoice]) -> None:
payload = {
"type": 9,
"data": []
}
for choice in choices:
if not isinstance(choice, SlashCommandOptionChoice):
raise TypeError(f"{choice} must be of type SlashCommandOptionChoice")
payload["data"]["choices"].append(choice.to_dict())
await self.client.http.post(f"/interactions/{self.id}/{self.token}/callback", json = payload)
class MessageComponentInteraction(BaseInteraction):
def __init__(self, client, data: dict):
super().__init__(client, data)
self.custom_id: str = self.data.get("custom_id")
self.component_type: Optional[int] = self.data.get("component_type")
self.values: Optional[dict] = [MessageSelectMenuOption(option) for option in self.data.get("values", [])]
class ApplicationCommandSubcommandOption(ApplicationCommandOption):
def __init__(self, data: dict):
super().__init__(data)
self.options: List[ApplicationCommandOption] = [ApplicationCommandOption(option) for option in data.get("options", [])]
class ReceivedSlashCommandOption:
def __init__(self, option: dict):
self.name: str = option.get("name")
self.value: Optional[Union[str, int, float]] = option.get("value")
class ApplicationCommandOptionResolver:
def __init__(self, options: List[AnyOption]):
options = []
for option in options:
if not option.get("options"):
options.append(ReceivedSlashCommandOption(option))
else:
options.append(ApplicationCommandSubcommandOption(option))
self.options: Optional[List[AnyOption]] = options
def get_string_option(self, name: str) -> Optional[str]:
filter_object = filter(lambda option: option.name == name, self.options)
option = list(filter_object)
if bool(option):
return str(option[0].value)
# def get_subcommand_option(self, name: str) -> Optional[ApplicationCommandSubcommandOption]:
# filter_object = filter(lambda option: option.name == name, self.options)
# option = list(filter_object)
# if bool(option):
# return
# def get_subcommand_group_option(self, name: str) -> Optional[ApplicationCommandSubcommandOption]:
# return list(filter(lambda option: option.name == name, self.options))[0] if len(filter(lambda option: option.name == name, self.options)) else None
def get_int_option(self, name: str) -> Optional[int]:
return list(filter(lambda option: option.name == name, self.options))[0].value if len(filter(lambda option: option.name == name, self.options)) else None
def get_bool_option(self, name: str) -> Optional[bool]:
return list(filter(lambda option: option.name == name, self.options))[0].value if len(filter(lambda option: option.name == name, self.options)) else None
class ResolvedDataHandler:
def __init__(self, client, resolved_data: dict):
self.data: dict = resolved_data # In case we miss anything and people can just do it themselves
self.users: dict = [User(client, user) for user in self.data.get("users", [])]
self.members: dict = [GuildMember()]
self.roles: dict = self.data["roles"]
self.channels: dict = self.data["channels"]
class ApplicationCommandInteraction(BaseInteraction):
def __init__(self, client, data: dict):
super().__init__(client, data)
self.command_id: str = self.data.get("id")
self.command_name: str = self.data.get("name")
self.command_type: int = self.data.get("type")
self.resolved: ResolvedDataHandler(client, data.get("resolved", {}))
self.options = ApplicationCommandOptionResolver(self.data.get("options"))
class UserCommandInteraction(ApplicationCommandInteraction):
def __init__(self, client, data: dict):
super().__init__(client, data)
self.target_id: str = data.get("target_id")
class MessageCommandInteraction(UserCommandInteraction):
... # Literally the same thing.
class MessageInteraction:
def __init__(self, client, data: dict):
self.id: str = data.get("id")
self.type: int = data.get("type")
self.name: str = data.get("name")
self.user: User = User(client, data.get("user"))
self.member: Optional[GuildMember] = GuildMember(client, data.get("member")) if data.get("member") else None
self.user: User = User(client, data.get("user"))
| 44.628571 | 321 | 0.657856 | 10,621 | 0.971374 | 0 | 0 | 0 | 0 | 4,245 | 0.388239 | 1,802 | 0.164807 |
0da232f99858da8e8ab6976dc3b106393438ba8a | 3,571 | py | Python | satchmo/contact/views.py | sankroh/satchmo | e48df0c2a4be4ce14785d0a5d6dd1e516c57a838 | [
"BSD-3-Clause"
] | 1 | 2016-05-09T12:21:04.000Z | 2016-05-09T12:21:04.000Z | satchmo/contact/views.py | sankroh/satchmo | e48df0c2a4be4ce14785d0a5d6dd1e516c57a838 | [
"BSD-3-Clause"
] | null | null | null | satchmo/contact/views.py | sankroh/satchmo | e48df0c2a4be4ce14785d0a5d6dd1e516c57a838 | [
"BSD-3-Clause"
] | null | null | null | from django import http
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.core import urlresolvers
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from satchmo.configuration import config_value, config_get_group, SettingNotSet
from satchmo.contact import signals, CUSTOMER_ID
from satchmo.contact.forms import ExtendedContactInfoForm
from satchmo.contact.models import Contact
from satchmo.shop.models import Config
import logging
log = logging.getLogger('satchmo.contact.views')
def view(request):
"""View contact info."""
try:
user_data = Contact.objects.get(user=request.user.id)
except Contact.DoesNotExist:
user_data = None
contact_dict = {
'user_data': user_data,
}
signals.satchmo_contact_view.send(user_data, contact=user_data, contact_dict=contact_dict)
context = RequestContext(request, contact_dict)
return render_to_response('contact/view_profile.html', context)
view = login_required(view)
def update(request):
"""Update contact info"""
init_data = {}
shop = Config.objects.get_current()
try:
contact = Contact.objects.from_request(request, create=False)
except Contact.DoesNotExist:
contact = None
if request.method == "POST":
new_data = request.POST.copy()
form = ExtendedContactInfoForm(new_data, shop=shop, contact=contact, shippable=True,
initial=init_data)
if form.is_valid():
if contact is None and request.user:
contact = Contact(user=request.user)
custID = form.save(contact=contact)
request.session[CUSTOMER_ID] = custID
redirect_to = request.REQUEST.get(REDIRECT_FIELD_NAME, '')
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = urlresolvers.reverse('satchmo_account_info')
return http.HttpResponseRedirect(redirect_to)
else:
signals.satchmo_contact_view.send(contact, contact=contact, contact_dict=init_data)
else:
if contact:
#If a person has their contact info, make sure we populate it in the form
for item in contact.__dict__.keys():
init_data[item] = getattr(contact,item)
if contact.shipping_address:
for item in contact.shipping_address.__dict__.keys():
init_data["ship_"+item] = getattr(contact.shipping_address,item)
if contact.billing_address:
for item in contact.billing_address.__dict__.keys():
init_data[item] = getattr(contact.billing_address,item)
if contact.primary_phone:
init_data['phone'] = contact.primary_phone.phone
signals.satchmo_contact_view.send(contact, contact=contact, contact_dict=init_data)
form = ExtendedContactInfoForm(shop=shop, contact=contact, shippable=True, initial=init_data)
init_data['form'] = form
if shop.in_country_only:
init_data['country'] = shop.sales_country
else:
countries = shop.countries()
if countries and countries.count() == 1:
init_data['country'] = countries[0]
context = RequestContext(request, init_data)
return render_to_response('contact/update_form.html', context)
update = login_required(update)
| 36.814433 | 101 | 0.679362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.07953 |
0da2968bb359986875b9af1a418b84d7e635b431 | 480 | py | Python | slackbuilder/blocks.py | kbeauregard/slackbuilder | 866ec0ad737a2277bd043081dc08c6ebc182b2e3 | [
"MIT"
] | null | null | null | slackbuilder/blocks.py | kbeauregard/slackbuilder | 866ec0ad737a2277bd043081dc08c6ebc182b2e3 | [
"MIT"
] | null | null | null | slackbuilder/blocks.py | kbeauregard/slackbuilder | 866ec0ad737a2277bd043081dc08c6ebc182b2e3 | [
"MIT"
] | 1 | 2021-02-01T13:26:57.000Z | 2021-02-01T13:26:57.000Z | DEFAULT_TEXT_TYPE = "mrkdwn"
class BaseBlock:
def generate(self):
raise NotImplemented("Subclass missing generate implementation")
class TextBlock(BaseBlock):
def __init__(self, text, _type=DEFAULT_TEXT_TYPE):
self._text = text
self._type = _type
def __repr__(self):
return f"TextBlock({self._text}, _type={self._type})"
def generate(self):
return {"type": "section", "text": {"type": self._type, "text": self._text}}
| 25.263158 | 84 | 0.65625 | 445 | 0.927083 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.26875 |
0da30113756629b303663abfe1005ea53d21c7f3 | 551 | py | Python | lopy_gateway/config.py | haroal/choco-lora | d72cce0bb3e090463fafb993fb1f51db2c634416 | [
"MIT"
] | null | null | null | lopy_gateway/config.py | haroal/choco-lora | d72cce0bb3e090463fafb993fb1f51db2c634416 | [
"MIT"
] | null | null | null | lopy_gateway/config.py | haroal/choco-lora | d72cce0bb3e090463fafb993fb1f51db2c634416 | [
"MIT"
] | null | null | null | """ LoPy LoRaWAN Nano Gateway configuration options """
import machine
import ubinascii
WIFI_MAC = ubinascii.hexlify(machine.unique_id()).upper()
# Set the Gateway ID to be the first 3 bytes of MAC address + 'FFFE' + last 3 bytes of MAC address
GATEWAY_ID = '30aea4fffe4e5638' #WIFI_MAC[:6] + "FFFE" + WIFI_MAC[6:12]
SERVER = 'router.eu.thethings.network'
PORT = 1700
NTP = "pool.ntp.org"
NTP_PERIOD_S = 3600
WIFI_SSID = 'S9-Alexis'
WIFI_PASS = 'aeiouy95'
# for EU868
LORA_FREQUENCY = 868100000
LORA_GW_DR = "SF7BW125" # DR_5
LORA_NODE_DR = 5
| 23.956522 | 99 | 0.731397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.548094 |
0da6aee5954b956052c8a6ffe1cd4bb993643d75 | 1,560 | py | Python | 1-python-basico (Logica de programacao)/desafio-validador-cpf/desafio-cpf.py | Leodf/projetos-python | 64e6262e6535d92624ad50148634d881608a7523 | [
"MIT"
] | null | null | null | 1-python-basico (Logica de programacao)/desafio-validador-cpf/desafio-cpf.py | Leodf/projetos-python | 64e6262e6535d92624ad50148634d881608a7523 | [
"MIT"
] | null | null | null | 1-python-basico (Logica de programacao)/desafio-validador-cpf/desafio-cpf.py | Leodf/projetos-python | 64e6262e6535d92624ad50148634d881608a7523 | [
"MIT"
] | null | null | null | """
CPF = 079.004.419-64
----------------------
0 * 10 = 0 # 0 * 11 = 0
7 * 9 = 63 # 7 * 10 = 70
9 * 8 = 72 # 9 * 9 = 81
0 * 7 = 0 # 0 * 8 = 0
0 * 6 = 0 # 0 * 7 = 0
4 * 5 = 20 # 4 * 6 = 24
4 * 4 = 16 # 4 * 5 = 20
1 * 3 = 3 # 1 * 4 = 4
9 * 2 = 18 # 9 * 3 = 27
# digito 1 * 2 = 12
soma = 192 # soma = 238
11 - (192 % 11) = 6 # 11 - (238 % 11) = 4
se soma > 9 == 0 # se soma > 9 == 0
se soma <= 9 == soma # se soma <= 9 == soma
Digito 1 = 6 # Digito 1 = 4
"""
# Input de dados e verificação dos numeros
print()
print('*'*50)
titulo = ' Validador de CPF '
print(f'{titulo:*^50}')
print('*'*50)
print()
while True:
cpf = input('Digite o seu CPF: ')
if not cpf.isnumeric():
print('Digite apenas os números do seu CPF sem ponto e hífen')
continue
elif not len(cpf) == 11:
print('Opa seu CPF está não está com 11 números')
continue
else:
print(cpf)
break
soma_1 = 0
i = 0
for n in range(10,1,-1):
soma_1 += int(cpf[i])*n
i += 1
calc_digito_1 = str (11 - (soma_1 % 11))
soma_2 = 0
j = 0
for n in range(11,1,-1):
soma_2 += int(cpf[j])*n
j += 1
calc_digito_2 = str (11 - (soma_2 % 11))
if calc_digito_1 == cpf[9] and calc_digito_2 == cpf[10]:
print('Seu CPF é válido no território nacional')
else:
print('Você está com um CPF não válido')
| 21.666667 | 70 | 0.442949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 964 | 0.612063 |
0da6ec5d34b8304770452661b5f8b2646b4f55b0 | 1,414 | py | Python | HW6/mashavskih/2.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW6/mashavskih/2.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW6/mashavskih/2.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | 6 | 2022-02-22T22:30:49.000Z | 2022-03-28T12:51:19.000Z | import math
def rectangle(lenght_rectangle, breadth_rectangle):
area_rectangle = lenght_rectangle*breadth_rectangle
print(f'The area of rectangle is {area_rectangle}.')
def triangle(base_triangle, height_triangle):
area_triangle = 0.5 * base_triangle * height_triangle
print(f'The area of triangle is {area_triangle}.')
def circle(radius_squared):
area_circle = math.pi * math.pow(radius_squared, 2)
print(f'The area of circle is {area_circle}.')
def calc_area(choice):
if choice == 1:
lenght_rectangle = int(input('Enter lenght of rectangle:\n'))
breadth_rectangle = int(input('Enter breadth of rectangle:\n'))
return rectangle(lenght_rectangle, breadth_rectangle)
elif choice == 2:
base_triangle = int(input('Enter base of triangle:\n'))
height_triangle = int(input('Enter heighr of triangle:\n'))
return triangle(base_triangle, height_triangle)
elif choice == 3:
radius_squared = float(input('Enter radius squared of circle:\n'))
return circle(radius_squared)
else:
print('This number not available!')
if __name__ == "__main__" :
print('Calculate square of area shape:\n 1 -> Rectangle;\n 2 -> Triangle;\n 3 -> Circle')
choice= int(input('Enter the number of shape whose area you want to find: '))
calc_area(choice)
| 44.1875 | 93 | 0.662659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.322489 |
0da7e379e2b733a5f37fb8dcb2fa4d62be4fbe82 | 1,204 | py | Python | examples/constraints.py | jbarberia/PFNET.py | a2f327d84401acc63fd4ce91c99086770ce72a6b | [
"BSD-2-Clause"
] | 3 | 2018-03-21T11:54:38.000Z | 2020-12-29T16:46:14.000Z | examples/constraints.py | jbarberia/PFNET.py | a2f327d84401acc63fd4ce91c99086770ce72a6b | [
"BSD-2-Clause"
] | 23 | 2018-03-29T00:42:06.000Z | 2021-01-05T19:15:05.000Z | examples/constraints.py | ttinoco/PFNET.py | 3a6845d132ddba6053fc84b1099597d99c0ac5e2 | [
"BSD-2-Clause"
] | 5 | 2018-10-01T19:05:11.000Z | 2020-05-27T06:19:11.000Z | #***************************************************#
# This file is part of PFNET. #
# #
# Copyright (c) 2015, Tomas Tinoco De Rubira. #
# #
# PFNET is released under the BSD 2-clause license. #
#***************************************************#
# Optimization Problems - Constraints
import sys
sys.path.append('.')
import pfnet
net = pfnet.Parser(sys.argv[1]).parse(sys.argv[1])
net.set_flags('bus',
'variable',
'any',
['voltage magnitude','voltage angle'])
print(net.num_vars == 2*net.num_buses)
constr = pfnet.Constraint('AC power balance',net)
print(constr.name == 'AC power balance')
x = net.get_var_values()
constr.analyze()
print(constr.num_extra_vars)
constr.eval(x + 0.01)
constr.eval(x)
import numpy as np
f = constr.f
print(type(f), f.shape)
print(np.linalg.norm(f,np.inf))
bus = net.get_bus(5)
Hi = constr.get_H_single(bus.dP_index)
print(type(Hi), Hi.shape, Hi.nnz)
coefficients = np.random.randn(f.size)
constr.combine_H(coefficients)
H = constr.H_combined
print(type(H), H.shape, H.nnz)
| 19.419355 | 53 | 0.538206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 501 | 0.416113 |
0da93efc72f90b6e00eddc17e58f0b6bc35a1911 | 11,343 | py | Python | src/transbigdata/getbusdata.py | anitagraser/transbigdata | 0eb972c78f9154c0a3f780f197ef9af406b2bb71 | [
"BSD-3-Clause"
] | 1 | 2022-03-06T00:15:19.000Z | 2022-03-06T00:15:19.000Z | src/transbigdata/getbusdata.py | anitagraser/transbigdata | 0eb972c78f9154c0a3f780f197ef9af406b2bb71 | [
"BSD-3-Clause"
] | null | null | null | src/transbigdata/getbusdata.py | anitagraser/transbigdata | 0eb972c78f9154c0a3f780f197ef9af406b2bb71 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import numpy as np
import geopandas as gpd
from shapely.geometry import Polygon,LineString
import urllib.request
import json
from .CoordinatesConverter import gcj02towgs84,bd09towgs84,bd09mctobd09
from urllib import parse
def getadmin(keyword, ak, subdistricts=False):
    '''
    Input the keyword and the Amap ak. The output is the GIS file of the administrative boundary (Only in China)

    Parameters
    -------
    keyword : str
        The keyword. It might be the city name such as Shengzheng, or the administrative code such as 440500
    ak : str
        Amap accesstoken
    subdistricts : bool
        Whether to output the information of the administrative district boundary

    Returns
    -------
    admin : GeoDataFrame
        Administrative district
    districts : DataFrame
        The information of subdistricts. This can be used to further get the boundary of lower level districts
    '''
    # Assemble the AMap district-query request URL.
    url = 'https://restapi.amap.com/v3/config/district?'
    params = {
        'subdistrict': '3',
        'showbiz': 'false',
        'extensions': 'all',
        'key': ak,
        's': 'rsv3',
        'output': 'json',
        'level': 'district',
        'keywords': keyword,
        'platform': 'JS',
        'logversion': '2.0',
        'sdkversion': '1.4.10'
    }
    url = url + parse.urlencode(params)
    request = urllib.request.Request(url)
    # Use a context manager so the connection is closed, and a timeout for
    # consistency with the other web requests in this module.
    with urllib.request.urlopen(request, timeout=60) as response:
        webpage = response.read()
    result = json.loads(webpage.decode('utf8', 'ignore'))
    # Only the first returned district is used, matching the original behavior.
    district = result['districts'][0]
    # The boundary comes back as '|'-separated rings, each ring a ';'-separated
    # list of 'lng,lat' pairs in GCJ-02 coordinates; convert each to WGS-84.
    rings = []
    for ring_str in district['polyline'].split('|'):
        coords = pd.DataFrame(
            [pair.split(',') for pair in ring_str.split(';')]).astype(float)
        coords[0], coords[1] = gcj02towgs84(coords[0], coords[1])
        rings.append(Polygon(coords.values))
    # Dissolve all rings into one (multi)polygon.
    poly = gpd.GeoDataFrame({'geometry': rings}).unary_union
    data = pd.DataFrame()
    data['geometry'] = [poly]
    # Attach whichever descriptive fields the API actually returned; missing
    # keys are simply skipped (same effect as the previous try/except blocks).
    for field in ('citycode', 'adcode', 'name', 'level', 'center'):
        if field in district:
            data[field] = district[field]
    admin = gpd.GeoDataFrame(data)
    if subdistricts:
        districts = pd.DataFrame(district['districts'])
        return admin, districts
    return admin
def getbusdata(city,keywords):
'''
Obtain the geographic information of the bus station and bus line from the map service (Only in China)
Parameters
-------
city : str
city name
keywords : List
Keyword, the line name
Returns
-------
data : GeoDataFrame
The generated bus line
stop : GeoDataFrame
The generated bus station
'''
def getlineuid(keyword,c):
url = 'http://map.baidu.com/?qt=s&wd='+urllib.parse.quote(keyword)+'&c='+c
response1 = urllib.request.urlopen(url)
searchinfo=json.loads(response1.read().decode('utf8'))
if searchinfo['content'][0]['catalogID'] ==904 or searchinfo['content'][0]['catalogID'] ==905:
try:
uidlist = list(pd.DataFrame(searchinfo['content'][8]['blinfo'])['uid'])
except:
uidlist = []
uidlist.append(searchinfo['content'][0]['uid'])
uidlist.append(searchinfo['content'][1]['uid'])
return list(set(uidlist))
else:
return []
def getcitycode(c):
url = 'http://map.baidu.com/?qt=s&wd='+urllib.parse.quote(c)
response1 = urllib.request.urlopen(url,timeout = 60)
searchinfo=json.loads(response1.read().decode('utf8'))
return str(searchinfo['content']['code'])
def getlinegeo(uid,c):
url = 'http://map.baidu.com/?qt=bsl&uid='+uid+'&c='+c
response = urllib.request.urlopen(url,timeout = 60)
searchinfo=json.loads(response.read().decode('utf8'))
linename = searchinfo['content'][0]['name']
stations = searchinfo['content'][0]['stations']
geo = searchinfo['content'][0]['geo'].split('|')[2][:-1].split(',')
stationgeo = []
stationnames = []
for station in stations:
stationname = station['name']
coo = station['geo'].split(';')[1].split('|')[0]
stationnames.append(stationname)
stationgeo.append(coo)
coo=[]
t=0
cood = ''
for each in geo:
t += 1
cood += each + ','
if t == 2:
t=0
coo.append(cood[:-1])
cood = ''
def coodconvert(coo):
coo = pd.DataFrame(list(pd.DataFrame(coo)[0].str.split(','))).astype(float)
coo[0],coo[1] = bd09mctobd09(coo[0],coo[1])
return list(coo[0].astype(str)+','+coo[1].astype(str))
return linename,coodconvert(coo),stationnames,coodconvert(stationgeo)
print('Obtaining city id:',city,end = '')
linenames = []
lines = []
c = getcitycode(city)
print('success')
stop = []
uids = []
for keyword in keywords:
print(keyword)
for uid in getlineuid(keyword,c):
if uid not in uids:
try:
linename,coo,stationnames,stationgeo = getlinegeo(uid,c)
coo = pd.DataFrame(list(pd.DataFrame(coo)[0].str.split(',')))
coo[0],coo[1] = bd09towgs84(coo[0],coo[1])
line = LineString(coo.values)
linenames.append(linename)
lines.append(line)
stops = pd.DataFrame({'stationnames':stationnames})
stops['linename']=linename
stops['geo'] = stationgeo
stops['lon'] = stops['geo'].apply(lambda row:row.split(',')[0])
stops['lat'] = stops['geo'].apply(lambda row:row.split(',')[1])
stop.append(stops)
print(linename+' success')
uids.append(uid)
except:
pass
data = gpd.GeoDataFrame()
data['linename'] = linenames
data['geometry'] = lines
data['city'] = city
stop = pd.concat(stop)
stop['lon'],stop['lat'] = bd09towgs84(stop['lon'],stop['lat'])
stop['geometry'] = gpd.points_from_xy(stop['lon'],stop['lat'])
stop = stop.drop('geo',axis = 1)
stop = gpd.GeoDataFrame(stop)
data['line'] = data['linename'].str.split('(').apply(lambda r:r[0])
stop['line'] = stop['linename'].str.split('(').apply(lambda r:r[0])
stop['id'] = range(len(stop))
stop['id'] = stop.groupby('linename')['id'].rank()
data = data.drop_duplicates(subset = ['linename'])
stop = stop.drop_duplicates(subset = ['linename','stationnames'])
return data,stop
def split_subwayline(line,stop):
'''
To slice the metro line with metro stations to obtain metro section information (This step is useful in subway passenger flow visualization)
Parameters
-------
line : GeoDataFrame
Bus/metro lines
stop : GeoDataFrame
Bus/metro stations
Returns
-------
metro_line_splited : GeoDataFrame
Generated section line shape
'''
def getline(r2,line_geometry):
ls = []
if r2['o_project']<=r2['d_project']:
tmp1 = np.linspace(r2['o_project'],r2['d_project'],10)
if r2['o_project']>r2['d_project']:
tmp1 = np.linspace(r2['o_project']-line_geometry.length,r2['d_project'],10)
tmp1[tmp1<0] = tmp1[tmp1<0]+line_geometry.length
for j in tmp1:
ls.append(line_geometry.interpolate(j))
return LineString(ls)
lss = []
for k in range(len(line)):
r = line.iloc[k]
line_geometry = r['geometry']
tmp = stop[stop['linename'] == r['linename']].copy()
for i in tmp.columns:
tmp[i+'1'] = tmp[i].shift(-1)
tmp = tmp.iloc[:-1]
tmp = tmp[['stationnames','stationnames1','geometry','geometry1','linename']]
tmp['o_project'] = tmp['geometry'].apply(lambda r1:r['geometry'].project(r1))
tmp['d_project'] = tmp['geometry1'].apply(lambda r1:r['geometry'].project(r1))
tmp['geometry'] = tmp.apply(lambda r2:getline(r2,line_geometry),axis = 1)
lss.append(tmp)
metro_line_splited = pd.concat(lss).drop('geometry1',axis = 1)
return metro_line_splited
def metro_network(stop,traveltime = 3,transfertime = 5,nxgraph = True):
'''
Inputting the metro station data and outputting the network topology model. The graph generated relies on NetworkX.
Parameters
-------
stop : GeoDataFrame
Bus/metro stations
traveltime : number
Travel time per section
transfertime : number
Travel time per transfer
nxgraph : bool
Default True, if True then output the network G constructed by NetworkX, if False then output the edges1(line section),edge2(station transfer), and the node of the network
Returns
-------
G : networkx.classes.graph.Graph
Network G built by networkx. Output when the nxgraph parameter is True
edge1 : DataFrame
Network edge for line section. Output when the nxgraph parameter is False
edge2 : DataFrame
Network edge for transfering. Output when the nxgraph parameter is False
node : List
Network nodes. Output when the nxgraph parameter is False
'''
linestop = stop.copy()
for i in linestop.columns:
linestop[i+'1'] = linestop[i].shift(-1)
linestop = linestop[linestop['linename'] == linestop['linename1']].copy()
linestop = linestop.rename(columns = {'stationnames':'ostop','stationnames1':'dstop'})
linestop['ostation'] = linestop['line']+linestop['ostop']
linestop['dstation'] = linestop['line']+linestop['dstop']
edge1 = linestop[['ostation','dstation']].copy()
edge1['duration'] = traveltime
linestop = stop.copy()
linestop['station'] = linestop['line'] + linestop['stationnames']
tmp = linestop.groupby(['stationnames'])['linename'].count().rename('count').reset_index()
tmp = pd.merge(linestop,tmp[tmp['count']>2]['stationnames'],on = 'stationnames')
tmp = tmp[['stationnames','line','station']].drop_duplicates()
tmp = pd.merge(tmp,tmp,on ='stationnames')
edge2 =tmp[tmp['line_x'] != tmp['line_y']][['station_x','station_y']]
edge2['duration'] = transfertime
edge2.columns = edge1.columns
edge = edge1.append(edge2)
node = list(edge['ostation'].drop_duplicates())
if nxgraph:
import networkx as nx
G = nx.Graph()
G.add_nodes_from(node)
G.add_weighted_edges_from(edge.values)
return G
else:
return edge1,edge2,node
| 35.782334 | 179 | 0.587234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,988 | 0.351582 |
0da9e74b8a89d0e0f298b79e792dfb00d17cb34f | 83 | py | Python | user/apps.py | salimking/movepass | e03fa519f41974f25a70d23179722654b292ef11 | [
"MIT"
] | null | null | null | user/apps.py | salimking/movepass | e03fa519f41974f25a70d23179722654b292ef11 | [
"MIT"
] | null | null | null | user/apps.py | salimking/movepass | e03fa519f41974f25a70d23179722654b292ef11 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class userConfig(AppConfig):
name = 'user'
| 13.833333 | 33 | 0.73494 | 46 | 0.554217 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.072289 |
0dac47ce9c74512652d2d18d61755c2ace5c01fc | 819 | py | Python | banner4.py | KingNasirul/BHBVirus | 219c28ace488e9e47d94f82911c86a1d5974f941 | [
"Unlicense"
] | null | null | null | banner4.py | KingNasirul/BHBVirus | 219c28ace488e9e47d94f82911c86a1d5974f941 | [
"Unlicense"
] | null | null | null | banner4.py | KingNasirul/BHBVirus | 219c28ace488e9e47d94f82911c86a1d5974f941 | [
"Unlicense"
] | null | null | null |
import time
import sys
# Set color
R = '\033[31m' # Red
N = '\033[1;37m' # White
G = '\033[32m' # Green
O = '\033[0;33m' # Orange
B = '\033[1;34m' #Blue
def delay_print(s):
for c in s:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.01)
delay_print
delay_print (""+R+" db db d888888b d8888b. db db .d8888. \n")
delay_print (""+R+" 88 88 `88' 88 `8D 88 88 88' YP \n")
delay_print (""+R+" Y8 8P 88 88oobY' 88 88 `8bo. \n")
delay_print (""+R+" `8b d8' 88 88`8b 88 88 `Y8b. \n")
delay_print (""+R+" `8bd8' .88. 88 `88. 88b d88 db 8D \n")
delay_print (""+R+" YP Y888888P 88 YD ~Y8888P' `8888Y' "+G+"kingNasirul\n")
print
| 32.76 | 82 | 0.467643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.501832 |
0dadea50da63c33394e8442bed79c49670d3fd33 | 1,864 | py | Python | flod_facilities_backend/app.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2017-10-17T12:15:28.000Z | 2017-10-17T12:15:28.000Z | flod_facilities_backend/app.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2021-03-22T17:15:52.000Z | 2022-01-13T00:39:58.000Z | flod_facilities_backend/app.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-09-09T13:35:03.000Z | 2019-09-09T13:35:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from logging import StreamHandler, INFO
from flask import Flask
from flask.ext.mail import Message, Mail
from api import create_api
from database import init_db
API_VERSION = "v1"
def check_environment(app):
file_backend = os.environ.get('FILE_BACKEND', 'file').lower()
if 'FILE_BACKEND' not in os.environ:
app.logger.info('FILE_BACKEND is not set, will default to "%s"',
file_backend)
if file_backend == 's3':
if 'S3_BUCKET' not in os.environ:
app.logger.warn('S3_BUCKET is not set, will default to "flod"')
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise EnvironmentError(('AWS_ACCESS_KEY_ID must be set for S3 '
'backend'))
if 'AWS_SECRET_ACCESS_KEY' not in os.environ:
raise EnvironmentError(('AWS_SECRET_ACCESS_KEY must be set for'
' S3 backend'))
if file_backend == 'file' and 'UPLOAD_PATH' not in os.environ:
app.logger.info('UPLOAD_PATH is not set, will default to /tmp')
if 'AUTH_TOKEN_SECRET' not in os.environ:
raise EnvironmentError('AUTH_TOKEN_SECRET must be set')
def create_app(db_url):
app = Flask(__name__)
(app.db_session, app.db_metadata, app.db_engine) = init_db(db_url)
@app.teardown_request
def shutdown_session(exception=None):
app.db_session.remove()
create_api(app, API_VERSION)
if not app.debug:
stream_handler = StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(INFO)
check_environment(app)
return app
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app = create_app(os.environ.get('DATABASE_URL'))
app.run(host='0.0.0.0', port=port, debug=True)
| 31.066667 | 75 | 0.647532 | 0 | 0 | 0 | 0 | 95 | 0.050966 | 0 | 0 | 486 | 0.26073 |
0dae67493e4ce5dba398f2d0ee4a34110cbc91fc | 1,463 | py | Python | aioneo4j4/client.py | zhangmoon/aioneo4j4 | 94b4544d2764eba5eea740959f194e289a581fd2 | [
"MIT"
] | null | null | null | aioneo4j4/client.py | zhangmoon/aioneo4j4 | 94b4544d2764eba5eea740959f194e289a581fd2 | [
"MIT"
] | null | null | null | aioneo4j4/client.py | zhangmoon/aioneo4j4 | 94b4544d2764eba5eea740959f194e289a581fd2 | [
"MIT"
] | null | null | null | import asyncio
import collections
from yarl import URL
from .transport import Transport
class Client:
def __init__(
self,
url='http://127.0.0.1:7474/',
auth=None,
transport=Transport,
request_timeout=...,
*, loop=None
):
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
url = URL(url)
if url.user and url.password:
auth = url.user, url.password
url = url.with_user(None)
# TODO: not sure is it needed
url = url.with_password(None)
self.transport = transport(
url=url,
auth=auth,
request_timeout=request_timeout,
loop=self.loop,
)
async def begin_and_commit(
self,
cypher,
db='neo4j',
path='db/%s/tx/commit',
request_timeout=...,
):
_, data = await self.transport.perform_request(
method='POST',
path=path % db,
data={
"statements": [{
"statement": cypher,
}]
},
request_timeout=request_timeout,
)
return data
async def close(self):
await self.transport.close()
async def __aenter__(self): # noqa
return self
async def __aexit__(self, *exc_info): # noqa
await self.close()
| 20.041096 | 55 | 0.500342 | 1,370 | 0.936432 | 0 | 0 | 0 | 0 | 666 | 0.455229 | 118 | 0.080656 |
0dae8fa388ad674d62363a2f970444080cf8a34c | 125 | py | Python | a.5.4.py | AmanMishra148/python-repo | 5b07fe19f2058fc2c909b96ae173f4346ac8d3da | [
"bzip2-1.0.6"
] | null | null | null | a.5.4.py | AmanMishra148/python-repo | 5b07fe19f2058fc2c909b96ae173f4346ac8d3da | [
"bzip2-1.0.6"
] | 1 | 2021-10-18T09:59:45.000Z | 2021-10-18T09:59:45.000Z | a.5.4.py | AmanMishra148/python-repo | 5b07fe19f2058fc2c909b96ae173f4346ac8d3da | [
"bzip2-1.0.6"
] | 4 | 2021-10-18T09:40:54.000Z | 2021-10-19T14:14:28.000Z | #Script to calc. area of circle.
print("enter the radius")
r= float(input())
area= 3.14*r**2
print("area of circle is",area)
| 20.833333 | 32 | 0.688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.552 |
0db1ad1545f9291e0a6a356a3b76f6cf63cf27eb | 2,132 | py | Python | examples/jet_substructure/syn.py | juliovicenzi/logicnets | ce4ca89e3b11702bade591c320177f17b7d8d187 | [
"Apache-2.0"
] | null | null | null | examples/jet_substructure/syn.py | juliovicenzi/logicnets | ce4ca89e3b11702bade591c320177f17b7d8d187 | [
"Apache-2.0"
] | null | null | null | examples/jet_substructure/syn.py | juliovicenzi/logicnets | ce4ca89e3b11702bade591c320177f17b7d8d187 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2021 Xilinx, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch
from torch.utils.data import DataLoader
from logicnets.nn import generate_truth_tables, \
lut_inference, \
module_list_to_verilog_module
from train import configs, model_config, dataset_config, test
from dataset import JetSubstructureDataset
from models import JetSubstructureNeqModel, JetSubstructureLutModel
from logicnets.synthesis import synthesize_and_get_resource_counts
from logicnets.util import proc_postsynth_file
if __name__ == "__main__":
parser = ArgumentParser(description="Synthesize convert a PyTorch trained model into verilog")
parser.add_argument('--fpga-part', type=str, default="xcu280-fsvh2892-2L-e",
help="FPGA synthesis part (default: %(default)s)")
parser.add_argument('--clock-period', type=float, default=1.0,
help="Target clock frequency to use during Vivado synthesis (default: %(default)s)")
parser.add_argument('--log-dir', type=str, default='./log',
help="A location to store the log output of the training run and the output model (default: %(default)s)")
args = parser.parse_args()
if not os.path.exists(args.log_dir):
print(f"Could not find log directory {args.log_dir}")
exit(-1)
print("Running out-of-context synthesis")
ret = synthesize_and_get_resource_counts(
args.log_dir,
"logicnet",
fpga_part=args.fpga_part,
clk_period_ns=args.clock_period,
post_synthesis=1)
| 39.481481 | 130 | 0.720919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,024 | 0.4803 |
0db4cdbd4df856ae4ab6fd19cf45e6fa8d7272ae | 348 | py | Python | Test.py | ashmoreinc/IP-Scanner | 55bbf45b43c4f8dd4d15066c0bebf2dbf8ed689e | [
"MIT"
] | 4 | 2018-11-08T15:37:18.000Z | 2022-03-18T01:59:20.000Z | Test.py | ashmoreinc/IP-Scanner | 55bbf45b43c4f8dd4d15066c0bebf2dbf8ed689e | [
"MIT"
] | null | null | null | Test.py | ashmoreinc/IP-Scanner | 55bbf45b43c4f8dd4d15066c0bebf2dbf8ed689e | [
"MIT"
] | 1 | 2020-12-11T13:35:43.000Z | 2020-12-11T13:35:43.000Z | from Scanner import *
from threading import Thread
Scanner = Scan_Handler(verbose=False, verbosity="high", threads=50, ports=[80, 443])
Scanner.Start_Scanner("192.168.0.1", "192.168.0.5")
def Background ():
for data in Scanner.Get_Outputs_Realtime():
print(str(data))
bg = Thread(target=Background)
#bg.daemon = True
bg.start() | 26.769231 | 85 | 0.704023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.149425 |
0db528e74438080829d841b6eb4aaef15ebfc109 | 325 | py | Python | tests/views/test_ping.py | Eldies/image_storage | 6bdf55b426813da2e45407418a09cc585b245a22 | [
"MIT"
] | null | null | null | tests/views/test_ping.py | Eldies/image_storage | 6bdf55b426813da2e45407418a09cc585b245a22 | [
"MIT"
] | null | null | null | tests/views/test_ping.py | Eldies/image_storage | 6bdf55b426813da2e45407418a09cc585b245a22 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from app import app
class TestPingView(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
self.client = app.test_client()
def test_ping(self):
response = self.client.get('/ping')
assert response.data.decode('utf-8') == 'pong'
| 21.666667 | 54 | 0.621538 | 261 | 0.803077 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.16 |
0db65eebf31a3d9c1ea003346b7e0a6a218692e0 | 527 | py | Python | DailyAssingments/Week1/Day2Assignments2.py | smooth-dasilva/Smoothstack-Workload | 165ac6df72c7765a594fa472131def1ab2e44c78 | [
"MIT"
] | null | null | null | DailyAssingments/Week1/Day2Assignments2.py | smooth-dasilva/Smoothstack-Workload | 165ac6df72c7765a594fa472131def1ab2e44c78 | [
"MIT"
] | null | null | null | DailyAssingments/Week1/Day2Assignments2.py | smooth-dasilva/Smoothstack-Workload | 165ac6df72c7765a594fa472131def1ab2e44c78 | [
"MIT"
] | null | null | null |
#doc4
#1.
print([1, 'Hello', 1.0])
#2
print([1, 1, [1,2]][2][1])
#3. out: 'b', 'c'
print(['a','b', 'c'][1:])
#4.
weekDict= {'Sunday':0,'Monday':1,'Tuesday':2,'Wednesday':3,'Thursday':4,'Friday':5,'Saturday':6, }
#5. out: 2 if you replace D[k1][1] with D['k1][1]
D={'k1':[1,2,3]}
print(D['k1'][1])
#6.
tup = ( 'a', [1,[2,3]] )
print(tup)
#7.
x= set('Missipi')
print(x)
#8
x.add('X')
print(x)
#9 out: [1, 2, ,3]
print(set([1,1,2,3]))
#10
for i in range(2000,3001):
if (i%7==0) and (i%5!=0):
print(i) | 13.175 | 98 | 0.489564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.404175 |
0db66f59bd69f7523d8edb86aebc42ecb9136011 | 4,609 | py | Python | 10/14/solve.py | juancroldan/tuenti-challenge | 4b0b233f457366dd78e80c011ade138cd162e297 | [
"Unlicense"
] | null | null | null | 10/14/solve.py | juancroldan/tuenti-challenge | 4b0b233f457366dd78e80c011ade138cd162e297 | [
"Unlicense"
] | null | null | null | 10/14/solve.py | juancroldan/tuenti-challenge | 4b0b233f457366dd78e80c011ade138cd162e297 | [
"Unlicense"
] | null | null | null | from threading import Thread
from time import sleep
from socket import socket, AF_INET, SOCK_STREAM
from re import compile
HOST = ('52.49.91.111', 2092)
MAX_BUFFER = 2**15
MSGS = compile(r'^ROUND (\d+): (\d+) -> (\w+) \{(.+?)\}( no_proposal)?( \(ROUND FINISHED\))?$').search
LEARN_ARGS = compile(r'servers: \[(.+?)\], secret_owner: (\d+)').search
PROMISE_ARGS = compile(r'(\d+),(\d+)').search
ACCEPT_ARGS = compile(r'id: \{(\d+),(\d+)\}, value: \{servers: \[(.+?)\], secret_owner: (\d+)\}').search
NUM_NODES = 7
class Node:
connected = set()
injected = set()
def __init__(self, master):
self.master = master
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.connect(HOST)
self.server = None
self.n = 1000
self.state = 'idle'
self.promises_received = 0
self.accepted_received = 0
self.quorum = None
def say(self, msg='', parse=True):
if self.master: print('=' * 83)
if len(msg):
if self.master: print('--- SENT %s\n%s' % ('-' * 74, msg.strip()))
self.socket.send(msg.encode())
ans = self.socket.recv(MAX_BUFFER).decode().strip()
if len(ans) and self.master: print('--- RECEIVED %s\n%s' % ('-' * 70, ans))
if ans.startswith('SERVER ID'):
self.server = int(ans.rsplit(' ID: ', 1)[-1].split('\n', 1)[0])
Node.connected.add(self.server)
msgs = [MSGS(msg) for msg in ans.split('\n')]
msgs = [msg.groups() for msg in msgs if msg is not None]
res = []
for rnd, src, cmd, args, no_prop, end in msgs:
if cmd in ['LEARN', 'ACCEPTED']:
args = LEARN_ARGS(args).groups()
args = {
'servers': list(map(int, args[0].split(','))),
'secret_owner': int(args[1])
}
elif cmd == 'PROMISE':
args = PROMISE_ARGS(args).groups()
args = {
'n': int(args[0]),
'server': int(args[1])
}
elif cmd == 'PREPARE':
self.socket.send(('PROMISE %s no_proposal -> %s\n' % (args, src)).encode())
elif cmd == 'ACCEPT':
n, server, servers, secret_owner = ACCEPT_ARGS(args).groups()
self.socket.send(('ACCEPTED {servers: [%s], secret_owner: %s} no_proposal -> %s\n' % (servers, secret_owner, src)).encode())
else:
print('I don\'t know anything about %s' % cmd)
res.append({
'round': int(rnd),
'src': int(src),
'cmd': cmd,
'args': args,
'no_prop': no_prop is not None,
'end': end is not None
})
return res
def run(self):
self.say() # get server id
while len(Node.connected) < NUM_NODES: sleep(.1) # wait for every node to connect
if self.master: print(Node.connected)
next_msg = ''
while True:
sleep(.01)
msgs = self.say(next_msg)
next_msg = ''
for msg in msgs:
if msg['cmd'] == 'LEARN':
self.quorum = [s for s in msg['args']['servers']]
self.secret_owner = msg['args']['secret_owner']
if self.server in self.quorum and self.master:
self.state = 'prepare'
if self.state == 'prepare':
for dst in self.quorum:
next_msg += 'PREPARE {%d,%d} -> %d\n' % (self.n, self.server, dst)
self.promises_received = 0
self.state = 'accept'
elif self.state == 'accept':
self.promises_received += len([
m for m in msgs
if m['cmd'] == 'PROMISE' and m['args']['n'] == self.n and m['args']['server'] == self.server
])
if self.promises_received == len(self.quorum):
kick_members = [m for m in self.quorum if m not in Node.connected and m != self.secret_owner]
add_members = [m for m in Node.connected if m not in self.quorum]
if len([m for m in self.quorum if m in Node.connected]) / len(self.quorum) > .5:
print("=== INJECTING ALTERATION: CHANGE KEY OWNER ===")
new_quorum = self.quorum
new_owner = self.server
elif len(add_members):
print("=== INJECTING ALTERATION: ADD MEMBER ===")
new_quorum = self.quorum + [add_members[0]]
new_owner = self.secret_owner
elif len(kick_members) and len(self.quorum) > 3:
print("=== INJECTING ALTERATION: KICK MEMBER ===")
kick = kick_members[0]
new_quorum = [m for m in self.quorum if m != kick]
new_owner = self.secret_owner
for dst in self.quorum:
next_msg += 'ACCEPT {id: {%d,%d}, value: {servers: [%s], secret_owner: %d}} -> %d\n' % (
self.n,
self.server,
','.join(map(str, new_quorum)),
new_owner,
dst
)
self.n += 1
self.quorum = new_quorum
self.secret_owner = new_owner
self.accepted_received = 0
self.state = 'idle'
if __name__ == '__main__':
nodes = [Node(n == 0) for n in range(NUM_NODES)]
threads = [Thread(target=node.run) for node in nodes]
[t.start() for t in threads]
[t.join() for t in threads] | 35.183206 | 128 | 0.605988 | 3,903 | 0.846821 | 0 | 0 | 0 | 0 | 0 | 0 | 968 | 0.210024 |
0db68207e367ebc14875e48b897fec665fce2710 | 7,600 | py | Python | tests/test_packages/test_skills/test_registration_aw1/test_behaviours.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 126 | 2019-09-07T09:32:44.000Z | 2022-03-29T14:28:41.000Z | tests/test_packages/test_skills/test_registration_aw1/test_behaviours.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1,814 | 2019-08-24T10:08:07.000Z | 2022-03-31T14:28:36.000Z | tests/test_packages/test_skills/test_registration_aw1/test_behaviours.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 46 | 2019-09-03T22:13:58.000Z | 2022-03-22T01:25:16.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the behaviour classes of the registration_aw1 skill."""
import logging
from pathlib import Path
from unittest.mock import patch
from aea.helpers.transaction.base import RawMessage, Terms
from packages.fetchai.protocols.register.message import RegisterMessage
from packages.fetchai.protocols.signing.message import SigningMessage
from tests.conftest import ROOT_DIR
from tests.test_packages.test_skills.test_registration_aw1.intermediate_class import (
RegiatrationAW1TestCase,
)
class TestAW1Registration(RegiatrationAW1TestCase):
"""Test registration behaviour of registration_aw1."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "registration_aw1")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
def test_setup_i(self):
"""Test the setup method of the registration behaviour NOT developer_handle_mode and announce_termination_key is None."""
# setup
self.strategy.announce_termination_key = None
self.strategy.developer_handle_mode = False
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_behaviour.setup()
# after
self.assert_quantity_in_decision_making_queue(1)
message = self.get_message_from_decision_maker_inbox()
has_attributes, error_str = self.message_has_attributes(
actual_message=message,
message_type=SigningMessage,
performative=SigningMessage.Performative.SIGN_MESSAGE,
to=self.skill.skill_context.decision_maker_address,
sender=str(self.skill.skill_context.skill_id),
raw_message=RawMessage(
self.strategy.ledger_id, self.strategy.ethereum_address.encode("utf-8")
),
terms=Terms(
ledger_id=self.strategy.ledger_id,
sender_address="",
counterparty_address="",
amount_by_currency_id={},
quantities_by_good_id={},
nonce="",
),
)
assert has_attributes, error_str
mock_logger.assert_any_call(
logging.INFO, "sending signing_msg to decision maker..."
)
def test_setup_ii(self):
"""Test the setup method of the registration behaviour IN developer_handle_mode and announce_termination_key is NOT None."""
# setup
key = "some_key"
self.strategy.announce_termination_key = key
self.strategy.developer_handle_only = True
# operation
self.register_behaviour.setup()
# after
self.assert_quantity_in_decision_making_queue(0)
assert self.skill.skill_context.shared_state[key] is False
def test_act_i(self):
"""Test the act method of the registration behaviour where is_ready_to_register is False."""
# setup
self.strategy.is_ready_to_register = False
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_ii(self):
"""Test the act method of the registration behaviour where aw1_registration_aeas is None."""
# setup
self.strategy.is_ready_to_register = True
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_iii(self):
"""Test the act method of the registration behaviour where is_registered is True."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = True
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_iv(self):
"""Test the act method of the registration behaviour where is_registration_pending is True."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = False
self.strategy.is_registration_pending = True
# operation
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
def test_act_v(self):
"""Test the act method of the registration behaviour where _register_for_aw1 is called."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = False
self.strategy.is_registration_pending = False
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(len(self.aw1_registration_aeas))
assert self.strategy.is_registration_pending is True
# _register_for_aw1
info = self.strategy.registration_info
message = self.get_message_from_outbox()
has_attributes, error_str = self.message_has_attributes(
actual_message=message,
message_type=RegisterMessage,
performative=RegisterMessage.Performative.REGISTER,
to=self.aw1_registration_aea,
sender=self.skill.skill_context.agent_address,
info=info,
)
assert has_attributes, error_str
mock_logger.assert_any_call(
logging.INFO, f"sending registration info: {info}",
)
def test_act_vi(self):
"""Test the act method of the registration behaviour where aw1 agent is NOT in the whitelist."""
# setup
self.strategy.is_ready_to_register = True
self.skill.skill_context.shared_state[
self.shared_storage_key
] = self.aw1_registration_aeas
self.strategy.is_registered = False
self.strategy.is_registration_pending = False
self.strategy._whitelist = []
# operation
with patch.object(self.logger, "log") as mock_logger:
self.register_behaviour.act()
# after
self.assert_quantity_in_outbox(0)
assert self.strategy.is_registration_pending is True
mock_logger.assert_any_call(
logging.INFO,
f"agent={self.aw1_registration_aea} not in whitelist={self.strategy._whitelist}",
)
def test_teardown(self):
"""Test the teardown method of the registration behaviour."""
assert self.register_behaviour.teardown() is None
self.assert_quantity_in_outbox(0)
| 35.185185 | 132 | 0.656579 | 6,278 | 0.826053 | 0 | 0 | 92 | 0.012105 | 0 | 0 | 2,264 | 0.297895 |
0db95169c1f29d45bb1b316d97dd3b1f2a0218c5 | 5,971 | py | Python | anopool/pool.py | willtrnr/anopool | 0e3f882ae216e1897bdff1cbb311f3c2960908db | [
"MIT"
] | null | null | null | anopool/pool.py | willtrnr/anopool | 0e3f882ae216e1897bdff1cbb311f3c2960908db | [
"MIT"
] | null | null | null | anopool/pool.py | willtrnr/anopool | 0e3f882ae216e1897bdff1cbb311f3c2960908db | [
"MIT"
] | null | null | null | """Generic object pool"""
from __future__ import annotations
# Names re-exported as this module's public API.
__all__ = [
    "Manager",
    "Pool",
]
import contextlib
import dataclasses
import logging
import queue
import threading
from abc import ABCMeta, abstractmethod
from typing import Generator, Generic, Optional, TypeVar

from ._common import DEFAULT_SIZE
from .exceptions import PoolClosedError
logger = logging.getLogger(__name__)  # module-scoped logger for pool diagnostics
_T = TypeVar("_T")  # type of the objects held by the pool
class Manager(Generic[_T], metaclass=ABCMeta):
    """A pool object manager.

    Manages the lifecycle of pool objects: creation, recycling of released
    objects, and cleanup of discarded ones.  Subclasses must implement
    :meth:`create`; :meth:`recycle` and :meth:`discard` default to no-ops.
    """

    # Without @abstractmethod, ABCMeta enforced nothing: incomplete
    # subclasses could be instantiated and create() would silently return
    # None (despite the ``-> _T`` annotation), poisoning the pool.
    @abstractmethod
    def create(self) -> _T:
        """Create a new pool object.

        Returns:
            A fresh, ready-to-use pool object.
        """

    def recycle(self, __obj: _T) -> None:
        """Check liveness and reset released objects.

        If the object is no longer valid, this method should raise an exception to
        signal it and prevent its return to the pool. A slot will be open to allow its
        replacement.

        Args:
            obj: The returned pool object.

        Raises:
            Exception: When the object is no longer valid.
        """

    def discard(self, __obj: _T) -> None:
        """Perform cleanup of discarded objects.

        This method is called for discarding both invalid objects that failed the
        recycling and live objects on pool closure. Liveness should not be assumed and
        this method should ideally not raise any exception unless there's a failure
        that will lead to a resource leak.

        Args:
            obj: The object to be discarded.
        """
@dataclasses.dataclass
class PoolState(Generic[_T]):
    """Bundle of synchronization primitives and storage backing one Pool.

    A fresh instance is installed whenever the pool is (re)initialized, so
    cleanup of a closed pool can keep working on the old state object.
    """

    is_open: threading.Event  # set while the pool accepts acquisitions
    count: threading.Semaphore  # bounds the number of concurrently live objects
    lock: threading.Condition  # used to wait for a free object or slot
    idle: queue.SimpleQueue[_T]  # released objects ready for checkout
class Pool(Generic[_T]):
    """An object pool.

    Objects are created on demand by *manager*, bounded to ``max_size``
    concurrently live instances; idle objects wait in a queue for reuse.

    Args:
        manager: The object manager to use.
        max_size: Optional; The maximum number of concurrent objects available.
    """

    _manager: Manager[_T]  # lifecycle hooks: create / recycle / discard
    _max_size: int  # hard cap on concurrently live objects
    _state: PoolState[_T]  # runtime state; replaced wholesale on close()
def __init__(
self,
manager: Manager[_T],
max_size: Optional[int] = None,
) -> None:
if max_size is None:
max_size = DEFAULT_SIZE
elif max_size <= 0:
raise ValueError("max_size must be at least 1")
self._manager = manager
self._max_size = max_size
self._init_state()
def __enter__(self: _T_Pool) -> _T_Pool:
self.open()
return self
def __exit__(self, exc_type, exc_value, exc_tb) -> None:
del exc_type, exc_value, exc_tb
self.close()
def _init_state(self) -> None:
self._state = PoolState(
is_open=threading.Event(),
count=threading.BoundedSemaphore(self._max_size),
lock=threading.Condition(lock=threading.Lock()),
idle=queue.SimpleQueue(),
)
def is_open(self) -> bool:
"""Check if the pool is open.
Returns:
bool: Whether the pool is open.
"""
return self._state.is_open.is_set()
def open(self) -> None:
"""Initialize the pool."""
self._state.is_open.set()
def close(self) -> None:
"""Close the pool and discard its objects."""
state = self._state
if not state.is_open.is_set():
return
self._init_state()
state.is_open.clear()
while True:
try:
self._manager.discard(state.idle.get_nowait())
except queue.Empty:
break
except Exception: # pylint: disable=broad-except
logger.warning("Discard error, possible resource leak", exc_info=True)
with state.lock:
state.lock.notify_all()
@contextlib.contextmanager
def acquire(self) -> Generator[_T, None, None]:
"""Acquire an object from the pool.
Yields:
An object from the pool.
"""
state = self._state
while True:
if not state.is_open.is_set():
raise PoolClosedError()
# Try to get an object from the pool first
try:
obj = state.idle.get_nowait()
logger.debug("Checked out object from pool: %s", obj)
break
except queue.Empty:
pass
# If we can allocate more, create a new one
# pylint: disable=consider-using-with
if state.count.acquire(blocking=False):
try:
obj = self._manager.create()
logger.debug("Created new object: %s", obj)
break
except:
state.count.release()
raise
# Wait until an object is available or we can allocate more
with state.lock:
logger.debug("Waiting for free object or slot")
state.lock.wait()
try:
yield obj
finally:
try:
if not state.is_open.is_set():
raise PoolClosedError()
self._manager.recycle(obj)
logger.debug("Object succeeded recycle: %s", obj)
if not state.is_open.is_set():
raise PoolClosedError()
state.idle.put(obj)
logger.debug("Object returned to pool: %s", obj)
except Exception: # pylint: disable=broad-except
logger.debug("Recycle failed discarding: %s", obj, exc_info=True)
try:
self._manager.discard(obj)
except Exception: # pylint: disable=broad-except
logger.warning(
"Discard error, possible resource leak", exc_info=True
)
state.count.release()
finally:
with state.lock:
state.lock.notify()
_T_Pool = TypeVar("_T_Pool", bound=Pool)
| 28.032864 | 86 | 0.567577 | 5,452 | 0.91308 | 2,176 | 0.364428 | 2,381 | 0.398761 | 0 | 0 | 1,989 | 0.33311 |
0dbad2c362c6766c5ba6dfdc508d36777761cdeb | 1,090 | py | Python | deepab/util/get_bins.py | antonkulaga/DeepAb | 51a32d06d19815705bdbfb35a8a9518c17ec313a | [
"RSA-MD"
] | 67 | 2021-07-02T08:31:10.000Z | 2022-03-30T01:25:11.000Z | deepab/util/get_bins.py | antonkulaga/DeepAb | 51a32d06d19815705bdbfb35a8a9518c17ec313a | [
"RSA-MD"
] | 9 | 2021-08-18T10:32:27.000Z | 2022-03-30T06:40:05.000Z | deepab/util/get_bins.py | antonkulaga/DeepAb | 51a32d06d19815705bdbfb35a8a9518c17ec313a | [
"RSA-MD"
] | 16 | 2021-07-17T08:33:30.000Z | 2022-03-29T07:36:34.000Z | import math
def get_dist_bins(num_bins, interval=0.5):
    """Build ``num_bins`` distance bins of width ``interval`` starting at 0.

    The first ``num_bins - 1`` bins are finite intervals; the last bin is
    open-ended with an upper edge of +Inf. Requires ``num_bins >= 2``.
    """
    bins = []
    for idx in range(num_bins - 1):
        bins.append((interval * idx, interval * (idx + 1)))
    bins.append((bins[-1][1], float('Inf')))
    return bins
def get_dihedral_bins(num_bins, rad=False):
    """Split the dihedral range [-180, 180] degrees into equal-width bins.

    Returns the bins converted to radians instead when ``rad`` is true.
    """
    width = 2 * 180 / num_bins
    edges = [-180 + width * i for i in range(num_bins + 1)]
    bins = list(zip(edges[:-1], edges[1:]))
    if rad:
        return deg_bins_to_rad(bins)
    return bins
def get_planar_bins(num_bins, rad=False):
    """Split the planar-angle range [0, 180] degrees into equal-width bins.

    Returns the bins converted to radians instead when ``rad`` is true.
    """
    width = 180 / num_bins
    edges = [width * i for i in range(num_bins + 1)]
    bins = list(zip(edges[:-1], edges[1:]))
    if rad:
        return deg_bins_to_rad(bins)
    return bins
def deg_bins_to_rad(bins):
    """Convert a list of (lower, upper) bins from degrees to radians."""
    converted = []
    for lower, upper in bins:
        # Keep the exact expression order (x * pi / 180) so results are
        # bit-identical to a direct conversion.
        converted.append((lower * math.pi / 180, upper * math.pi / 180))
    return converted
def get_bin_values(bins):
    """Return a representative value (approximate center) for each bin.

    The half-width is inferred from the spacing between the second and
    third bin starts, so at least three bins are required; the first value
    is recomputed from the second assuming uniform spacing.
    """
    starts = [pair[0] for pair in bins]
    half_width = (starts[2] - starts[1]) / 2
    centers = [start + half_width for start in starts]
    centers[0] = centers[1] - 2 * half_width
    return centers
| 24.772727 | 76 | 0.617431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.004587 |
0dbc3d65f08a855cd3326a6dfe34ef3f6203d093 | 8,155 | py | Python | tests/test_manifest.py | markfinger/python-webpack | 41ed0a3afac0dc96cb22093bd2da1dcbd31cfc42 | [
"MIT"
] | 66 | 2015-05-06T17:20:20.000Z | 2021-07-08T02:56:21.000Z | tests/test_manifest.py | markfinger/python-webpack | 41ed0a3afac0dc96cb22093bd2da1dcbd31cfc42 | [
"MIT"
] | 26 | 2015-05-03T11:38:24.000Z | 2016-01-01T22:32:45.000Z | tests/test_manifest.py | markfinger/python-webpack | 41ed0a3afac0dc96cb22093bd2da1dcbd31cfc42 | [
"MIT"
] | 15 | 2015-05-11T19:02:15.000Z | 2020-11-29T20:23:17.000Z | import unittest
import os
import json
import mock
import hashlib
from webpack.conf import Conf
from webpack.manifest import generate_manifest, generate_key, write_manifest, read_manifest, populate_manifest_file
from webpack.compiler import webpack
from .settings import ConfigFiles, OUTPUT_ROOT, WEBPACK
from .utils import clean_output_root
class TestManifest(unittest.TestCase):
    """Exercises webpack manifest generation, disk round-tripping and usage.

    Manifests are built from one or more webpack config files (optionally
    paired with contexts), written to and read from disk, and consumed by
    the compiler via the USE_MANIFEST / MANIFEST_PATH settings.
    """
    @classmethod
    def setUpClass(cls):
        # Start from an empty output directory so artifacts from a previous
        # run cannot leak into assertions.
        clean_output_root()
    @classmethod
    def tearDownClass(cls):
        clean_output_root()
    def test_a_manifest_key_is_relative(self):
        key = generate_key(ConfigFiles.BASIC_CONFIG)
        self.assertEqual(key, os.path.join('basic', 'webpack.config.js'))
    def test_a_manifest_key_contains_the_context(self):
        context = {'foo': 'bar'}
        key = generate_key(ConfigFiles.BASIC_CONFIG, context)
        # The context is appended to the key as the md5 of its JSON form.
        hashed_context = hashlib.md5(json.dumps(context).encode('utf-8')).hexdigest()
        expected = os.path.join('basic', 'webpack.config.js') + '__' + hashed_context
        self.assertEqual(key, expected)
    def test_a_manifest_can_be_generated(self):
        manifest = generate_manifest(
            (ConfigFiles.BASIC_CONFIG,)
        )
        self.assertIsInstance(manifest, dict)
        self.assertEqual(len(manifest.keys()), 1)
        key = generate_key(ConfigFiles.BASIC_CONFIG)
        self.assertIn(key, manifest)
        entry = manifest[key]
        # The manifest entry must match a freshly-compiled bundle's data.
        bundle = webpack(ConfigFiles.BASIC_CONFIG)
        self.assertEqual(entry, bundle.data)
    def test_a_manifest_can_be_generated_from_a_dictionary(self):
        manifest = generate_manifest({
            ConfigFiles.BASIC_CONFIG: ()
        })
        self.assertIsInstance(manifest, dict)
        self.assertEqual(len(manifest.keys()), 1)
        key = generate_key(ConfigFiles.BASIC_CONFIG)
        self.assertIn(key, manifest)
        entry = manifest[key]
        bundle = webpack(ConfigFiles.BASIC_CONFIG)
        self.assertEqual(entry, bundle.data)
    def test_a_manifest_can_be_generated_from_multiple_config_files(self):
        manifest = generate_manifest(
            (
                ConfigFiles.BASIC_CONFIG,
                ConfigFiles.LIBRARY_CONFIG,
            ),
        )
        self.assertIsInstance(manifest, dict)
        self.assertEqual(len(manifest.keys()), 2)
        key1 = generate_key(ConfigFiles.BASIC_CONFIG)
        self.assertIn(key1, manifest)
        entry1 = manifest[key1]
        bundle1 = webpack(ConfigFiles.BASIC_CONFIG)
        self.assertEqual(entry1, bundle1.data)
        key2 = generate_key(ConfigFiles.LIBRARY_CONFIG)
        self.assertIn(key2, manifest)
        entry2 = manifest[key2]
        bundle2 = webpack(ConfigFiles.LIBRARY_CONFIG)
        self.assertEqual(entry2, bundle2.data)
    def test_a_manifest_can_be_generated_from_multiple_config_files_in_a_dictionary(self):
        manifest = generate_manifest({
            ConfigFiles.BASIC_CONFIG: (),
            ConfigFiles.LIBRARY_CONFIG: (),
        })
        self.assertIsInstance(manifest, dict)
        self.assertEqual(len(manifest.keys()), 2)
        key1 = generate_key(ConfigFiles.BASIC_CONFIG)
        self.assertIn(key1, manifest)
        entry1 = manifest[key1]
        bundle1 = webpack(ConfigFiles.BASIC_CONFIG)
        self.assertEqual(entry1, bundle1.data)
        key2 = generate_key(ConfigFiles.LIBRARY_CONFIG)
        self.assertIn(key2, manifest)
        entry2 = manifest[key2]
        bundle2 = webpack(ConfigFiles.LIBRARY_CONFIG)
        self.assertEqual(entry2, bundle2.data)
    def test_a_manifest_can_be_generated_with_multiple_contexts(self):
        manifest = generate_manifest({
            ConfigFiles.BASIC_CONFIG: (
                {'foo': 'bar'},
            ),
            ConfigFiles.LIBRARY_CONFIG: (
                {'foo': 'bar'},
                {'woz': 'woo'},
            ),
        })
        self.assertIsInstance(manifest, dict)
        # One entry per (config, context) pair: 1 + 2 = 3.
        self.assertEqual(len(manifest.keys()), 3)
        key1 = generate_key(ConfigFiles.BASIC_CONFIG, {'foo': 'bar'})
        self.assertIn(key1, manifest)
        entry1 = manifest[key1]
        bundle1 = webpack(ConfigFiles.BASIC_CONFIG, context={'foo': 'bar'})
        self.assertEqual(entry1, bundle1.data)
        key2 = generate_key(ConfigFiles.LIBRARY_CONFIG, {'foo': 'bar'})
        self.assertIn(key2, manifest)
        entry2 = manifest[key2]
        bundle2 = webpack(ConfigFiles.LIBRARY_CONFIG, context={'foo': 'bar'})
        self.assertEqual(entry2, bundle2.data)
        key3 = generate_key(ConfigFiles.LIBRARY_CONFIG, {'woz': 'woo'})
        self.assertIn(key3, manifest)
        entry3 = manifest[key3]
        bundle3 = webpack(ConfigFiles.LIBRARY_CONFIG, context={'woz': 'woo'})
        self.assertEqual(entry3, bundle3.data)
    def test_a_manifest_can_be_written_to_and_read_from_disk(self):
        manifest = generate_manifest({
            ConfigFiles.BASIC_CONFIG: (
                {'foo': 'bar'},
            ),
            ConfigFiles.LIBRARY_CONFIG: (
                {'foo': 'bar'},
                {'woz': 'woo'},
            ),
        })
        path = os.path.join(OUTPUT_ROOT, 'foo.json')
        write_manifest(path, manifest)
        # Manual check
        with open(path, 'r') as manifest_file:
            content = manifest_file.read()
        self.assertEqual(json.loads(content), manifest)
        # Convenience check
        self.assertEqual(read_manifest(path), manifest)
    @staticmethod
    def _raise_if_called(*args, **kwargs):
        # Sentinel used to patch the build server: the test fails loudly if
        # the compiler tries to build instead of reading the manifest.
        raise Exception('method called with args: {} and kwargs: {}'.format(args, kwargs))
    def test_the_manifest_is_used_by_the_compiler(self):
        manifest = generate_manifest({
            ConfigFiles.BASIC_CONFIG: (),
        })
        key = generate_key(ConfigFiles.BASIC_CONFIG)
        self.assertIn(key, manifest)
        path = os.path.join(OUTPUT_ROOT, 'test_manifest.json')
        write_manifest(path, manifest)
        # With the build server patched out, any bundle data must have come
        # from the manifest file alone.
        with mock.patch('webpack.compiler.build_server.build', self._raise_if_called):
            mock_settings = Conf()
            mock_settings.configure(
                **dict(
                    WEBPACK,
                    USE_MANIFEST=True,
                    MANIFEST_PATH=path,
                )
            )
            with mock.patch('webpack.conf.settings', mock_settings):
                bundle = webpack(ConfigFiles.BASIC_CONFIG)
                self.assertEqual(bundle.data, manifest[key])
    def test_the_manifest_can_be_populated_from_settings(self):
        path = os.path.join(OUTPUT_ROOT, 'test_populate_manifest_file.json')
        mock_settings = Conf()
        mock_settings.configure(
            **dict(
                WEBPACK,
                USE_MANIFEST=True,
                MANIFEST_PATH=path,
                MANIFEST=(
                    ConfigFiles.BASIC_CONFIG,
                )
            )
        )
        with mock.patch('webpack.conf.settings', mock_settings):
            populate_manifest_file()
        with open(path, 'r') as manifest_file:
            content = manifest_file.read()
        manifest = json.loads(content)
        expected = generate_manifest(
            (ConfigFiles.BASIC_CONFIG,)
        )
        self.assertEqual(manifest, expected)
    def test_the_manifest_can_be_populated_from_a_dictionary(self):
        path = os.path.join(OUTPUT_ROOT, 'test_populate_dict_manifest_file.json')
        mock_settings = Conf()
        mock_settings.configure(
            **dict(
                WEBPACK,
                USE_MANIFEST=True,
                MANIFEST_PATH=path,
                MANIFEST={
                    ConfigFiles.BASIC_CONFIG: (),
                }
            )
        )
        with mock.patch('webpack.conf.settings', mock_settings):
            populate_manifest_file()
        with open(path, 'r') as manifest_file:
            content = manifest_file.read()
        manifest = json.loads(content)
        expected = generate_manifest({
            ConfigFiles.BASIC_CONFIG: (),
        })
        self.assertEqual(manifest, expected)
0dbe7e52a6fcd5026bcaa13575dd6d512f98dd47 | 1,368 | py | Python | commonware/response/middleware.py | Osmose/commonware | e2e02ad47b553362929bfb741105f10c56a1bdc8 | [
"BSD-3-Clause"
] | null | null | null | commonware/response/middleware.py | Osmose/commonware | e2e02ad47b553362929bfb741105f10c56a1bdc8 | [
"BSD-3-Clause"
] | null | null | null | commonware/response/middleware.py | Osmose/commonware | e2e02ad47b553362929bfb741105f10c56a1bdc8 | [
"BSD-3-Clause"
] | null | null | null | import inspect
import time
from django.conf import settings
class _statsd(object):
def incr(s, *a, **kw):
pass
def timing(s, *a, **kw):
pass
# Prefer the real statsd client; fall back to the no-op stub when the
# `statsd` package is not installed.
try:
    from statsd import statsd
except ImportError:
    statsd = _statsd()
class FrameOptionsHeader(object):
    """
    Set an X-Frame-Options header. Default to DENY. Set
    response['x-frame-options'] = 'SAMEORIGIN'
    to override, or set a ``no_frame_options`` attribute on the response
    to skip the header entirely.
    """
    def process_response(self, request, response):
        # A view can opt out of the header by tagging the response object.
        if hasattr(response, 'no_frame_options'):
            return response
        # Only set the default; an explicit value (e.g. SAMEORIGIN) wins.
        if 'x-frame-options' not in response:
            response['x-frame-options'] = 'DENY'
        return response
class StrictTransportMiddleware(object):
    """
    Set the Strict-Transport-Security header on responses. Use the
    STS_MAX_AGE setting to control the max-age value. (Default: 1 month.)
    Use the STS_SUBDOMAINS boolean to add includeSubdomains.
    (Default: False.)
    """
    def process_response(self, request, response):
        # The header is only meaningful over TLS, so skip plain-HTTP responses.
        if not request.is_secure():
            return response
        max_age = getattr(settings, 'STS_MAX_AGE', 2592000)  # 30 days.
        header = 'max-age=%d' % max_age
        if getattr(settings, 'STS_SUBDOMAINS', False):
            header += '; includeSubDomains'
        response['Strict-Transport-Security'] = header
        return response
| 24.872727 | 73 | 0.625731 | 1,218 | 0.890351 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.38231 |
0dbee3b7f12c94c66b785df1beab2df77b47d739 | 22,992 | py | Python | tests/test_btc_rawtx_zcash.py | VDamas/app-cryptoescudo | 9e53ccdd836f7b5c787927c74eba3b0ac5d079b6 | [
"Apache-2.0"
] | null | null | null | tests/test_btc_rawtx_zcash.py | VDamas/app-cryptoescudo | 9e53ccdd836f7b5c787927c74eba3b0ac5d079b6 | [
"Apache-2.0"
] | null | null | null | tests/test_btc_rawtx_zcash.py | VDamas/app-cryptoescudo | 9e53ccdd836f7b5c787927c74eba3b0ac5d079b6 | [
"Apache-2.0"
] | 1 | 2022-02-08T22:42:41.000Z | 2022-02-08T22:42:41.000Z | import pytest
from dataclasses import dataclass, field
from functools import reduce
from typing import List, Optional
from helpers.basetest import BaseTestBtc, LedgerjsApdu, TxData, CONSENSUS_BRANCH_ID
from helpers.deviceappbtc import DeviceAppBtc, CommException
# Test data below is from a Zcash test log from Live team"
test_zcash_prefix_cmds = [
LedgerjsApdu( # Get version
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102" # i.e. "Zcash" + "1.3.23" (not checked)
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000000", # GET PUBLIC KEY - on 44'/133'/0'/0/0 path
"e016000000", # Coin info
],
expected_resp="1cb81cbd01055a63617368035a4543" # "Zcash" + "ZEC"
),
LedgerjsApdu(
commands=[
"e040000009028000002c80000085", # Get Public Key - on path 44'/133'
"e016000000", # Coin info
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=[
"e040000009028000002c80000085", # path 44'/133'
"e04000000d038000002c8000008580000000", # path 44'/133'/0'
"e04000000d038000002c8000008580000001", # path 44'/133'/1'
"b001000000"
],
# expected_resp="01055a63617368--------------0102"
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000004", # Get Public Key - on path 44'/133'/0'/0/4
"e016000000", # Coin info
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000004", # Get Public Key - on path 44'/133'/0'/0/4
"e016000000"
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
)
]
test_zcash_tx_sign_gti = [
LedgerjsApdu( # GET TRUSTED INPUT
commands=[
"e042000009000000010400008001",
"e042800025edc69b8179fd7c6a11a8a1ba5d17017df5e09296c3a1acdada0d94e199f68857010000006b",
"e042800032483045022100e8043cd498714122a78b6ecbf8ced1f74d1c65093c5e2649336dfa248aea9ccf022023b13e57595635452130",
"e0428000321c91ed0fe7072d295aa232215e74e50d01a73b005dac01210201e1c9d8186c093d116ec619b7dad2b7ff0e7dd16f42d458da",
"e04280000b1100831dc4ff72ffffff00",
"e04280000102",
"e042800022a0860100000000001976a914fa9737ab9964860ca0c3e9ad6c7eb3bc9c8f6fb588ac",
"e0428000224d949100000000001976a914b714c60805804d86eb72a38c65ba8370582d09e888ac",
"e04280000400000000",
],
expected_resp="3200" + "--"*2 + "20b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51010000004d94910000000000" + "--"*8
),
]
test_zcash_tx_to_sign_abandonned = [
LedgerjsApdu( # GET PUBLIC KEY
commands=["e040000015058000002c80000085800000000000000100000001"], # on 44'/133'/0'/1/1
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT START
commands=[
"e0440005090400008085202f8901",
"e04480053b013832004d0420b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51010000004d9491000000000045e1e144cb88d4d800",
"e044800504ffffff00",
]
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL
commands=[
"e04aff0015058000002c80000085800000000000000100000003",
# "e04a0000320240420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac39498200000000001976a91425ea06"
"e04a0000230140420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
], # tx aborted on 2nd command
expected_sw="6985"
),
]
test_zcash_tx_sign_restart_prefix_cmds = [
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000004",
"e016000000",
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
)
]
test_zcash_tx_to_sign_finalized = test_zcash_tx_sign_gti + [
LedgerjsApdu( # GET PUBLIC KEY
commands=["e040000015058000002c80000085800000000000000100000001"], # on 44'/133'/0'/1/1
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT START
commands=[
"e0440005090400008085202f8901",
"e04480053b""013832004d""0420b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51""01000000""4d94910000000000""45e1e144cb88d4d8""00",
"e044800504ffffff00",
]
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL
commands=[
"e04aff0015058000002c80000085800000000000000100000003",
# "e04a0000320240420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac39498200000000001976a91425ea06"
"e04a0000230140420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
"e04a8000045eb3f840"
],
expected_resp="0000"
),
LedgerjsApdu(
commands=[
"e044008509""0400008085202f8901",
"e04480853b""013832004d04""20b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51""01000000""4d94910000000000""45e1e144cb88d4d8""19",
"e04480851d""76a9140a146582553b2f5537e13cef6659e82ed8f69b8f88ac""ffffff00",
"e048000015""058000002c80000085800000000000000100000001"
],
check_sig_format=True
)
]
ledgerjs_test_data = [
test_zcash_prefix_cmds, test_zcash_tx_sign_gti, test_zcash_tx_to_sign_abandonned,
test_zcash_tx_sign_restart_prefix_cmds, test_zcash_tx_to_sign_finalized
]
utxo_single = bytes.fromhex(
# https://sochain.com/api/v2/tx/ZEC/ec9033381c1cc53ada837ef9981c03ead1c7c41700ff3a954389cfaddc949256
# Version @offset 0
"04000080"
# versionGroupId @offset 4
"85202f89"
# Input count @offset 8
"01"
# Input prevout hash @offset 9
"53685b8809efc50dd7d5cb0906b307a1b8aa5157baa5fc1bd6fe2d0344dd193a"
# Input prevout idx @offset 41
"00000000"
# Input script length @offset 45
"6b"
# Input script (107 bytes) @ offset 46
"483045022100ca0be9f37a4975432a52bb65b25e483f6f93d577955290bb7fb0"
"060a93bfc92002203e0627dff004d3c72a957dc9f8e4e0e696e69d125e4d8e27"
"5d119001924d3b48012103b243171fae5516d1dc15f9178cfcc5fdc67b0a8830"
"55c117b01ba8af29b953f6"
# Input sequence @offset 151
"ffffffff"
# Output count @offset 155
"01"
# Output #1 value @offset 156
"4072070000000000"
# Output #1 script length @offset 164
"19"
# Output #1 script (25 bytes) @offset 165
"76a91449964a736f3713d64283fd0018626ba50091c7e988ac"
# Locktime @offset 190
"00000000"
# Extra payload (size of everything remaining, specific to btc app inner protocol @offset 194
"0F"
# Expiry @offset 195
"00000000"
# valueBalance @offset 199
"0000000000000000"
# vShieldedSpend @offset 207
"00"
# vShieldedOutput @offset 208
"00"
# vJoinSplit @offset 209
"00"
)
utxos = [
# Considered a segwit tx - segwit flags couldn't be extracted from raw
# Get Trusted Input APDUs as they are not supposed to be sent w/ these APDUs.
bytes.fromhex(
# Version @offset 0
"04000080"
# versionGroupId @offset 4
"85202f89"
# Input count @offset 8
"01"
# Input prevout hash @offset 9
"edc69b8179fd7c6a11a8a1ba5d17017df5e09296c3a1acdada0d94e199f68857"
# Input prevout idx @offset 41
"01000000"
# Input script length @offset 45
"6b"
# Input script (107 bytes) @ offset 46
"483045022100e8043cd498714122a78b6ecbf8ced1f74d1c65093c5e2649336d"
"fa248aea9ccf022023b13e575956354521301c91ed0fe7072d295aa232215e74"
"e50d01a73b005dac01210201e1c9d8186c093d116ec619b7dad2b7ff0e7dd16f"
"42d458da1100831dc4ff72"
# Input sequence @offset 153
"ffffff00"
# Output count @offset 157
"02"
# Output #1 value @offset 160
"a086010000000000"
# Output #1 script length @offset 168
"19"
# Output #1 script (25 bytes) @offset 167
"76a914fa9737ab9964860ca0c3e9ad6c7eb3bc9c8f6fb588ac"
# Output #2 value @offset 192
"4d94910000000000" # 9 540 685 units of ZEC smallest currency available
# Output #2 script length @offset 200
"19"
# Output #2 script (25 bytes) @offset 201
"76a914b714c60805804d86eb72a38c65ba8370582d09e888ac"
# Locktime @offset 226
"00000000"
# Extra payload (size of everything remaining, specific to btc app inner protocol @offset 230
"0F"
# Expiry @offset 231
"00000000"
# valueBalance @offset 235
"0000000000000000"
# vShieldedSpend @offset 243
"00"
# vShieldedOutput @offset 244
"00"
# vJoinSplit @offset 245
"00"
)
]
tx_to_sign = bytes.fromhex(
# version @offset 0
"04000080"
# Some Zcash flags (?) @offset 4
"85202f89"
# Input count @offset 8
"01"
# Input's prevout hash @offset 9
"d35f0793da27a5eacfe984c73b1907af4b50f3aa3794ba1bb555b9233addf33f"
# Prevout idx @offset 41
"01000000"
# input sequence @offset 45
"ffffff00"
# Output count @offset 49
"02"
# Output #1 value @offset 50
"40420f0000000000" # 1 000 000 units of available balance spent
# Output #1 script (26 bytes) @offset 58
"1976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
# Output #2 value @offset 84
"2b51820000000000"
# Output #2 scritp (26 bytes) @offset 92
"1976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
# Locktime @offset 118
"5eb3f840"
)
change_path = bytes.fromhex("058000002c80000085800000000000000100000003") # 44'/133'/0'/1/3
output_paths = [
bytes.fromhex("058000002c80000085800000000000000100000001"), # 44'/133'/0'/1/1
bytes.fromhex("058000002c80000085800000000000000000000004") # 44'/133'/0'/0/4
]
@pytest.mark.zcash
class TestLedgerjsZcashTx(BaseTestBtc):
    """Replays (and adapts) captured Zcash APDU exchanges against the BTC app."""
    def _send_raw_apdus(self, apdus: List[LedgerjsApdu], device: DeviceAppBtc):
        """Send each raw APDU sequence, checking response/status expectations."""
        # Send the Get Version APDUs
        for apdu in apdus:
            try:
                for command in apdu.commands:
                    response = device.sendRawApdu(bytes.fromhex(command))
                if apdu.expected_resp is not None:
                    self.check_raw_apdu_resp(apdu.expected_resp, response)
                elif apdu.check_sig_format is not None and apdu.check_sig_format == True:
                    self.check_signature(response) # Only format is checked
            except CommException as error:
                # An anticipated status word (e.g. 6985 = user abort) is not
                # a failure; anything else is re-raised.
                if apdu.expected_sw is not None and error.sw.hex() == apdu.expected_sw:
                    continue
                raise error
    @pytest.mark.skip(reason="Hardcoded TrustedInput can't be replayed on a different device than the one that generated it")
    @pytest.mark.manual
    @pytest.mark.parametrize('test_data', ledgerjs_test_data)
    def test_replay_zcash_test(self, test_data: List[LedgerjsApdu]) -> None:
        """
        Replay of raw apdus from @gre.
        First time an output is presented for validation, it must be rejected by user
        Then tx will be restarted and on 2nd presentation of outputs they have to be
        accepted.
        """
        apdus = test_data
        btc = DeviceAppBtc()
        self._send_raw_apdus(apdus, btc)
    @pytest.mark.manual
    def test_get_single_trusted_input(self) -> None:
        """Compute a TrustedInput from a single-input, single-output Zcash UTXO."""
        btc = DeviceAppBtc()
        # 1. Get Trusted Input
        print("\n--* Get Trusted Input - from utxos")
        input_datum = bytes.fromhex("00000000") + utxo_single
        utxo_chunk_len = [
            4 + 5 + 4, # len(prevout_index (BE)||version||input_count||versionGroupId)
            37, # len(prevout_hash||prevout_index||len(scriptSig))
            -1, # len(scriptSig, from last byte of previous chunk) + len(input_sequence)
            1, # len(output_count)
            34, # len(output_value #1||len(scriptPubkey #1)||scriptPubkey #1)
            4 + 1, # len(locktime || extra_data)
            4+16+1+1+1 # len(Expiry||valueBalance||vShieldedSpend||vShieldedOutput||vJoinSplit)
        ]
        trusted_input = btc.getTrustedInput(data=input_datum, chunks_len=utxo_chunk_len)
        self.check_trusted_input(
            trusted_input,
            out_index=bytes.fromhex("00000000"),
            out_amount=bytes.fromhex("4072070000000000"),
            out_hash=bytes.fromhex("569294dcadcf8943953aff0017c4c7d1ea031c98f97e83da3ac51c1c383390ec")
        )
        print("    OK")
    @pytest.mark.manual
    def test_replay_zcash_test2(self) -> None:
        """
        Adapted version to work around some hw limitations
        """
        # Send the Get Version raw apdus
        apdus = test_zcash_prefix_cmds
        btc = DeviceAppBtc()
        self._send_raw_apdus(apdus, btc)
        # 1. Get Trusted Input
        print("\n--* Get Trusted Input - from utxos")
        output_indexes = [
            tx_to_sign[41+4-1:41-1:-1], # out_index in tx_to_sign input must be passed BE as prefix to utxo tx
        ]
        input_data = [out_idx + utxo for out_idx, utxo in zip(output_indexes, utxos)]
        utxos_chunks_len = [
            [ # utxo #1
                4+5+4, # len(prevout_index (BE)||version||input_count||versionGroupId)
                37, # len(prevout_hash||prevout_index||len(scriptSig))
                -1, # len(scriptSig, from last byte of previous chunk) + len(input_sequence)
                1, # len(output_count)
                34, # len(output_value #1||len(scriptPubkey #1)||scriptPubkey #1)
                34, # len(output_value #2||len(scriptPubkey #2)||scriptPubkey #2)
                4 + 1, # len(locktime)
                4 + 16 + 1 + 1 + 1 # len(Expiry||valueBalance||vShieldedSpend||vShieldedOutput||vJoinSplit)
            ]
        ]
        trusted_inputs = [
            btc.getTrustedInput(
                data=input_datum,
                chunks_len=chunks_len
            )
            for (input_datum, chunks_len) in zip(input_data, utxos_chunks_len)
        ]
        print("    OK")
        out_amounts = [utxos[0][192:192+8]] # UTXO tx's 2nd output's value
        prevout_hashes = [tx_to_sign[9:9+32]]
        for trusted_input, out_idx, out_amount, prevout_hash in zip(
            trusted_inputs, output_indexes, out_amounts, prevout_hashes
        ):
            self.check_trusted_input(
                trusted_input,
                out_index=out_idx[::-1], # LE for comparison w/ out_idx in trusted_input
                out_amount=out_amount, # utxo output #1 is requested in tx to sign input
                out_hash=prevout_hash # prevout hash in tx to sign
            )
        # 2.0 Get public keys for output paths & compute their hashes
        print("\n--* Get Wallet Public Key - for each tx output path")
        wpk_responses = [btc.getWalletPublicKey(output_path) for output_path in output_paths]
        print("    OK")
        pubkeys_data = [self.split_pubkey_data(data) for data in wpk_responses]
        for pubkey in pubkeys_data:
            print(pubkey)
        # 2.1 Construct a pseudo-tx without input script, to be hashed 1st.
        print("\n--* Untrusted Transaction Input Hash Start - Hash tx to sign first w/ all inputs having a null script length")
        input_sequences = [tx_to_sign[45:45+4]]
        ptx_to_hash_part1 = [tx_to_sign[:9]]
        for trusted_input, input_sequence in zip(trusted_inputs, input_sequences):
            ptx_to_hash_part1.extend([
                bytes.fromhex("01"), # TrustedInput marker byte, triggers the TrustedInput's HMAC verification
                bytes([len(trusted_input)]),
                trusted_input,
                bytes.fromhex("00"), # Input script length = 0 (no sigScript)
                input_sequence
            ])
        ptx_to_hash_part1 = reduce(lambda x, y: x+y, ptx_to_hash_part1) # Get a single bytes object
        ptx_to_hash_part1_chunks_len = [
            9 # len(version||flags||input_count) - skip segwit version+flag bytes
        ]
        for trusted_input in trusted_inputs:
            ptx_to_hash_part1_chunks_len.extend([
                1 + 1 + len(trusted_input) + 1, # len(trusted_input_marker||len(trusted_input)||trusted_input||len(scriptSig) == 0)
                4 # len(input_sequence)
            ])
        btc.untrustedTxInputHashStart(
            p1="00",
            p2="05", # Value used for Zcash
            data=ptx_to_hash_part1,
            chunks_len=ptx_to_hash_part1_chunks_len
        )
        print("    OK")
        # 2.2 Finalize the input-centric-, pseudo-tx hash with the remainder of that tx
        # 2.2.1 Start with change address path
        print("\n--* Untrusted Transaction Input Hash Finalize Full - Handle change address")
        ptx_to_hash_part2 = change_path
        ptx_to_hash_part2_chunks_len = [len(ptx_to_hash_part2)]
        btc.untrustedTxInputHashFinalize(
            p1="ff", # to derive BIP 32 change address
            data=ptx_to_hash_part2,
            chunks_len=ptx_to_hash_part2_chunks_len
        )
        print("    OK")
        # 2.2.2 Continue w/ tx to sign outputs & scripts
        print("\n--* Untrusted Transaction Input Hash Finalize Full - Continue w/ hash of tx output")
        ptx_to_hash_part3 = tx_to_sign[49:118] # output_count||repeated(output_amount||scriptPubkey)
        ptx_to_hash_part3_chunks_len = [len(ptx_to_hash_part3)]
        response = btc.untrustedTxInputHashFinalize(
            p1="00",
            data=ptx_to_hash_part3,
            chunks_len=ptx_to_hash_part3_chunks_len
        )
        assert response == bytes.fromhex("0000")
        print("    OK")
        # We're done w/ the hashing of the pseudo-tx with all inputs w/o scriptSig.
        # 2.2.3. Zcash-specific: "When using Overwinter/Sapling, UNTRUSTED HASH SIGN is
        #   called with an empty authorization and nExpiryHeight following the first
        #   UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL"
        print("\n--* Untrusted Has Sign - with empty Auth & nExpiryHeight")
        branch_id_data = [
            bytes.fromhex(
                "00" # Number of derivations (None)
                "00" # Empty validation code
            ),
            tx_to_sign[-4:], # locktime
            bytes.fromhex("01"), # SigHashType - always 01
            bytes.fromhex("00000000") # Empty nExpiryHeight
        ]
        response = btc.untrustedHashSign(
            data = reduce(lambda x, y: x+y, branch_id_data)
        )
        # 3. Sign each input individually. Because inputs are segwit, hash each input with its scriptSig
        #    and sequence individually, each in a pseudo-tx w/o output_count, outputs nor locktime.
        print("\n--* Untrusted Transaction Input Hash Start, step 2 - Hash again each input individually (only 1)")
        # Inputs are P2WPKH, so use 0x1976a914{20-byte-pubkey-hash}88ac from utxo as scriptSig in this step.
        #
        # From btc.asc: "The input scripts shall be prepared by the host for the transaction signing process as
        # per bitcoin rules : the current input script being signed shall be the previous output script (or the
        # redeeming script when consuming a P2SH output, or the scriptCode when consuming a BIP 143 output), and
        # other input script shall be null."
        input_scripts = [utxos[0][196:196 + utxos[0][196] + 1]]
        # input_scripts = [tx_to_sign[45:45 + tx_to_sign[45] + 1]]
        # input_scripts = [bytes.fromhex("1976a914") + pubkey.pubkey_hash + bytes.fromhex("88ac")
        #                  for pubkey in pubkeys_data]
        ptx_for_inputs = [
            [ tx_to_sign[:8], # Tx version||zcash flags
              bytes.fromhex("0101"), # Input_count||TrustedInput marker byte
              bytes([len(trusted_input)]),
              trusted_input,
              input_script,
              input_sequence
            ] for trusted_input, input_script, input_sequence in zip(trusted_inputs, input_scripts, input_sequences)
        ]
        ptx_chunks_lengths = [
            [
                9, # len(version||zcash flags||input_count) - segwit flag+version not sent
                1 + 1 + len(trusted_input) + 1, # len(trusted_input_marker||len(trusted_input)||trusted_input||scriptSig_len == 0x19)
                -1 # get len(scripSig) from last byte of previous chunk + len(input_sequence)
            ] for trusted_input in trusted_inputs
        ]
        # Hash & sign each input individually
        for ptx_for_input, ptx_chunks_len, output_path in zip(ptx_for_inputs, ptx_chunks_lengths, output_paths):
            # 3.1 Send pseudo-tx w/ sigScript
            btc.untrustedTxInputHashStart(
                p1="00",
                p2="80", # to continue previously started tx hash, be it BTc or other BTC-like coin
                data=reduce(lambda x,y: x+y, ptx_for_input),
                chunks_len=ptx_chunks_len
            )
            print("    Final hash OK")
            # 3.2 Sign tx at last. Param is:
            #     Num_derivs||Dest output path||RFU (0x00)||tx locktime||sigHashType(always 0x01)||Branch_id for overwinter (4B)
            print("\n--* Untrusted Transaction Hash Sign")
            tx_to_sign_data = output_path \
                + bytes.fromhex("00") \
                + tx_to_sign[-4:] \
                + bytes.fromhex("01") \
                + bytes.fromhex("00000000")
            response = btc.untrustedHashSign(
                data = tx_to_sign_data
            )
            self.check_signature(response) # Check sig format only
            # self.check_signature(response, expected_der_sig) # Can't test sig value as it depends on signing device seed
            print("    Signature OK\n")
| 42.10989 | 155 | 0.62674 | 12,370 | 0.538013 | 0 | 0 | 12,389 | 0.53884 | 0 | 0 | 12,169 | 0.529271 |
0dbf20f53fd144327ad5c51a40da0f81c05f9283 | 1,776 | py | Python | test.py | sguzman/duo-service-ready | c6dd88ccddfa2db13e1bed9787f77ca804bf48d9 | [
"Unlicense"
] | null | null | null | test.py | sguzman/duo-service-ready | c6dd88ccddfa2db13e1bed9787f77ca804bf48d9 | [
"Unlicense"
] | null | null | null | test.py | sguzman/duo-service-ready | c6dd88ccddfa2db13e1bed9787f77ca804bf48d9 | [
"Unlicense"
] | null | null | null | import atexit
import grpc
import logging
import os
import server_pb2
import server_pb2_grpc
# Module-level server port; None until init_env() populates it from the environment.
port: str = None
def init_env() -> None:
    """Load the PORT environment variable into the module-level `port`."""
    global port
    env_port = os.environ['PORT']
    logging.info('Found PORT at %s', env_port)
    port = env_port
def init_atexit() -> None:
    """Register a shutdown hook that logs a goodbye message on exit."""
    def _farewell():
        logging.info('bye')

    atexit.register(_farewell)
def init_logging() -> None:
    """Configure the root logger's format, level and date format, then log startup."""
    log_format = '%(asctime)s %(levelname)-8s %(message)s'
    logging.basicConfig(
        format=log_format,
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info('hi')
def set_service(stub, service: str) -> server_pb2.ReadyStatus:
    """Register `service` with the server via the stub and log the acknowledgement."""
    request = server_pb2.ReadyService(name=service)
    response = stub.RegisterService(request)
    ack: bool = response.ready
    logging.info('Registered service "%s" - got response %r', service, ack)
def get_inv(stub) -> None:
    """Fetch the full service inventory and log each entry's readiness flag."""
    inventory = stub.GetInventory(server_pb2.ReadyStatus(ready=True))
    for entry in inventory.entry:
        logging.info('Got service "%s" with status %r', entry.name, entry.ready)
def get_status(stub, service: str) -> None:
    """Query the server about `service` and log the returned readiness flag.

    Fix: the original constructed `server_pb2.Ready(name=service)`, a message
    name used nowhere else in this file; the sibling helper set_service()
    builds the identical request with `server_pb2.ReadyService(name=...)`,
    so the same message type is used here for consistency.
    """
    response = stub.RegisterService(server_pb2.ReadyService(name=service))
    ack: bool = response.ready
    logging.info('Got service "%s" - got status %r', service, ack)
def init_client() -> None:
    """Connect to the local gRPC server, register a few services and dump inventory."""
    addr: str = f'localhost:{port}'
    logging.info('Calling %s', addr)
    channel = grpc.insecure_channel(addr)
    stub = server_pb2_grpc.ReadyStub(channel)

    # Fix: `List[str]` referenced typing.List without importing it, raising
    # NameError at call time; a plain list needs no annotation here.
    services = ['A', 'B', 'C']
    for s in services:
        set_service(stub, s)

    get_inv(stub)

    # Removed broken code: the original called
    #     response2 = stub.GetKey(server_pb2.ConfKey(key=key))
    #     logging.info('Queried key "%s" and got "%s"', key, value)
    # where `key` and `value` were never defined (NameError) and `response2`
    # was unused. Restore with a real key once the GetKey flow is defined.
def init() -> None:
    """Run all startup steps in order, finishing with the client loop."""
    for startup_step in (init_logging, init_atexit, init_env, init_client):
        startup_step()
def main() -> None:
    """Script entry point: delegate all work to init()."""
    init()


if __name__ == '__main__':
    main()
| 21.39759 | 75 | 0.644144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.15991 |
0dbf61d6373ec9f78a1ff9c0baa721ae32809835 | 394 | py | Python | tests/dummy_project/spiders/dummy_spider.py | zack-wilson/scrapy-statsd | cfa2f40e243da30c7b1078cda16730f95bb79fda | [
"BSD-3-Clause"
] | null | null | null | tests/dummy_project/spiders/dummy_spider.py | zack-wilson/scrapy-statsd | cfa2f40e243da30c7b1078cda16730f95bb79fda | [
"BSD-3-Clause"
] | null | null | null | tests/dummy_project/spiders/dummy_spider.py | zack-wilson/scrapy-statsd | cfa2f40e243da30c7b1078cda16730f95bb79fda | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime as dt
import uuid
import scrapy
class DummySpiderSpider(scrapy.Spider):
    """Minimal spider that emits one tagged item per crawled page."""

    name = "dummy_spider"
    allowed_domains = ["example.com"]
    start_urls = ["http://example.com/"]

    def parse(self, response):
        """Yield a single item: a fresh UUID, the current time and the page URL."""
        item = {
            "uuid": str(uuid.uuid1()),
            "timestamp": dt.datetime.now(),
            "url": response.url,
        }
        yield item
| 20.736842 | 43 | 0.573604 | 318 | 0.807107 | 168 | 0.426396 | 0 | 0 | 0 | 0 | 93 | 0.236041 |
0dc14be135ed1c10113f4ab5fbd2759629e41d4e | 192 | py | Python | library_management/library_management/doctype/customer_account/customer_account.py | jcgurango/library_management | f9859499eb12414889277fbdadfcd60290c320dd | [
"MIT"
] | null | null | null | library_management/library_management/doctype/customer_account/customer_account.py | jcgurango/library_management | f9859499eb12414889277fbdadfcd60290c320dd | [
"MIT"
] | null | null | null | library_management/library_management/doctype/customer_account/customer_account.py | jcgurango/library_management | f9859499eb12414889277fbdadfcd60290c320dd | [
"MIT"
] | null | null | null | # Copyright (c) 2021, JC and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class CustomerAccount(Document):
    """Frappe DocType controller for Customer Account; no custom behavior yet."""
    pass
| 21.333333 | 49 | 0.791667 | 38 | 0.197917 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.546875 |
0dc28d03cd9039a35f49c8a676fc07ba9b6b1471 | 4,839 | py | Python | neural_compressor/experimental/common/optimizer.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 100 | 2020-12-01T02:40:12.000Z | 2021-09-09T08:14:22.000Z | neural_compressor/experimental/common/optimizer.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 25 | 2021-01-05T00:16:17.000Z | 2021-09-10T03:24:01.000Z | neural_compressor/experimental/common/optimizer.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 25 | 2020-12-01T19:07:08.000Z | 2021-08-30T14:20:07.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from neural_compressor.utils.utility import LazyImport, singleton
torch = LazyImport('torch')
tf = LazyImport('tensorflow')
@singleton
class TensorflowOptimizers(object):
    """Singleton registry exposing all registered TensorFlow optimizer classes."""
    def __init__(self):
        # Copy the module-level registry so lookups go through this instance.
        self.optimizers = {}
        self.optimizers.update(TENSORFLOW_OPTIMIZERS)
@singleton
class PyTorchOptimizers(object):
    """Singleton registry exposing all registered PyTorch optimizer classes."""
    def __init__(self):
        # Copy the module-level registry so lookups go through this instance.
        self.optimizers = {}
        self.optimizers.update(PYTORCH_OPTIMIZERS)
# Maps a framework name to the singleton registry class for that framework.
framework_optimizers = {"tensorflow": TensorflowOptimizers,
                        "pytorch": PyTorchOptimizers,
                        "pytorch_fx": PyTorchOptimizers}
# user/model specific optimizers will be registered here
TENSORFLOW_OPTIMIZERS = {}
PYTORCH_OPTIMIZERS= {}
# Maps a framework name to the mutable dict the @optimizer_registry decorator writes into.
registry_optimizers = {"tensorflow": TENSORFLOW_OPTIMIZERS,
                       "pytorch": PYTORCH_OPTIMIZERS,
                       "pytorch_fx": PYTORCH_OPTIMIZERS}
class Optimizers(object):
    """Framework-aware lookup and registration facade over the optimizer registries."""

    def __init__(self, framework):
        assert framework in ("tensorflow", "pytorch", "pytorch_fx"), \
            "framework support tensorflow pytorch"
        self.optimizers = framework_optimizers[framework]().optimizers

    def __getitem__(self, optimizer_type):
        """Return the optimizer class registered under `optimizer_type`."""
        registered = self.optimizers
        assert optimizer_type in registered.keys(), "only support optimizers in {}".\
            format(registered.keys())
        return registered[optimizer_type]

    def register(self, name, optimizer_cls):
        """Add `optimizer_cls` under `name`; the name must not already be taken."""
        assert name not in self.optimizers.keys(), 'registered optimizer name already exists.'
        self.optimizers[name] = optimizer_cls
def optimizer_registry(optimizer_type, framework):
    """Class decorator that registers an Optimizer subclass for one or more frameworks.

    Cross-framework optimizers are supported by passing framework='tensorflow, pytorch'.

    Args:
        optimizer_type (str): Name under which the optimizer is registered.
        framework (str): Comma-separated list of supported frameworks.

    Returns:
        cls: The class of register.
    """
    frameworks = [fwk.strip() for fwk in framework.split(',')]

    def decorator_optimizer(cls):
        for fw in frameworks:
            assert fw in [
                "tensorflow",
                "pytorch"], "The framework support tensorflow pytorch"
            if optimizer_type in registry_optimizers[fw].keys():
                raise ValueError('Cannot have two optimizers with the same name')
            registry_optimizers[fw][optimizer_type] = cls
        return cls
    return decorator_optimizer
@optimizer_registry('SGD', 'tensorflow')
class TensorFlowSGD(object):
    """TensorFlow keras SGD optimizer.

    Args:
        param_dict (dict): The dict of parameters setting by user for SGD optimizer
    """
    def __init__(self, param_dict):
        assert isinstance(param_dict, dict), 'This optimizer constructor parameter must be a dict'
        self._param_dict = param_dict

    def _mapping(self):
        # Translate generic config keys into tf.keras.optimizers.SGD keyword names,
        # silently dropping any keys the optimizer does not accept.
        key_map = {'learning_rate': 'learning_rate',
                   'momentum': 'momentum',
                   'nesterov': 'nesterov'}
        return {key_map[k]: v for k, v in self._param_dict.items() if k in key_map}

    def __call__(self, **kwargs):
        return tf.keras.optimizers.SGD, self._mapping(**kwargs)
@optimizer_registry('SGD', 'pytorch')
class PyTorchSGD(object):
    """PyTorch SGD optimizer.

    Args:
        param_dict (dict): The dict of parameters setting by user for SGD optimizer
    """
    def __init__(self, param_dict):
        assert isinstance(param_dict, dict), 'This optimizer constructor parameter must be a dict'
        self._param_dict = param_dict

    def _mapping(self):
        # Translate generic config keys into torch.optim.SGD keyword names,
        # silently dropping any keys the optimizer does not accept.
        key_map = {'learning_rate': 'lr',
                   'momentum': 'momentum',
                   'nesterov': 'nesterov',
                   'weight_decay': 'weight_decay'}
        return {key_map[k]: v for k, v in self._param_dict.items() if k in key_map}

    def __call__(self, **kwargs):
        return torch.optim.SGD, self._mapping(**kwargs)
| 35.844444 | 98 | 0.656127 | 2,588 | 0.534821 | 0 | 0 | 1,998 | 0.412895 | 0 | 0 | 1,926 | 0.398016 |
0dc50a62229b8147dc2f55f4c1c6f9f415fa5d6c | 20,561 | py | Python | curation_validator.py | FlashpointProject/Curation-Validation-Bot | 826bd86a4093c4a7fde16d9e511364f203c2266b | [
"MIT"
] | 5 | 2021-02-15T19:05:52.000Z | 2021-05-12T03:12:05.000Z | curation_validator.py | FlashpointProject/Curation-Validation-Bot | 826bd86a4093c4a7fde16d9e511364f203c2266b | [
"MIT"
] | 12 | 2021-02-13T02:12:04.000Z | 2022-01-29T23:46:03.000Z | curation_validator.py | FlashpointProject/Curation-Validation-Bot | 826bd86a4093c4a7fde16d9e511364f203c2266b | [
"MIT"
] | 4 | 2021-02-11T20:49:05.000Z | 2021-03-13T12:25:16.000Z | import base64
import shutil
import json
import re
from enum import Enum, auto
from typing import Optional
import py7zr
from cachetools import TTLCache, cached
from ruamel.yaml import YAML, YAMLError
from logger import getLogger
import os
import tempfile
import zipfile
import requests
from bs4 import BeautifulSoup
l = getLogger("main")
class CurationType(Enum):
    """Broad category of a curated entry, derived from the meta's Library/Platform."""
    FLASH_GAME = auto()
    OTHER_GAME = auto()
    ANIMATION = auto()
def validate_curation(filename: str) -> tuple[list,
                                              list,
                                              Optional[bool],
                                              Optional[CurationType],
                                              Optional[dict],
                                              Optional[list[dict]]]:
    """Validate a Flashpoint curation archive.

    Extracts the .7z/.zip at `filename` to a temporary directory, checks the
    archive structure (content folder, meta file, logo, screenshot), then
    validates the meta properties (release date, languages, mandatory fields,
    launch command, tags, extreme flag).

    Returns:
        A tuple (errors, warnings, is_extreme, curation_type, meta_props,
        images). When validation aborts early (unreadable archive, oversized
        archive, missing structure or unparseable meta), the last four
        elements are None.
    """
    errors: list = []
    warnings: list = []
    # process archive
    filenames: list = []
    # Archives with more than 50 GB of uncompressed content are not validated.
    max_uncompressed_size = 50 * 1000 * 1000 * 1000
    base_path = None
    if filename.endswith(".7z"):
        try:
            l.debug(f"reading archive '(unknown)'...")
            archive = py7zr.SevenZipFile(filename, mode='r')
            uncompressed_size = archive.archiveinfo().uncompressed
            if uncompressed_size > max_uncompressed_size:
                warnings.append(
                    f"The archive is too large to be validated (`{uncompressed_size // 1000000}MB/{max_uncompressed_size // 1000000}MB`).")
                archive.close()
                return errors, warnings, None, None, None, None
            filenames = archive.getnames()
            base_path = tempfile.mkdtemp(prefix="curation_validator_") + "/"
            archive.extractall(path=base_path)
            archive.close()
        except Exception as e:
            l.error(f"there was an error while reading file '(unknown)': {e}")
            errors.append("There seems to a problem with your 7z file.")
            return errors, warnings, None, None, None, None
    elif filename.endswith(".zip"):
        try:
            l.debug(f"reading archive '(unknown)'...")
            archive = zipfile.ZipFile(filename, mode='r')
            uncompressed_size = sum([zinfo.file_size for zinfo in archive.filelist])
            if uncompressed_size > max_uncompressed_size:
                warnings.append(
                    f"The archive is too large to be validated (`{uncompressed_size // 1000000}MB/{max_uncompressed_size // 1000000}MB`).")
                archive.close()
                return errors, warnings, None, None, None, None
            filenames = archive.namelist()
            base_path = tempfile.mkdtemp(prefix="curation_validator_") + "/"
            archive.extractall(path=base_path)
            archive.close()
        except Exception as e:
            l.error(f"there was an error while reading file '(unknown)': {e}")
            errors.append("There seems to a problem with your zip file.")
            return errors, warnings, None, None, None, None
    elif filename.endswith(".rar"):
        errors.append("Curations must be either .zip or .7z, not .rar.")
        return errors, warnings, None, None, None, None
    else:
        l.warn(f"file type of file '(unknown)' not supported")
        errors.append(f"file type of file '(unknown)' not supported")
        return errors, warnings, None, None, None, None
    # check files
    # From here on the archive is extracted under base_path and `filenames`
    # lists the archive members.
    l.debug(f"validating archive data for '(unknown)'...")
    uuid_folder_regex = re.compile(r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}/?$")
    uuid_folder = [match for match in filenames if uuid_folder_regex.match(match) is not None]
    logo = []
    ss = []
    # A curation made by Flashpoint Core wraps everything in a UUIDv4 folder;
    # anything else is treated as a legacy (named-folder) layout.
    if len(uuid_folder) == 0: # legacy or broken curation
        content_folder_regex = re.compile(r"^[^/]+/content/?$")
        meta_regex = re.compile(r"^[^/]+/meta\.(yaml|yml|txt)$")
        logo_regex = re.compile(r"^[^/]+/logo\.(png)$")
        logo_regex_case = re.compile(r"(?i)^[^/]+/logo\.(png)$")
        ss_regex = re.compile(r"^[^/]+/ss\.(png)$")
        ss_regex_case = re.compile(r"(?i)^[^/]+/ss\.(png)$")
        content_folder = [match for match in filenames if content_folder_regex.match(match) is not None]
        meta = [match for match in filenames if meta_regex.match(match) is not None]
        logo = [match for match in filenames if logo_regex.match(match) is not None]
        logo_case = [match for match in filenames if logo_regex_case.match(match) is not None]
        ss = [match for match in filenames if ss_regex.match(match) is not None]
        ss_case = [match for match in filenames if ss_regex_case.match(match) is not None]
    else: # core curation
        content_folder_regex = re.compile(
            r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}/content/?$")
        meta_regex = re.compile(
            r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}/meta\.(yaml|yml|txt)$")
        logo_regex = re.compile(r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}/logo\.png$")
        logo_regex_case = re.compile(
            r"(?i)^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}/logo\.(png)$")
        ss_regex = re.compile(r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}/ss\.png$")
        ss_regex_case = re.compile(
            r"(?i)^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}/ss\.(png)$")
        content_folder = [match for match in filenames if content_folder_regex.match(match) is not None]
        meta = [match for match in filenames if meta_regex.match(match) is not None]
        logo = [match for match in filenames if logo_regex.match(match) is not None]
        logo_case = [match for match in filenames if logo_regex_case.match(match) is not None]
        ss = [match for match in filenames if ss_regex.match(match) is not None]
        ss_case = [match for match in filenames if ss_regex_case.match(match) is not None]
    if len(logo) == 0 and len(ss) == 0 and len(content_folder) == 0 and len(meta) == 0:
        errors.append("Logo, screenshot, content folder and meta not found. Is your curation structured properly?")
        archive_cleanup(filename, base_path)
        return errors, warnings, None, None, None, None
    # The case-insensitive matches catch files whose only problem is an
    # upper-case extension, so a more specific error can be reported.
    if set(logo) != set(logo_case):
        errors.append("Logo file extension must be lowercase.")
    else:
        if len(logo) == 0:
            errors.append("Logo file is either missing or its filename is incorrect.")
    if set(ss) != set(ss_case):
        errors.append("Screenshot file extension must be lowercase.")
    else:
        if len(ss) == 0:
            errors.append("Screenshot file is either missing or its filename is incorrect.")
    # check content
    if len(content_folder) == 0:
        errors.append("Content folder not found.")
    else:
        content_folder_path = base_path + content_folder[0]
        filecount_in_content = sum([len(files) for r, d, files in os.walk(content_folder_path)])
        if filecount_in_content == 0:
            errors.append("No files found in content folder.")
        # localflash checking
        if 'localflash' in os.listdir(content_folder_path):
            files_in_localflash = os.listdir(content_folder_path + '/localflash')
            if len(files_in_localflash) > 1:
                errors.append("Content must be in additional folder in localflash rather than in localflash directly.")
            else:
                with open("data/common_localflash_names.json") as f:
                    bad_localflash_names = json.load(f)["names"]
                for file in files_in_localflash:
                    filepath = content_folder_path + '/localflash/' + file
                    if os.path.isfile(filepath):
                        errors.append(
                            "Content must be in additional folder in localflash rather than in localflash directly.")
                        break
                    elif file in bad_localflash_names:
                        errors.append("Extremely common localflash containing folder name, please change.")
    with open("data/bad_system_files.json") as f:
        bad_system_files = json.load(f)["names"]
    for name in bad_system_files:
        if any(name in s for s in filenames):
            errors.append(f"{name} file found in curation, please remove.")
    # process meta
    is_extreme = False
    curation_type = None
    props: dict = {}
    if len(meta) == 0:
        errors.append(
            "Meta file is either missing or its filename is incorrect. Are you using Flashpoint Core for curating?")
    else:
        meta_filename = meta[0]
        with open(base_path + meta_filename, mode='r', encoding='utf8') as meta_file:
            if meta_filename.endswith(".yml") or meta_filename.endswith(".yaml"):
                try:
                    yaml = YAML(typ="safe")
                    props: dict = yaml.load(meta_file)
                    if props is None:
                        errors.append("The meta file seems to be empty.")
                        archive_cleanup(filename, base_path)
                        return errors, warnings, None, None, None, None
                except YAMLError:
                    errors.append("Unable to load meta YAML file")
                    archive_cleanup(filename, base_path)
                    return errors, warnings, None, None, None, None
                except ValueError:
                    errors.append("Invalid release date. Ensure entered date is valid.")
                    archive_cleanup(filename, base_path)
                    return errors, warnings, None, None, None, None
            elif meta_filename.endswith(".txt"):
                break_index: int = 0
                # NOTE(review): meta_file.readlines() is invoked repeatedly on the
                # same already-consumed file object; after the first call it
                # returns []. Verify this legacy .txt path against a real meta.txt.
                while break_index != -1:
                    props, break_index = parse_lines_until_multiline(meta_file.readlines(), props,
                                                                     break_index)
                    props, break_index = parse_multiline(meta_file.readlines(), props, break_index)
                if props.get("Genre") is not None:
                    props["Tags"] = props["Genre"]
            else:
                errors.append(
                    "Meta file is either missing or its filename is incorrect. Are you using Flashpoint Core for curating?")
                archive_cleanup(filename, base_path)
                return errors, warnings, None, None, None, None
    title: tuple[str, bool] = ("Title", bool(props.get("Title")))
    # developer: tuple[str, bool] = ("Developer", bool(props["Developer"]))
    release_date: tuple[str, bool] = ("Release Date", bool(props.get("Release Date")))
    if release_date[1]:
        date_string = str(props.get("Release Date")).strip()
        if len(date_string) > 0:
            date_regex = re.compile(r"^\d{4}(-\d{2}){0,2}$")
            if not date_regex.match(date_string):
                errors.append(
                    f"Release date {date_string} is incorrect. Release dates should always be in `YYYY-MM-DD` format.")
    language_properties: tuple[str, bool] = "Languages", bool(props.get("Languages"))
    if language_properties[1]:
        with open("data/language-codes.json") as f:
            list_of_language_codes: list[dict] = json.load(f)
        with open("data/lang_replacements.json") as f:
            replacements: dict = json.load(f)
        language_str: str = props.get("Languages", "")
        language_codes = language_str.split(";")
        language_codes = [x.strip() for x in language_codes]
        valid_language_codes = []
        for x in list_of_language_codes:
            valid_language_codes.append(x["alpha2"])
        for language_code in language_codes:
            replacement_code = replacements.get(language_code)
            if language_code not in valid_language_codes:
                if language_code == "":
                    pass
                elif ',' in language_code:
                    errors.append("Languages should be separated with semicolons, not commas.")
                elif language_code in [x["English"] for x in list_of_language_codes]:
                    for x in list_of_language_codes:
                        if language_code in x["English"]:
                            errors.append(
                                f"Languages must be in ISO 639-1 format, so please use `{x['alpha2']}` instead of `{language_code}`")
                elif replacement_code is not None:
                    language_name = ""
                    for x in list_of_language_codes:
                        if replacement_code == x["alpha2"]:
                            language_name = x["English"]
                    errors.append(
                        f"The correct ISO 639-1 language code for {language_name} is `{replacement_code}`, not `{language_code}`.")
                else:
                    errors.append(f"Code `{language_code}` is not a valid ISO 639-1 language code.")
    # tag: tuple[str, bool] = ("Tags", bool(props["Tags"]))
    source: tuple[str, bool] = ("Source", bool(props.get("Source")))
    status: tuple[str, bool] = ("Status", bool(props.get("Status")))
    launch_command: tuple[str, bool] = ("Launch Command", bool(props.get("Launch Command")))
    application_path: tuple[str, bool] = ("Application Path", bool(props.get("Application Path")))
    # TODO check description?
    # description: tuple[str, bool] = ("Description", bool(props["Original Description"]))
    # if description[1] is False and (
    #         bool(props["Curation Notes"]) or bool(props["Game Notes"])):
    #     reply += "Make sure you didn't put your description in the notes section.\n"
    simple_mandatory_props: list[tuple[str, bool]] = [title, language_properties, source, launch_command, status,
                                                      application_path]
    if not all([x[1] for x in simple_mandatory_props]):
        for prop in simple_mandatory_props:
            if prop[1] is False:
                errors.append(f"The `{prop[0]}` property in the meta file is mandatory.")
    if launch_command[1] and "https" in props["Launch Command"]:
        errors.append("Found `https` in launch command. All launch commands must use `http` instead of `https`.")
    if launch_command[1] and props["Launch Command"] in get_launch_commands_bluebot():
        errors.append(
            "Identical launch command already present in the master database. Is your curation a duplicate?")
    # TODO check optional props?
    # optional_props: list[tuple[str, bool]] = [developer, release_date, tag, description]
    # if not all(optional_props[1]): for x in optional_props: if x[1] is False: reply += x[0] +
    # "is missing, but not necessary. Add it if you can find it, but it's okay if you can't.\n"
    tags: list[str] = props.get("Tags", "").split(";") if props.get("Tags", "") is not None else ""
    tags: list[str] = [x.strip() for x in tags]
    tags: list[str] = [x for x in tags if len(x) > 0]
    master_tag_list = get_tag_list()
    if not tags:
        errors.append("Missing tags. At least one tag must be specified.")
    else:
        for tag in tags:
            if tag not in master_tag_list:
                warnings.append(f"Tag `{tag}` is not a known tag, please verify (did you write it correctly?).")
    extreme: tuple[str, bool] = ("Extreme", bool(props.get("Extreme")))
    extreme_tags = get_extreme_tag_list_file()
    is_extreme = False
    # The curation is extreme if the meta flag says so OR any extreme tag is present.
    if extreme[1] and (props["Extreme"] == "Yes" or props["Extreme"] is True):
        is_extreme = True
    if tags:
        has_extreme_tags = bool([tag for tag in tags if tag in extreme_tags])
        has_legacy_extreme = "LEGACY-Extreme" in tags
        if has_extreme_tags or has_legacy_extreme:
            is_extreme = True
        if is_extreme and not has_extreme_tags:
            errors.append("Curation is extreme but lacks extreme tags.")
    if props.get("Library") is not None and "theatre" in props.get("Library"):
        curation_type = CurationType.ANIMATION
    else:
        platform: Optional[str] = props.get("Platform")
        if platform is None or "Flash" in platform:
            curation_type = CurationType.FLASH_GAME
        else:
            curation_type = CurationType.OTHER_GAME
    # Collect the logo and all screenshots as base64-encoded payloads.
    images = []
    if len(logo) == 1:
        logo = logo[0]
        image_path = f"{base_path}{logo}"
        images.append({"type": "logo", "data": encode_image(image_path)})
    for screenshot in ss:
        image_path = f"{base_path}{screenshot}"
        images.append({"type": f"screenshot", "data": encode_image(image_path)})
    archive_cleanup(filename, base_path)
    return errors, warnings, is_extreme, curation_type, props, images
def encode_image(image_path):
    """Return the base64-encoded contents of the file at `image_path`."""
    l.debug(f"encoding file '{image_path}' into base64")
    with open(image_path, "rb") as image_file:
        raw = image_file.read()
    return base64.b64encode(raw)
def archive_cleanup(filename, base_path):
    """Delete the temporary extraction directory for an archive, ignoring errors."""
    l.debug(f"cleaning up extracted files in {base_path} after the archive '(unknown)'...")
    shutil.rmtree(base_path, ignore_errors=True)
@cached(cache=TTLCache(maxsize=1, ttl=600))
def get_launch_commands_bluebot() -> list[str]:
    """Fetch all known launch commands from the bluebot API (cached 10 min)."""
    l.debug(f"getting launch commands from bluebot...")
    response = requests.get(url="https://bluebot.unstable.life/launch-commands")
    payload = response.json()
    return payload["launch_commands"]
@cached(cache=TTLCache(maxsize=1, ttl=600))
def get_tag_list_bluebot() -> list[str]:
    """Fetch the master tag list from the bluebot API (cached 10 min)."""
    l.debug(f"getting tags from bluebot...")
    response = requests.get(url="https://bluebot.unstable.life/tags")
    payload = response.json()
    return payload["tags"]
@cached(cache=TTLCache(maxsize=1, ttl=3600))
def get_tag_list_file() -> list[str]:
    """Load the category tag list from the bundled JSON data file (cached 1 h)."""
    l.debug(f"getting tags from file...")
    with open("data/category_tags.json", "r", encoding="utf-8") as tag_file:
        return json.load(tag_file)["tags"]
@cached(cache=TTLCache(maxsize=1, ttl=3600))
def get_extreme_tag_list_file() -> list[str]:
    """Load the extreme tag list from the bundled JSON data file (cached 1 h)."""
    l.debug(f"getting tags from file...")
    with open("data/extreme_tags.json", "r", encoding="utf-8") as tag_file:
        return json.load(tag_file)["tags"]
@cached(cache=TTLCache(maxsize=1, ttl=60))
def get_tag_list_wiki() -> list[str]:
    """Scrape the tag list from the Flashpoint wiki's Tags page (cached 1 min)."""
    l.debug(f"getting tags from wiki...")
    tags = []
    resp = requests.get(url="https://bluemaxima.org/flashpoint/datahub/Tags")
    soup = BeautifulSoup(resp.text, "html.parser")
    tables = soup.find_all("table")
    for table in tables:
        rows = table.find_all("tr")
        for row in rows:
            cols = row.find_all('td')
            if len(cols) > 0:
                col = cols[0]
                # Prefer the anchor text when the row's tag is hyperlinked,
                # otherwise fall back to the first cell's raw text node.
                links = row.find_all('a')
                if len(links) > 0:
                    tags.append(links[0].contents[0].strip())
                else:
                    tags.append(col.contents[0].strip())
    return tags
def get_tag_list() -> list[str]:
    """Return the deduplicated union of tags from bluebot, the local file and the wiki."""
    combined = set(get_tag_list_file())
    combined.update(get_tag_list_wiki())
    combined.update(get_tag_list_bluebot())
    return list(combined)
def parse_lines_until_multiline(lines: list[str], d: dict, starting_number: int):
    """Parse simple `Key: Value` lines into `d` until a multiline marker is hit.

    Scanning starts at index `starting_number`. A line containing '|' or a
    blank line stops the scan. Returns the updated dict and the stop index
    (relative to `starting_number`), or -1 if every remaining line was consumed.
    """
    break_number: int = -1
    for offset, line in enumerate(lines[starting_number:]):
        if '|' in line or not line.strip():
            break_number = offset
            break
        parts = [piece.strip(' ') for piece in line.split(":")]
        d[parts[0]] = parts[1]
    return d, break_number
def parse_multiline(lines: list[str], d: dict, starting_number: int):
    """Parse one multiline `Key: |` entry starting at index `starting_number`.

    The key is taken from the first scanned line; subsequent tab-indented
    lines are concatenated into its value. Returns the updated dict and the
    index of the first non-indented line (relative to `starting_number`), or
    -1 if the list was exhausted.
    """
    break_number = -1
    key: str = ""
    val: str = ""
    for idx, line in enumerate(lines[starting_number:]):
        # Bug fix: the original used `idx is starting_number`, comparing ints
        # by identity — that only works by accident via CPython's small-int
        # cache and breaks for values > 256. Use `==` for value equality.
        # NOTE(review): `idx` restarts at 0 because of the slice, so this
        # condition only matches the first line when starting_number == 0;
        # behavior is preserved as-is — verify against the legacy .txt caller.
        if idx == starting_number:
            split = line.split(':')
            split = [x.strip(' ') for x in split]
            key = split[0]
        else:
            if line.startswith('\t'):
                line = line.strip(" \t")
                val += line
            else:
                break_number = idx
                break
    d.update({key: val})
    return d, break_number
| 46.100897 | 139 | 0.581684 | 96 | 0.004669 | 0 | 0 | 1,718 | 0.083556 | 0 | 0 | 5,493 | 0.267156 |
0dc54bb903b475f38621941bf098cb9e92be0daf | 548 | py | Python | authentication/urls.py | thestackcoder/notifao_app | e21ab3c0eed72a64ee24508b92045de13c8385bb | [
"MIT"
] | null | null | null | authentication/urls.py | thestackcoder/notifao_app | e21ab3c0eed72a64ee24508b92045de13c8385bb | [
"MIT"
] | null | null | null | authentication/urls.py | thestackcoder/notifao_app | e21ab3c0eed72a64ee24508b92045de13c8385bb | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.urls import path
from .views import login_view, register_user, reset_password
from django.contrib.auth.views import LogoutView
from .views import *
# URL routes for the authentication app. The view functions (including
# reset_view, pulled in via `from .views import *`) live in .views;
# LogoutView is Django's built-in class-based logout view.
urlpatterns = [
    path('login/', login_view, name="login"),
    path('register/', register_user, name="register"),
    path("logout/", LogoutView.as_view(), name="logout"),
    path('reset/', reset_view, name="reset"),
    # <str:pk> carries the identifier of the account whose password is reset.
    path('reset_password/<str:pk>/',reset_password, name="reset_password"),
]
| 28.842105 | 75 | 0.695255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.355839 |
0dc66aaede643e68b9fbae98bc7c2375fba4fa0b | 2,835 | py | Python | examples/get_tiles.py | cytomine/Cytomine-python-client | afd46c1a6bf88914610e961531979597f8efaf22 | [
"Apache-2.0"
] | 23 | 2015-04-28T07:58:25.000Z | 2021-05-21T09:37:29.000Z | examples/get_tiles.py | cytomine/Cytomine-python-client | afd46c1a6bf88914610e961531979597f8efaf22 | [
"Apache-2.0"
] | 27 | 2017-01-12T15:11:39.000Z | 2022-03-29T07:06:46.000Z | examples/get_tiles.py | cytomine/Cytomine-python-client | afd46c1a6bf88914610e961531979597f8efaf22 | [
"Apache-2.0"
] | 14 | 2016-09-02T19:51:54.000Z | 2022-02-02T14:35:19.000Z | # -*- coding: utf-8 -*-
# * Copyright (c) 2009-2018. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import numpy as np
from argparse import ArgumentParser
import os
from cytomine import Cytomine
from cytomine.models.image import ImageInstanceCollection, ImageInstance
from cytomine.utilities import WholeSlide
from cytomine.utilities.reader import CytomineReader
__author__ = "Rubens Ulysse <urubens@uliege.be>"
logging.basicConfig()
logger = logging.getLogger("cytomine.client")
logger.setLevel(logging.INFO)
if __name__ == '__main__':
parser = ArgumentParser(prog="Cytomine Python client example")
# Cytomine
parser.add_argument('--cytomine_host', dest='host',
default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
help="The Cytomine private key")
parser.add_argument('--cytomine_id_image_instance', dest='id_image_instance',
help="The image from which tiles will be extracted")
parser.add_argument('--overlap', help="Overlap between tiles", default=10)
parser.add_argument('--zoom', help="Zoom at which tiles are extracted", default=None)
params, other = parser.parse_known_args(sys.argv[1:])
with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key) as cytomine:
image_instance = ImageInstance().fetch(params.id_image_instance)
print(image_instance)
if not params.zoom:
params.zoom = int(image_instance.depth / 2)
print("Zoom set to {}".format(params.zoom))
whole_slide = WholeSlide(image_instance)
reader = CytomineReader(whole_slide, overlap=params.overlap, zoom=params.zoom)
while True:
reader.read()
image = np.array(reader.result())
print(image.shape)
print(reader.window_position)
if not reader.next():
break
| 38.310811 | 110 | 0.701587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,111 | 0.391887 |
0dc682785b15f15c903947600de17c5139a1bca3 | 490 | py | Python | info/modules/admin/__init__.py | gxz987/Dynamic_movie | ceb266dcb123cc2e51bda29a1da436e58b220be4 | [
"MIT"
] | null | null | null | info/modules/admin/__init__.py | gxz987/Dynamic_movie | ceb266dcb123cc2e51bda29a1da436e58b220be4 | [
"MIT"
] | 6 | 2021-03-19T01:24:17.000Z | 2022-03-11T23:50:23.000Z | info/modules/admin/__init__.py | bzg287288182/my-News | 01e581ec53e44a9ed7ab8be1bcd9ddf84795acff | [
"MIT"
] | null | null | null | from flask import Blueprint
# Blueprint for the admin backend; all its routes are mounted under /admin.
admin_blu = Blueprint("admin", __name__, url_prefix="/admin")
from .views import *
@admin_blu.before_request
def admin_identification():
    """
    Access check that runs before every request to the admin blueprint.

    Redirects non-admin users to the site root, except for the login page
    itself so administrators can still sign in.
    :return:
    """
    # If `is_admin` can be read from the session, the user is an administrator.
    # Requests to /admin/login are always allowed through.
    # NOTE(review): `request`, `session` and `redirect` are not imported here —
    # presumably they reach this module via `from .views import *`; verify.
    is_login = request.url.endswith("/login")  # does the URL end with /login, i.e. the login page?
    is_admin = session.get("is_admin")  # is the current user an administrator?
    if not is_admin and not is_login:
        return redirect("/")
| 22.272727 | 74 | 0.691837 | 0 | 0 | 0 | 0 | 516 | 0.81388 | 0 | 0 | 328 | 0.51735 |