Dataset schema (one entry per column: name, dtype, observed min–max or number of classes):

column                                      dtype     observed values
hexsha                                      string    length 40
size                                        int64     5 – 2.06M
ext                                         string    10 classes
lang                                        string    1 class
max_stars_repo_path                         string    length 3 – 248
max_stars_repo_name                         string    length 5 – 125
max_stars_repo_head_hexsha                  string    length 40 – 78
max_stars_repo_licenses                     list      length 1 – 10
max_stars_count                             int64     1 – 191k
max_stars_repo_stars_event_min_datetime     string    length 24
max_stars_repo_stars_event_max_datetime     string    length 24
max_issues_repo_path                        string    length 3 – 248
max_issues_repo_name                        string    length 5 – 125
max_issues_repo_head_hexsha                 string    length 40 – 78
max_issues_repo_licenses                    list      length 1 – 10
max_issues_count                            int64     1 – 67k
max_issues_repo_issues_event_min_datetime   string    length 24
max_issues_repo_issues_event_max_datetime   string    length 24
max_forks_repo_path                         string    length 3 – 248
max_forks_repo_name                         string    length 5 – 125
max_forks_repo_head_hexsha                  string    length 40 – 78
max_forks_repo_licenses                     list      length 1 – 10
max_forks_count                             int64     1 – 105k
max_forks_repo_forks_event_min_datetime     string    length 24
max_forks_repo_forks_event_max_datetime     string    length 24
content                                     string    length 5 – 2.06M
avg_line_length                             float64   1 – 1.02M
max_line_length                             int64     3 – 1.03M
alphanum_fraction                           float64   0 – 1
count_classes                               int64     0 – 1.6M
score_classes                               float64   0 – 1
count_generators                            int64     0 – 651k
score_generators                            float64   0 – 1
count_decorators                            int64     0 – 990k
score_decorators                            float64   0 – 1
count_async_functions                       int64     0 – 235k
score_async_functions                       float64   0 – 1
count_documentation                         int64     0 – 1.04M
score_documentation                         float64   0 – 1
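For orientation, here is a minimal sketch of how rows with this schema might be filtered once loaded into pandas. Only the column names come from the schema above; the Parquet file name is a hypothetical local export, not part of the dataset.

```python
# Minimal sketch (assumption: rows exported locally as Parquet; the path is hypothetical).
import pandas as pd

df = pd.read_parquet("python_files.parquet")

# Example filter: small, documented Python files from repositories with at least one star.
subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 50_000)
    & (df["score_documentation"] > 0.2)
    & (df["max_stars_count"].fillna(0) >= 1)
]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```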
f7af32c0de7c050bf221c8fa53e7b8146120211e
11,892
py
Python
custom_components/discord_game/sensor.py
Myztillx/discord_game
d2413a41ca3918bf2836b3b577fccca86b85ff05
[ "MIT" ]
null
null
null
custom_components/discord_game/sensor.py
Myztillx/discord_game
d2413a41ca3918bf2836b3b577fccca86b85ff05
[ "MIT" ]
null
null
null
custom_components/discord_game/sensor.py
Myztillx/discord_game
d2413a41ca3918bf2836b3b577fccca86b85ff05
[ "MIT" ]
null
null
null
import asyncio import json import logging import re import homeassistant.helpers.config_validation as cv import voluptuous as vol from discord import ActivityType, Spotify, Game, Streaming, CustomActivity, Activity, Member, User from homeassistant.components.notify import PLATFORM_SCHEMA from homeassistant.const import (EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START) from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['discord.py==1.5.1'] CONF_TOKEN = 'token' CONF_MEMBERS = 'members' CONF_IMAGE_FORMAT = 'image_format' DOMAIN = 'sensor' ENTITY_ID_FORMAT = "sensor.discord_{}" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_TOKEN): cv.string, vol.Required(CONF_MEMBERS, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_IMAGE_FORMAT, default='webp'): vol.In(['png', 'webp', 'jpeg', 'jpg']), }) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): import discord token = config.get(CONF_TOKEN) image_format = config.get(CONF_IMAGE_FORMAT) intents = discord.Intents.default() intents.members = True intents.presences = True bot = discord.Client(loop=hass.loop, intents=intents) await bot.login(token) async def async_stop_server(event): await bot.logout() async def start_server(event): hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_server) await bot.start(token) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_server) @bot.event async def on_error(error, *args, **kwargs): raise def update_discord_entity(watcher: DiscordAsyncMemberState, discord_member: Member): watcher._state = discord_member.status activity_state = None game = None game_state = None game_details = None game_image_small = None game_image_large = None game_image_small_text = None game_image_large_text = None streaming = None streaming_details = None streaming_url = None listening = None listening_details = None listening_url = None spotify_artists = None spotify_title = None spotify_album = None spotify_album_cover_url = None spotify_track_id = None spotify_duration = None spotify_start = None spotify_end = None watching = None watching_details = None watching_url = None custom_status = None custom_emoji = None for activity in discord_member.activities: if activity.type == ActivityType.playing: if isinstance(activity, Game): activity: Game game = activity.name continue else: activity: Activity game = activity.name game_state = activity.state game_details = activity.details game_image_small = activity.small_image_url game_image_large = activity.large_image_url game_image_small_text = activity.small_image_text game_image_large_text = activity.large_image_text continue if activity.type == ActivityType.streaming: activity: Streaming streaming = activity.name streaming_details = activity.details streaming_url = activity.url continue if activity.type == ActivityType.listening: if isinstance(activity, Spotify): activity: Spotify listening = activity.title spotify_artists = ", ".join(activity.artists) spotify_title = activity.title spotify_album = activity.album spotify_album_cover_url = activity.album_cover_url spotify_track_id = activity.track_id spotify_duration = str(activity.duration) spotify_start = str(activity.start) spotify_end = str(activity.end) continue else: activity: Activity activity_state = activity.state listening = activity.name listening_details = activity.details listening_url = activity.url continue if activity.type == ActivityType.watching: activity: Activity activity_state = 
activity.state watching = activity.name watching_details = activity.details watching_url = activity.url continue if activity.type == ActivityType.custom: activity: CustomActivity activity_state = activity.state custom_status = activity.name custom_emoji = activity.emoji.name if activity.emoji else None continue watcher._game = game watcher._game_state = game_state watcher._game_details = game_details watcher._game_image_small = game_image_small watcher._game_image_large = game_image_large watcher._game_image_small_text = game_image_small_text watcher._game_image_large_text = game_image_large_text watcher._streaming = streaming watcher._streaming_url = streaming_url watcher._streaming_details = streaming_details watcher._listening = listening watcher._listening_url = listening_url watcher._listening_details = listening_details watcher._spotify_artist = spotify_artists watcher._spotify_title = spotify_title watcher._spotify_album = spotify_album watcher._spotify_album_cover_url = spotify_album_cover_url watcher._spotify_track_id = spotify_track_id watcher._spotify_duration = spotify_duration watcher._spotify_start = spotify_start watcher._spotify_end = spotify_end watcher._watching = watching watcher._watching_url = watching_url watcher._watching_details = watching_details watcher._activity_state = activity_state watcher._custom_status = custom_status watcher._custom_emoji = custom_emoji watcher.async_schedule_update_ha_state() def update_discord_entity_user(watcher: DiscordAsyncMemberState, discord_user: User): watcher._avatar_url = discord_user.avatar_url_as(format=None, static_format=image_format, size=1024).__str__() watcher._user_id = discord_user.id watcher.async_schedule_update_ha_state(True) @bot.event async def on_ready(): users = {"{}".format(user): user for user in bot.users} members = {"{}".format(member): member for member in list(bot.get_all_members())} for name, watcher in watchers.items(): if users.get(name) is not None: update_discord_entity_user(watcher, users.get(name)) if members.get(name) is not None: update_discord_entity(watcher, members.get(name)) @bot.event async def on_member_update(before: Member, after: Member): watcher = watchers.get("{}".format(after)) if watcher is not None: update_discord_entity(watcher, after) @bot.event async def on_user_update(before: User, after: User): watcher: DiscordAsyncMemberState = watchers.get("{}".format(after)) if watcher is not None: update_discord_entity_user(watcher, after) watchers = {} for member in config.get(CONF_MEMBERS): if re.match(r"^.*#[0-9]{4}", member): watcher: DiscordAsyncMemberState = DiscordAsyncMemberState(hass, bot, member) watchers[watcher.name] = watcher elif re.match(r"^[0-9]{,20}", member): #Up to 20 digits because 2^64 (snowflake-length) is 20 digits long user = await bot.fetch_user(member) if user: watcher: DiscordAsyncMemberState = DiscordAsyncMemberState(hass, bot, "{}#{}".format(user.name,user.discriminator)) watchers[watcher.name] = watcher if len(watchers) > 0: async_add_entities(watchers.values()) return True else: return False class DiscordAsyncMemberState(Entity): def __init__(self, hass, client, member): self._member = member self._hass = hass self._client = client self._state = 'unknown' self._game = None self._game_state = None self._game_details = None self._game_image_small = None self._game_image_large = None self._game_image_small_text = None self._game_image_large_text = None self._streaming = None self._streaming_url = None self._streaming_details = None self._listening = None 
self._listening_url = None self._listening_details = None self._spotify_artist = None self._spotify_title = None self._spotify_album = None self._spotify_album_cover_url = None self._spotify_track_id = None self._spotify_duration = None self._spotify_start = None self._spotify_end = None self._watching = None self._watching_url = None self._watching_details = None self._avatar_url = None self._user_id = None self._custom_status = None self._custom_emoji = None @property def should_poll(self) -> bool: return False @property def state(self) -> str: return self._state @property def entity_id(self): """Return the entity ID.""" # 1st Regex; keep a-z0-9 [](){} characters, replace with "_" # 2nd Regex; keep only a-z0-9 and single non-leading and non-trailing "_" characters, replace everything else with "" return ENTITY_ID_FORMAT.format(re.sub(r'([^a-z0-9_]|^_+|_+$|(_)\2+)', '', re.sub('[^a-z0-9 \[\]\(\)\{\}\"\']', '_', self._member.lower()))) @property def name(self): return self._member @property def entity_picture(self): return self._avatar_url @property def device_state_attributes(self): """Return the state attributes.""" return { 'avatar_url': self._avatar_url, 'game': self._game, 'game_state': self._game_state, 'game_details': self._game_details, 'game_image_small': self._game_image_small, 'game_image_large': self._game_image_large, 'game_image_small_text': self._game_image_small_text, 'game_image_large_text': self._game_image_large_text, 'streaming': self._streaming, 'streaming_url': self._streaming_url, 'streaming_details': self._streaming_details, 'listening': self._listening, 'listening_url': self._listening_url, 'listening_details': self._listening_details, 'spotify_artist': self._spotify_artist, 'spotify_title': self._spotify_title, 'spotify_album': self._spotify_album, 'spotify_album_cover_url': self._spotify_album_cover_url, 'spotify_track_id': self._spotify_track_id, 'spotify_duration': self._spotify_duration, 'spotify_start': self._spotify_start, 'spotify_end': self._spotify_end, 'watching': self._watching, 'watching_url': self._watching_url, 'watching_details': self._watching_details, 'custom_status': self._custom_status, 'custom_emoji': self._custom_emoji }
38.862745
147
0.632862
3,400
0.285906
0
0
3,141
0.264127
7,581
0.637487
958
0.080558
f7af40aed66aeeaae2505edaa30898f512812b45
329
py
Python
Mundo 1/ex_014.py
Shock3/Python_Exercicios
4420569e881b883728168aabe76b0e9f3a42597f
[ "MIT" ]
null
null
null
Mundo 1/ex_014.py
Shock3/Python_Exercicios
4420569e881b883728168aabe76b0e9f3a42597f
[ "MIT" ]
null
null
null
Mundo 1/ex_014.py
Shock3/Python_Exercicios
4420569e881b883728168aabe76b0e9f3a42597f
[ "MIT" ]
null
null
null
""" Escreva um programa que converta uma temperatura, digitando em graus Celsius e converta para graus Fahrenheit. """ celsius = int(input('Digite a temperatura: ')) fahrenheit = (celsius / 5) * 9 + 32 Kelvin = celsius + 273 print(f'A temperatura {celsius}°C em Fahrenheit é {fahrenheit}°F') print(f'E em Kevin fica {Kelvin} K')
32.9
66
0.723404
0
0
0
0
0
0
0
0
233
0.701807
f7af8bb0d4f3220811a9ca15ffd7c866a271a05f
24
py
Python
opensecrets/__init__.py
ndanielsen/py-opensecrets
b362d993fdcff6fc6a0d33ec2db75fb1da418a84
[ "MIT" ]
1
2018-02-15T03:59:13.000Z
2018-02-15T03:59:13.000Z
opensecrets/__init__.py
ndanielsen/py-opensecrets
b362d993fdcff6fc6a0d33ec2db75fb1da418a84
[ "MIT" ]
11
2018-02-14T16:23:17.000Z
2018-04-05T16:14:49.000Z
opensecrets/__init__.py
ndanielsen/py-opensecrets
b362d993fdcff6fc6a0d33ec2db75fb1da418a84
[ "MIT" ]
null
null
null
from .crpapi import CRP
12
23
0.791667
0
0
0
0
0
0
0
0
0
0
f7afb1df4dc8682c54d3708fff34533b6c3286db
2,933
py
Python
fumblr/services/imgur.py
jonoco/fumblr
cfbbea365299b9edba05c04de77cb003d03b6186
[ "MIT" ]
2
2017-04-13T02:58:24.000Z
2021-05-04T00:36:57.000Z
fumblr/services/imgur.py
jonoco/fumblr
cfbbea365299b9edba05c04de77cb003d03b6186
[ "MIT" ]
2
2020-09-09T22:10:40.000Z
2021-02-08T20:22:28.000Z
fumblr/services/imgur.py
jonoco/fumblr
cfbbea365299b9edba05c04de77cb003d03b6186
[ "MIT" ]
4
2019-07-30T12:17:55.000Z
2020-08-28T14:51:22.000Z
from fumblr.keys import IMGUR_SECRET, IMGUR_ID
from imgurpython import ImgurClient, helpers
import os
import base64

API_URL = 'https://api.imgur.com/3/'


def get_client():
    """ Get an API client for Imgur

    Returns:
        Imgur client if it is available
    """
    try:
        return ImgurClient(IMGUR_ID, IMGUR_SECRET)
    except helpers.error.ImgurClientError:
        print(f'Error: imgur client error - id: {IMGUR_ID} secret: {IMGUR_SECRET}')


def delete_image(deletehash):
    """ Delete image from Imgur with given deletehash

    Args:
        deletehash: Hash id of image to delete

    Returns:
        Response from Imgur of image deletion if successful, otherwise False
    """
    client = get_client()
    if client:
        try:
            return client.delete_image(deletehash)
        except:
            return False


def upload_image(path):
    """ Upload image at system path to Imgur

    Example of response data from Imgur upload:
        {'size': 3527, 'title': None, 'animated': False, 'deletehash': 'YkK79ucEtDDn1b9',
         'views': 0, 'width': 187, 'account_url': None, 'in_gallery': False, 'name': '',
         'section': None, 'account_id': 0, 'type': 'image/png', 'datetime': 1473926225,
         'description': None, 'height': 242, 'bandwidth': 0, 'id': 'AEvnA7h',
         'favorite': False, 'nsfw': None, 'link': 'http://i.imgur.com/AEvnA7h.png',
         'is_ad': False, 'vote': None}

    Args:
        path: System path of image

    Returns:
        Response from Imgur
    """
    client = get_client()
    if client:
        image_path = os.path.abspath(path)
        upload = client.upload_from_path(image_path)
        return upload


def upload(image):
    """ Upload image to Imgur from file

    Args:
        image: File object

    Returns:
        Imgur response object
    """
    client = get_client()
    if client:
        contents = image.read()
        b64 = base64.b64encode(contents)
        data = {
            'image': b64,
            'type': 'base64'
        }
        return client.make_request('POST', 'upload', data, True)


def upload_from_url(url):
    """ Upload image to Imgur from url

    Args:
        url: URL of image

    Returns:
        Imgur Response object if successful, otherwise False
    """
    client = get_client()
    if client:
        try:
            return client.upload_from_url(url)
        except helpers.error.ImgurClientError:
            print('Error: imgur client error')
            return False


def get_image(id):
    """ Return image data for image with given id

    Args:
        id: Imgur image id

    Returns:
        Response from Imgur
    """
    client = get_client()
    if client:
        image_data = client.get_image(id)
        return image_data
20.51049
83
0.57177
0
0
0
0
0
0
0
0
1,673
0.570406
f7b31ccb2c831e63f0930af029ae9e690135237c
3,584
py
Python
source/src/molecular-unfolding/lambda/AthenaTableLambda/app.py
awslabs/quantum-ready-solution-for-drug-discovery
a015589995dc17a56bcd0da9332f63d966d08ace
[ "Apache-2.0" ]
10
2022-01-26T01:08:50.000Z
2022-03-31T03:03:44.000Z
source/src/molecular-unfolding/lambda/AthenaTableLambda/app.py
awslabs/quantum-ready-solution-for-drug-discovery
a015589995dc17a56bcd0da9332f63d966d08ace
[ "Apache-2.0" ]
47
2022-01-26T01:27:35.000Z
2022-03-29T04:34:51.000Z
source/src/molecular-unfolding/lambda/AthenaTableLambda/app.py
awslabs/quantum-ready-solution-for-drug-discovery
a015589995dc17a56bcd0da9332f63d966d08ace
[ "Apache-2.0" ]
5
2022-02-08T02:30:11.000Z
2022-03-25T01:59:15.000Z
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import boto3 import botocore import os import logging import time import json import datetime log = logging.getLogger() log.setLevel('INFO') bucket = os.environ['BUCKET'] region = os.environ['AWS_REGION'] solution_version = os.environ.get('SOLUTION_VERSION', 'v1.0.0') solution_id = os.environ.get('SOLUTION_ID') user_agent_config = { 'user_agent_extra': f'AwsSolution/{solution_id}/{solution_version}', 'region_name': region } default_config = botocore.config.Config(**user_agent_config) athena_client = boto3.client('athena', config=default_config) def handler(event, context): s3_prefix = event['s3_prefix'] table_prefix = event["stackName"] log.info(f"table_prefix: {table_prefix}, s3_prefix: {s3_prefix}") table_name = f"{table_prefix}_qc_batch_evaluation_metrics_hist" view_name = f"{table_prefix}_qc_batch_evaluation_metrics" ATHENA_OUTPUT_LOCATION = f"s3://{bucket}/{s3_prefix}/athena-out/" location = f"s3://{bucket}/{s3_prefix}/batch_evaluation_metrics/" createDBSql = "CREATE DATABASE IF NOT EXISTS qc_db" dropTableSql = f"DROP TABLE IF EXISTS qc_db.{table_name}" createTableSql = f''' CREATE EXTERNAL TABLE IF NOT EXISTS qc_db.{table_name} ( Execution_Id string, Compute_Type string, Resolver string, Complexity integer, End_To_End_Time float, Running_Time float, Time_Info string, Start_Time string, Experiment_Name string, Task_Id string, Model_Name string, Model_FileName string, Scenario string, Resource string, Model_Param string, Opt_Param string, Create_Time string, Result_Detail string, Result_Location string ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n' LOCATION '{location}' ''' createViewSql = f"CREATE OR REPLACE VIEW qc_db.{view_name} AS SELECT h1.* FROM qc_db.{table_name} h1, (SELECT DISTINCT Execution_Id, Start_Time FROM qc_db.{table_name} ORDER BY Start_Time DESC LIMIT 20) h2 WHERE (h1.Execution_Id = h2.Execution_Id)" #nosec B608 querySql = f"SELECT * FROM qc_db.{view_name}" #nosec B608 sqlStmSeq = [createDBSql, dropTableSql, createTableSql, createViewSql, querySql] for sqlStm in sqlStmSeq: log.info(sqlStm) response = athena_client.start_query_execution( QueryString=sqlStm, ResultConfiguration={ 'OutputLocation': ATHENA_OUTPUT_LOCATION } ) execution_id = response['QueryExecutionId'] wait_for_complete(execution_id) log.info("all done") return { 'queryResult': ATHENA_OUTPUT_LOCATION, 'endTime': datetime.datetime.utcnow().isoformat() } def wait_for_complete(execution_id): log.info("execution_id:{}".format(execution_id)) response = athena_client.get_query_execution( QueryExecutionId=execution_id ) while True: status = response['QueryExecution']['Status'] log.info("State: {}".format(status['State'])) if status['State'] == 'SUCCEEDED': return status elif status['State'] in ['QUEUED', 'RUNNING']: time.sleep(3) response = athena_client.get_query_execution( QueryExecutionId=execution_id ) else: log.error(json.dumps(response, default=str)) raise Exception(json.dumps(response, default=str))
34.461538
265
0.677176
0
0
0
0
0
0
0
0
1,758
0.490513
f7b33150fa99668b4eb5ad17455848d84b07ab75
14,664
py
Python
osf/management/commands/populate_custom_taxonomies.py
gaybro8777/osf.io
30408511510a40bc393565817b343ef5fd76ab14
[ "Apache-2.0" ]
628
2015-01-15T04:33:22.000Z
2022-03-30T06:40:10.000Z
osf/management/commands/populate_custom_taxonomies.py
gaybro8777/osf.io
30408511510a40bc393565817b343ef5fd76ab14
[ "Apache-2.0" ]
4,712
2015-01-02T01:41:53.000Z
2022-03-30T14:18:40.000Z
osf/management/commands/populate_custom_taxonomies.py
Johnetordoff/osf.io
de10bf249c46cede04c78f7e6f7e352c69e6e6b5
[ "Apache-2.0" ]
371
2015-01-12T16:14:08.000Z
2022-03-31T18:58:29.000Z
import json import logging from django.core.management.base import BaseCommand from django.db import transaction from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject from osf.models.provider import rules_to_subjects from scripts import utils as script_utils from osf.models.validators import validate_subject_hierarchy from website.preprints.tasks import on_preprint_updated logger = logging.getLogger(__name__) BEPRESS_PROVIDER = None def validate_input(custom_provider, data, provider_type='osf.preprintprovider', copy=False, add_missing=False): # This function may be run outside of this command (e.g. in the admin app) so we # need to make sure that BEPRESS_PROVIDER is set global BEPRESS_PROVIDER BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first() logger.info('Validating data') includes = data.get('include', []) excludes = data.get('exclude', []) customs = data.get('custom', {}) merges = data.get('merge', {}) if copy: included_subjects = rules_to_subjects(custom_provider.subjects_acceptable) else: assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes' for text in includes: assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text) included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children() logger.info('Successfully validated `include`') for text in excludes: try: Subject.objects.get(provider=BEPRESS_PROVIDER, text=text) except Subject.DoesNotExist: raise RuntimeError('Unable to find excluded subject with text {}'.format(text)) assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text) included_subjects = included_subjects.exclude(text__in=excludes) logger.info('Successfully validated `exclude`') for cust_name, map_dict in customs.items(): assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name) assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress')) if map_dict.get('parent'): # Null parent possible assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent']) # TODO: hierarchy length validation? 
Probably more trouble than worth here, done on .save logger.info('Successfully validated `custom`') included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()]) for merged_from, merged_into in merges.items(): assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from) assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into) included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys()) missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in Preprint.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True)) if not add_missing: assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True))) if isinstance(custom_provider, PreprintProvider): assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.' logger.info('Successfully validated mapping completeness') return list(missing_subjects) if add_missing else None def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None): logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id)) bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text) custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider) custom_subj.save() # This is not a problem now, as all excluded subjects are leafs, but it could be problematic if non-leafs had their children excluded. # It could also be problematic if they didn't, if any of those children are used by existing preprints. # TODO: Determine correct resolution for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True): create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj) def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None): tries = 0 subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable)) if missing and add_missing: subjects_to_copy = subjects_to_copy + missing while len(subjects_to_copy): previous_len = len(subjects_to_copy) tries += 1 if tries == 10: raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy)) for subj in list(subjects_to_copy): if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text): subjects_to_copy.remove(subj) elif add_missing and subj.parent and subj.parent not in subjects_to_copy: # Dirty subjects_to_copy.append(subj.parent) previous_len += 1 else: logger.warn('Failed. 
Retrying next iteration') new_len = len(subjects_to_copy) if new_len == previous_len: raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy)) def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None): if copy: create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing) else: for root_text in includes: create_subjects_recursive(custom_provider, root_text, excludes) def map_custom_subject(custom_provider, name, parent, mapping): logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent')) if parent: parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first() else: parent_subject = None bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping) if parent and not parent_subject: return False custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject) custom_subject.save() return True def do_custom_mapping(custom_provider, customs): tries = 0 unmapped_customs = customs while len(unmapped_customs): previous_len = len(unmapped_customs) tries += 1 if tries == 10: raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input') successes = [] for cust_name, map_dict in unmapped_customs.items(): if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')): successes.append(cust_name) else: logger.warn('Failed. Retrying next iteration') [unmapped_customs.pop(key) for key in successes] new_len = len(unmapped_customs) if new_len == previous_len: raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input') def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False): for preprint in Preprint.objects.filter(provider=custom_provider): logger.info('Preparing to migrate preprint {}'.format(preprint.id)) old_hier = preprint.subject_hierarchy subjects_to_map = [hier[-1] for hier in old_hier] merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True)) subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys()) aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)] old_subjects = list(preprint.subjects.values_list('id', flat=True)) preprint.subjects.clear() for hier in aliased_hiers: validate_subject_hierarchy([s._id for s in hier]) for s in hier: preprint.subjects.add(s) # Update preprint in SHARE if not dry_run: on_preprint_updated(preprint._id, old_subjects=old_subjects) preprint.reload() new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())] logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier)) def migrate(provider=None, provider_type='osf.preprintprovider', share_title=None, data=None, dry_run=False, copy=False, add_missing=False): # This function may be run outside of this command (e.g. 
in the admin app) so we # need to make sure that BEPRESS_PROVIDER is set global BEPRESS_PROVIDER if not BEPRESS_PROVIDER: BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first() custom_provider = AbstractProvider.objects.filter(_id=provider, type=provider_type).first() assert custom_provider, 'Unable to find specified provider: {}'.format(provider) assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider' assert not custom_provider.subjects.exists(), 'Provider aldready has a custom taxonomy' if isinstance(custom_provider, PreprintProvider) and custom_provider.share_title in [None, '', 'bepress']: if not share_title: raise RuntimeError('`--share-title` is required if not already set on the provider') custom_provider.share_title = share_title custom_provider.save() missing = validate_input(custom_provider, data, provider_type=provider_type, copy=copy, add_missing=add_missing) do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing) do_custom_mapping(custom_provider, data.get('custom', {})) map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run) class Command(BaseCommand): def add_arguments(self, parser): super(Command, self).add_arguments(parser) parser.add_argument( '--dry', action='store_true', dest='dry_run', help='Run migration and roll back changes to db', ) parser.add_argument( '--data', action='store', dest='data', help='List of targets, of form {\n"include": [<list of subject texts to include at top level, children implicit>],' '\n"exclude": [<list of children to exclude from included trees>],' '\n"custom": [{"<Custom Name": {"parent": <Parent text>", "bepress": "<Bepress Name>"}}, ...]' '\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)", ...}}', ) parser.add_argument( '--provider', action='store', dest='provider', required=True, help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.' ) parser.add_argument( '--from-subjects-acceptable', action='store_true', dest='from_subjects_acceptable', help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used' ) parser.add_argument( '--add-missing', action='store_true', dest='add_missing', help='Adds "used-but-not-included" subjects.' ) parser.add_argument( '--share-title', action='store', type=str, dest='share_title', help='Sets <provider>.share_title. Ignored if already set on provider, required if not.' 
) parser.add_argument( '--type', action='store', type=str, dest='provider_type', help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]' ) def handle(self, *args, **options): global BEPRESS_PROVIDER provider_type = options.get('provider_type') or 'osf.preprintprovider' BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first() dry_run = options.get('dry_run') provider = options['provider'] data = json.loads(options['data'] or '{}') share_title = options.get('share_title') copy = options.get('from_subjects_acceptable') add_missing = options.get('add_missing') if copy: data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True)) if not dry_run: script_utils.add_file_logger(logger, __file__) with transaction.atomic(): migrate(provider=provider, share_title=share_title, provider_type=provider_type, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing) if dry_run: raise RuntimeError('Dry run, transaction rolled back.')
52.185053
228
0.694558
3,182
0.216994
0
0
0
0
0
0
3,819
0.260434
f7b36ab04da3147e45f62315611a09ce95152628
2,999
py
Python
examples/animated_rsh.py
sophiaas/e3nn
92351b9225df7aeaf70fdc124c7b0e566d4c0eda
[ "MIT" ]
1
2021-01-11T18:34:39.000Z
2021-01-11T18:34:39.000Z
examples/animated_rsh.py
sophiaas/e3nn
92351b9225df7aeaf70fdc124c7b0e566d4c0eda
[ "MIT" ]
null
null
null
examples/animated_rsh.py
sophiaas/e3nn
92351b9225df7aeaf70fdc124c7b0e566d4c0eda
[ "MIT" ]
null
null
null
# pylint: disable=not-callable, no-member, invalid-name, missing-docstring, line-too-long import math import os import subprocess import argparse import shutil import tqdm import plotly.graph_objs as go import torch from e3nn import o3, rsh def rsh_surface(l, m, scale, tr, rot): n = 50 a = torch.linspace(0, 2 * math.pi, 2 * n) b = torch.linspace(0, math.pi, n) a, b = torch.meshgrid(a, b) f = rsh.spherical_harmonics_alpha_beta([l], a, b) f = torch.einsum('ij,...j->...i', o3.irr_repr(l, *rot), f) f = f[..., l + m] r = o3.angles_to_xyz(a, b) x, y, z = r[:, :, 0], r[:, :, 1], r[:, :, 2] r = f.abs() x = scale * r * x + tr[0] y = scale * r * y + tr[1] z = scale * r * z + tr[2] max_value = 0.5 return go.Surface( x=x.numpy(), y=y.numpy(), z=z.numpy(), surfacecolor=f.numpy(), showscale=False, cmin=-max_value, cmax=max_value, colorscale=[[0, 'rgb(0,50,255)'], [0.5, 'rgb(200,200,200)'], [1, 'rgb(255,50,0)']], ) def main(lmax, resolution, steps): scale = 0.5 * math.sqrt(4 * math.pi) / math.sqrt(2 * lmax + 1) axis = dict( showbackground=False, showticklabels=False, showgrid=False, zeroline=False, title='', nticks=3, range=[-lmax / 2 - 0.5, lmax / 2 + 0.5] ) layout = dict( width=resolution, height=resolution, scene=dict( xaxis=axis, yaxis=axis, zaxis=axis, aspectmode='manual', aspectratio=dict(x=1, y=1, z=1), camera=dict( up=dict(x=0, y=0, z=1), center=dict(x=0, y=0, z=0), eye=dict(x=0, y=-1.3, z=0), projection=dict(type='perspective'), ), ), paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', margin=dict(l=0, r=0, t=0, b=0) ) if os.path.exists('sh'): shutil.rmtree('sh') os.makedirs('sh') for i in tqdm.tqdm(range(steps)): rot = 2 * math.pi * i / steps a, b, c = 0, math.pi / 4, 0 abc = o3.compose(-c, -b, -a, *o3.compose(0, 0, rot, a, b, c)) surfaces = [ rsh_surface(l, m, scale, [l + (m if m < 0 else 0) - lmax / 2, 0, lmax / 2 - l + (m if m > 0 else 0)], abc) for l in range(lmax + 1) for m in range(-l, l + 1) ] fig = go.Figure(surfaces, layout=layout) fig.write_image('sh/{:03d}.png'.format(i)) subprocess.check_output(["convert", "-delay", "3", "-loop", "0", "-dispose", "2", "sh/*.png", "output.gif"]) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--lmax", type=int, default=2) parser.add_argument("--resolution", type=int, default=500) parser.add_argument("--steps", type=int, default=30) args = parser.parse_args() main(args.lmax, args.resolution, args.steps)
26.776786
118
0.518506
0
0
0
0
0
0
0
0
338
0.112704
f7b44e0603289410fe1b212dcf1e2a0ad54c9500
62
py
Python
errores.py
fbzavaleta/DS_Software_Stack
37cb42c129a6ff4e04704b90cd5b13db3ad9dfcf
[ "MIT" ]
null
null
null
errores.py
fbzavaleta/DS_Software_Stack
37cb42c129a6ff4e04704b90cd5b13db3ad9dfcf
[ "MIT" ]
null
null
null
errores.py
fbzavaleta/DS_Software_Stack
37cb42c129a6ff4e04704b90cd5b13db3ad9dfcf
[ "MIT" ]
null
null
null
# E_LEN = "No es posible operar vectores de diferente módulo"
20.666667
59
0.758065
0
0
0
0
0
0
0
0
53
0.84127
f7b491ed05c90e96397d418234149764a3bc7143
8,449
py
Python
dataset.py
gzaraunitn/TA3N
d83ae5d9c8f4452ff69dd9002bb4016a695a4be8
[ "MIT" ]
null
null
null
dataset.py
gzaraunitn/TA3N
d83ae5d9c8f4452ff69dd9002bb4016a695a4be8
[ "MIT" ]
null
null
null
dataset.py
gzaraunitn/TA3N
d83ae5d9c8f4452ff69dd9002bb4016a695a4be8
[ "MIT" ]
null
null
null
import torch.utils.data as data import os import os.path import numpy as np from numpy.random import randint import torch from colorama import init from colorama import Fore, Back, Style import random from os import listdir from os.path import join, splitext import numpy as np import torch import torch.nn.functional as F import torchvision.transforms.functional as TF from PIL import Image, ImageFilter, ImageFile from torch.utils.data import DataLoader, Dataset from torchvision import transforms init(autoreset=True) class VideoRecord(object): def __init__(self, row): self._data = row @property def path(self): return self._data[0] @property def num_frames(self): return int(self._data[1]) @property def label(self): return int(self._data[2]) class TSNDataSet(data.Dataset): def __init__(self, root_path, list_file, num_dataload, num_segments=3, new_length=1, modality='RGB', image_tmpl='img_{:05d}.t7', transform=None, force_grayscale=False, random_shift=True, test_mode=False): self.root_path = root_path self.list_file = list_file self.num_segments = num_segments self.new_length = new_length self.modality = modality self.image_tmpl = image_tmpl self.transform = transform self.random_shift = random_shift self.test_mode = test_mode self.num_dataload = num_dataload if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus': self.new_length += 1 # Diff needs one more image to calculate diff self._parse_list() # read all the video files def _load_feature(self, directory, idx): if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus': feat_path = os.path.join(directory, self.image_tmpl.format(idx)) try: feat = [torch.load(feat_path)] except: print(Back.RED + feat_path) return feat elif self.modality == 'Flow': x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx))) y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx))) return [x_feat, y_feat] def _parse_list(self): self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)] # repeat the list if the length is less than num_dataload (especially for target data) n_repeat = self.num_dataload//len(self.video_list) n_left = self.num_dataload%len(self.video_list) self.video_list = self.video_list*n_repeat + self.video_list[:n_left] def _sample_indices(self, record): """ :param record: VideoRecord :return: list """ #np.random.seed(1) average_duration = (record.num_frames - self.new_length + 1) // self.num_segments if average_duration > 0: offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments) elif record.num_frames > self.num_segments: offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments)) else: offsets = np.zeros((self.num_segments,)) return offsets + 1 def _get_val_indices(self, record): num_min = self.num_segments + self.new_length - 1 num_select = record.num_frames - self.new_length + 1 if record.num_frames >= num_min: tick = float(num_select) / float(self.num_segments) offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]) else: offsets = np.zeros((self.num_segments,)) return offsets + 1 def _get_test_indices(self, record): num_min = self.num_segments + self.new_length - 1 num_select = record.num_frames - self.new_length + 1 if record.num_frames >= num_min: tick = float(num_select) / float(self.num_segments) offsets = np.array([int(tick / 2.0 + tick 
* float(x)) for x in range(self.num_segments)]) # pick the central frame in each segment else: # the video clip is too short --> duplicate the last frame id_select = np.array([x for x in range(num_select)]) # expand to the length of self.num_segments with the last element id_expand = np.ones(self.num_segments-num_select,dtype=int)*id_select[id_select[0]-1] offsets = np.append(id_select, id_expand) return offsets + 1 def __getitem__(self, index): record = self.video_list[index] if not self.test_mode: segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record) else: segment_indices = self._get_test_indices(record) return self.get(record, segment_indices) def get(self, record, indices): frames = list() for seg_ind in indices: p = int(seg_ind) for i in range(self.new_length): seg_feats = self._load_feature(record.path, p) frames.extend(seg_feats) if p < record.num_frames: p += 1 # process_data = self.transform(frames) process_data = torch.stack(frames) return process_data, record.label def __len__(self): return len(self.video_list) class VideoDataset(data.Dataset): def __init__( self, folder, n_frames, frame_size=224, separator="_" ): self.folder = folder self.num_segments = n_frames self.frame_size = frame_size self.data_transform = transforms.Compose( [ transforms.Resize(self.frame_size), transforms.CenterCrop(self.frame_size), transforms.ToTensor(), transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ), ] ) self.separator = separator self.classes = [c for c in sorted(listdir(folder))] self.videos_with_classes = [] for c_index, c in enumerate(self.classes): c_path = join(self.folder, c) videos = listdir(c_path) for v in videos: v_path = join(c_path, v) num_frames = len(listdir(v_path)) if num_frames >= self.num_segments: pair = (v_path, c_index) self.videos_with_classes.append(pair) def _get_test_indices(self, num_frames): num_min = self.num_segments num_select = num_frames if num_frames >= num_min: tick = float(num_select) / float(self.num_segments) offsets = np.array( [int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)] ) # pick the central frame in each segment else: # the video clip is too short --> duplicate the last frame id_select = np.array([x for x in range(num_select)]) # expand to the length of self.num_segments with the last element id_expand = ( np.ones(self.num_segments - num_select, dtype=int) * id_select[id_select[0] - 1] ) offsets = np.append(id_select, id_expand) return offsets def __getitem__(self, index): video, label = self.videos_with_classes[index] frames_temp = sorted( listdir(video), key=lambda path: int(path.split(self.separator)[-1].split(".")[0]), ) frames = [f for f in frames_temp if f.endswith('jpg') or f.endswith('jpeg')] num_frames = len(frames) data = [] segment_indices = self._get_test_indices(num_frames) for index in segment_indices: frame = frames[index] frame_path = join(video, frame) frame_img = Image.open(frame_path) frame_feat = self.data_transform(frame_img) data.append(frame_feat) tensor = torch.stack(data) return tensor, label def __len__(self): return len(self.videos_with_classes)
34.345528
142
0.610486
7,916
0.936916
0
0
191
0.022606
0
0
734
0.086874
f7b50b715b179630c9fcdafb1ce4cd54b3be0ee5
423
py
Python
edm_web1/middleware/errors.py
zhouli121018/nodejsgm
0ccbc8acf61badc812f684dd39253d55c99f08eb
[ "MIT" ]
null
null
null
edm_web1/middleware/errors.py
zhouli121018/nodejsgm
0ccbc8acf61badc812f684dd39253d55c99f08eb
[ "MIT" ]
18
2020-06-05T18:17:40.000Z
2022-03-11T23:25:21.000Z
edm_web1/middleware/errors.py
zhouli121018/nodejsgm
0ccbc8acf61badc812f684dd39253d55c99f08eb
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from django.http import HttpResponseForbidden
from django.template import loader
from django.utils.translation import ugettext_lazy as _


# 普通用户 (ordinary users)
def _requred_forbid(msg):
    t = loader.get_template('limit_ip.html')
    content = t.render({'message': msg})
    return HttpResponseForbidden(content)


_msg = _(u'请求太频繁,请等待30s后重试(Request too often)。')
limitip_requred_forbid = _requred_forbid(_msg)
26.4375
55
0.754137
0
0
0
0
0
0
0
0
125
0.273523
f7b55905ea97e096b70cfda1b4ce991e067b06eb
151
py
Python
data/windows/dr16/mask.py
dnidever/apogee
83ad7496a0b4193df9e2c01b06dc36cb879ea6c1
[ "BSD-3-Clause" ]
5
2019-04-11T13:35:24.000Z
2019-11-14T06:12:51.000Z
data/windows/dr16/mask.py
dnidever/apogee
83ad7496a0b4193df9e2c01b06dc36cb879ea6c1
[ "BSD-3-Clause" ]
null
null
null
data/windows/dr16/mask.py
dnidever/apogee
83ad7496a0b4193df9e2c01b06dc36cb879ea6c1
[ "BSD-3-Clause" ]
5
2018-09-20T22:07:43.000Z
2021-01-15T07:13:38.000Z
from apogee.aspcap import aspcap
from apogee.aspcap import mask

els = aspcap.elems()
for el in els[0]:
    mask.mkmask(el, globalmask='mask_v02_aspcap.txt')
25.166667
66
0.788079
0
0
0
0
0
0
0
0
21
0.139073
f7b6cec7ff18c898066933b6660bdaa93907b21d
7,368
py
Python
dragonfly/opt/unittest_cp_random_multiobjective_optimiser.py
anonymous-submission000/mobo
090f774d742c7155c5e5ba01c10e7db7b93b6a0a
[ "MIT" ]
1
2022-02-17T08:50:47.000Z
2022-02-17T08:50:47.000Z
dragonfly/opt/unittest_cp_random_multiobjective_optimiser.py
anonymous-submission000/mobo
090f774d742c7155c5e5ba01c10e7db7b93b6a0a
[ "MIT" ]
null
null
null
dragonfly/opt/unittest_cp_random_multiobjective_optimiser.py
anonymous-submission000/mobo
090f774d742c7155c5e5ba01c10e7db7b93b6a0a
[ "MIT" ]
null
null
null
""" Unit tests for Random CP optimiser on Cartesian product domains. -- kandasamy@cs.cmu.edu """ # pylint: disable=invalid-name # pylint: disable=abstract-class-little-used import os from . import random_multiobjective_optimiser from ..exd.cp_domain_utils import get_raw_point_from_processed_point, \ load_config_file from ..exd.experiment_caller import get_multifunction_caller_from_config from ..exd.worker_manager import SyntheticWorkerManager # Local imports from ..test_data.multiobjective_hartmann.multiobjective_hartmann \ import objectives as moo_hartmann from ..test_data.multiobjective_park.multiobjective_park \ import objectives as moo_park from ..utils.base_test_class import BaseTestClass, execute_tests from ..utils.reporters import get_reporter class CPMultiObjectiveOptimiserBaseTestCase(object): """ Base test class for optimisers on Cartesian product spaces. """ # pylint: disable=no-member def setUp(self): """ Set up. """ self.max_capital = 20 self._child_set_up() self.worker_manager_1 = SyntheticWorkerManager(1, time_distro='const') self.worker_manager_3 = SyntheticWorkerManager(3, time_distro='halfnormal') file_dir = os.path.dirname(os.path.realpath(__file__)) test_data_pardir = os.path.dirname(file_dir) self.opt_problems = [ (test_data_pardir + '/test_data/multiobjective_hartmann/config.json', (moo_hartmann,)), (test_data_pardir + '/test_data/multiobjective_park/config.json', (moo_park,)), ] def _child_set_up(self): """ Child set up. """ pass @classmethod def _child_instantiate_optimiser(cls, func_caller, worker_manager, options, reporter): """ Instantiate the optimiser. """ raise NotImplementedError('Implement in a child class.') @classmethod def _run_optimiser(cls, raw_funcs, domain_config_file, worker_manager, max_capital, mode, *args, **kwargs): """ Run the optimiser from given args. """ raise NotImplementedError('Implement in a child class.') def test_instantiation(self): """ Tests instantiation of the optimiser. """ self.report('Test instantiation of multi-objective optimiser.') for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems): self.report('[%d/%d] Testing instantiation of optimiser for %s.' % ( idx + 1, len(self.opt_problems), dcf), 'test_result') config = load_config_file(dcf) multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config) optimiser = self._child_instantiate_optimiser( multi_func_caller, self.worker_manager_3, options=None, reporter=get_reporter('silent')) self.report('Instantiated %s object.' % (type(optimiser))) for attr in dir(optimiser): if not attr.startswith('_'): self.report('optimiser.%s = %s' % (attr, str(getattr(optimiser, attr))), 'test_result') def _test_optimiser_results(self, raw_prob_funcs, pareto_vals, pareto_points, history, dcf): """ Tests optimiser results. """ config = load_config_file(dcf) multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config) raw_pareto_points = [get_raw_point_from_processed_point(pop, config.domain, config.domain_orderings.index_ordering, config.domain_orderings.dim_ordering) for pop in pareto_points] self.report('Pareto opt point [-1]: proc=%s, raw=%s.' % (pareto_points[-1], raw_pareto_points[-1])) saved_in_history = [key for key, _ in list(history.__dict__.items()) if not key.startswith('__')] self.report('Stored in history: %s.' 
% (saved_in_history), 'test_result') assert len(history.curr_pareto_vals) == len(history.curr_pareto_points) for val in pareto_vals: assert len(val) == multi_func_caller.num_funcs for pt in pareto_points: assert len(pt) == config.domain.num_domains self.report('Pareto optimal points: %s.' % (pareto_points)) self.report('Pareto optimal values: %s.' % (pareto_vals)) def test_optimisation_single(self): """ Test optimisation with a single worker. """ self.report('') self.report('Testing %s with one worker.' % (type(self))) for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems): self.report('[%d/%d] Testing optimisation with 1 worker on %s.' % ( idx + 1, len(self.opt_problems), dcf), 'test_result') self.worker_manager_1.reset() pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf, self.worker_manager_1, self.max_capital, 'asy') self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history, dcf) self.report('') def test_optimisation_asynchronous(self): """ Testing random optimiser with three asynchronous workers. """ self.report('') self.report('Testing %s with three asynchronous workers.' % (type(self))) for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems): self.report('[%d/%d] Testing optimisation with 3 asynchronous workers on %s.' % ( idx + 1, len(self.opt_problems), dcf), 'test_result') self.worker_manager_3.reset() pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf, self.worker_manager_3, self.max_capital, 'asy') self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history, dcf) self.report('') class CPRandomMultiObjectiveOptimiserTestCase( CPMultiObjectiveOptimiserBaseTestCase, BaseTestClass): """ Unit tests for random multi-objective optimisation. """ @classmethod def _child_instantiate_optimiser(cls, multi_func_caller, worker_manager, options, reporter): """ Instantiate optimiser. """ return random_multiobjective_optimiser.CPRandomMultiObjectiveOptimiser( multi_func_caller, worker_manager, options, reporter) @classmethod def _run_optimiser(cls, raw_prob_funcs, domain_config_file, worker_manager, max_capital, mode, *args, **kwargs): """ Runs multi-objective optimiser. """ rmoo = random_multiobjective_optimiser return rmoo.cp_random_multiobjective_optimisation_from_raw_args(raw_prob_funcs, domain_config_file, worker_manager, max_capital, mode, *args, **kwargs) if __name__ == '__main__': execute_tests()
48.794702
120
0.621607
6,532
0.886536
0
0
1,356
0.184039
0
0
1,448
0.196526
f7b7563d85b1f23ad406817127e2c0f401a6930a
2,817
py
Python
corehq/apps/app_manager/app_schemas/casedb_schema.py
dimagilg/commcare-hq
ea1786238eae556bb7f1cbd8d2460171af1b619c
[ "BSD-3-Clause" ]
1
2020-07-14T13:00:23.000Z
2020-07-14T13:00:23.000Z
corehq/apps/app_manager/app_schemas/casedb_schema.py
dimagilg/commcare-hq
ea1786238eae556bb7f1cbd8d2460171af1b619c
[ "BSD-3-Clause" ]
94
2020-12-11T06:57:31.000Z
2022-03-15T10:24:06.000Z
corehq/apps/app_manager/app_schemas/casedb_schema.py
dimagilg/commcare-hq
ea1786238eae556bb7f1cbd8d2460171af1b619c
[ "BSD-3-Clause" ]
null
null
null
from corehq import toggles
from corehq.apps.app_manager.app_schemas.case_properties import (
    ParentCasePropertyBuilder,
    get_usercase_properties,
)
from corehq.apps.app_manager.const import USERCASE_TYPE
from corehq.apps.app_manager.util import is_usercase_in_use
from corehq.apps.data_dictionary.util import get_case_property_description_dict


def get_casedb_schema(form):
    """Get case database schema definition for vellum to display as an external data source.

    This lists all case types and their properties for the given app.
    """
    app = form.get_app()
    base_case_type = form.get_module().case_type if form.requires_case() else None
    builder = ParentCasePropertyBuilder.for_app(app, ['case_name'], include_parent_properties=False)
    related = builder.get_parent_type_map(None)
    map = builder.get_properties_by_case_type()
    descriptions_dict = get_case_property_description_dict(app.domain)

    if base_case_type:
        # Generate hierarchy of case types, represented as a list of lists of strings:
        # [[base_case_type], [parent_type1, parent_type2...], [grandparent_type1, grandparent_type2...]]
        # Vellum case management only supports three levels
        generation_names = ['case', 'parent', 'grandparent']
        generations = [[] for g in generation_names]

        def _add_ancestors(ctype, generation):
            if generation < len(generation_names):
                generations[generation].append(ctype)
                for parent in related.get(ctype, {}).get('parent', []):
                    _add_ancestors(parent, generation + 1)

        _add_ancestors(base_case_type, 0)

        # Remove any duplicate types or empty generations
        generations = [set(g) for g in generations if len(g)]
    else:
        generations = []

    subsets = [{
        "id": generation_names[i],
        "name": "{} ({})".format(generation_names[i], " or ".join(ctypes)) if i > 0 else base_case_type,
        "structure": {
            p: {"description": descriptions_dict.get(t, {}).get(p, '')}
            for t in ctypes for p in map[t]},
        "related": {"parent": {
            "hashtag": "#case/" + generation_names[i + 1],
            "subset": generation_names[i + 1],
            "key": "@case_id",
        }} if i < len(generations) - 1 else None,
    } for i, ctypes in enumerate(generations)]

    if is_usercase_in_use(app.domain):
        subsets.append({
            "id": USERCASE_TYPE,
            "name": "user",
            "key": "@case_type",
            "structure": {p: {} for p in get_usercase_properties(app)[USERCASE_TYPE]},
        })

    return {
        "id": "casedb",
        "uri": "jr://instance/casedb",
        "name": "case",
        "path": "/casedb/case",
        "structure": {},
        "subsets": subsets,
    }
39.125
104
0.635428
0
0
0
0
0
0
0
0
730
0.259141
f7b75acf0297c3ab2601bc579ad2b3528994326d
28
py
Python
python/testData/keywordCompletion/noMatchInCondition.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
python/testData/keywordCompletion/noMatchInCondition.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
python/testData/keywordCompletion/noMatchInCondition.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
matches = True if mat<caret>
14
14
0.75
0
0
0
0
0
0
0
0
0
0
f7b8c19ee74b54f26fc920af5e0e656df23e85a5
3,597
py
Python
bookshelf/bookshelf/model_aerospike.py
fakeskimo/as2bt
0872192e703a2992dea7bee2bf2544727d6094ee
[ "Apache-2.0" ]
null
null
null
bookshelf/bookshelf/model_aerospike.py
fakeskimo/as2bt
0872192e703a2992dea7bee2bf2544727d6094ee
[ "Apache-2.0" ]
null
null
null
bookshelf/bookshelf/model_aerospike.py
fakeskimo/as2bt
0872192e703a2992dea7bee2bf2544727d6094ee
[ "Apache-2.0" ]
null
null
null
import math import aerospike from aerospike import predicates as p from aerospike import exception as ex from flask import current_app aerospike_host = current_app.config['AEROSPIKE_HOST'] aerospike_port = current_app.config['AEROSPIKE_PORT'] namespace = current_app.config['AEROSPIKE_NAMESPACE'] set_name = current_app.config['AEROSPIKE_SET_NAME'] n_replicas = 1 config = { 'hosts': [ (aerospike_host, aerospike_port) ], 'policies': { 'timeout': 1000 # milliseconds } } client = aerospike.client(config).connect() # cannot limit the number of rows, only percent # there is no start offset option # https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2 # https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532 # https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query def init_app(app): pass # if there is no more record, return -1 as next def list(limit=10, cursor=None): if cursor: start = int(cursor) else: start = 0 end = start + limit records = [] for i in range(start, end): rec = read(str(i)) if rec: records.append(rec) if end >= __get_objs_cnt__(): next_key = -1 else: next_key = len(records) return records, next_key # cannot limit the number of rows, only percent # there is no start offset option # https://discuss.aerospike.com/t/can-you-limit-the-number-of-returned-records/1330/2 # https://discuss.aerospike.com/t/official-as-approach-to-pagination/2532 # https://stackoverflow.com/questions/25927736/limit-number-of-records-in-aerospike-select-query # if there is no more record, return -1 as next def list_by_user(user_id, limit=10, cursor=None): if cursor: start = cursor else: start = 0 query = client.query(namespace, set_name) query.where(p.equals('createdById', user_id)) records = [] results = query.results() if cursor: start = cursor else: start = 0 cnt = 0 records = [] for i, result in enumerate(results): if cnt >= limit: break if i < start: continue else: rec = result[2] records.append(rec) cnt += 1 if cnt == limit: next_key = cnt else: next_key = -1 return records, next_key def __get_objs_cnt__(): info = client.info("sets" + "/" + namespace + "/" + set_name) for value in info.values(): info_str = value[1] try: start_idx = info_str.index("=") + 1 end_idx = info_str.index(":") n_str = info_str[start_idx:end_idx] return math.ceil(int(n_str) / n_replicas) except ValueError: return 0 def create(data, id=None): if id: key = str(id) else: key = str(__get_objs_cnt__()) data['id'] = key client.put((namespace, set_name, key), data) return read(key) def read(id): try: (key, metadata) = client.exists((namespace, set_name, id)) (key, metadata, record) = client.get((namespace, set_name, id)) return record except ex.RecordNotFound: print("Record not found:", id) return None except ex.AerospikeError as e: print("Error: {0} [{1}]".format(e.msg, e.code)) return None def update(data, id): if client.exists((namespace, set_name, id)): delete(id) return create(data, id) def delete(id): client.remove((namespace, set_name, id))
22.341615
96
0.626633
0
0
0
0
0
0
0
0
947
0.263275
f7b8e6d755230cb8c58e980bba16ad5edecee7d7
1,437
py
Python
examples/EC2.py
nimRobotics/fnirslib
0273c0da5f4a41d7cf4dac0fc9686c38f2c7b0cd
[ "MIT" ]
null
null
null
examples/EC2.py
nimRobotics/fnirslib
0273c0da5f4a41d7cf4dac0fc9686c38f2c7b0cd
[ "MIT" ]
null
null
null
examples/EC2.py
nimRobotics/fnirslib
0273c0da5f4a41d7cf4dac0fc9686c38f2c7b0cd
[ "MIT" ]
null
null
null
""" author: @nimrobotics description: calculates the effective connectivity between regions and plots them """ import numpy as np import scipy.io import glob import sys sys.path.append('../utils') from plots import plotData dir = "./process3/" #directory of the data outdir = 'process3/' #directory to save the plots regions = 3 #number of regions files = glob.glob(dir+'/*_.mat') # get all the files in the directory for file in files: print('Processing condition: ', file) data = scipy.io.loadmat(file) #load data from the directory fval = data['fval'] #fval pval = data['pval'] #pval sig = data['sig'] #sig cd = data['cd'] #cd print('fval shape: ',fval.shape) print('\nfval \n',fval) print('pval shape: ',pval.shape) print('sig shape: ',sig.shape) print('\nsig \n',sig) print(cd.shape) # elementwise multiplication of fval and sig(0/1) fval_sig = np.multiply(fval, sig) print(fval_sig.shape) print('\nfval_sig \n',fval_sig) # fval_sig = np.mean(fval_sig, axis=2) # average over files # print(fval_sig.shape) # fval = np.mean(fval, axis=2) labels = ['PFC', 'PM-MC', 'VC'] #labels for the regions condition = file.split('/')[-1].split('.')[0] #get the condition name plot = plotData(fval_sig, labels, outdir, colormap='viridis', dpi=300, title='EC: '+condition, filename='EC_'+condition +'.png') plot.matrixPlot() plot.circularPlot()
31.933333
133
0.659708
0
0
0
0
0
0
0
0
680
0.473208
f7b9749cf050209379cfad2f528020cbb5090d82
263
py
Python
feed/models.py
Lisgevan/DJANGO-101-PROJECT-COPY
01655b30682efd435d91e85223af0fd6186e6a59
[ "MIT" ]
null
null
null
feed/models.py
Lisgevan/DJANGO-101-PROJECT-COPY
01655b30682efd435d91e85223af0fd6186e6a59
[ "MIT" ]
null
null
null
feed/models.py
Lisgevan/DJANGO-101-PROJECT-COPY
01655b30682efd435d91e85223af0fd6186e6a59
[ "MIT" ]
null
null
null
from django.db import models from sorl.thumbnail import ImageField # Create your models here. class Post(models.Model): text = models.CharField(max_length=140, blank=False, null=False) image = ImageField() def __str__(self): return self.text
26.3
68
0.726236
168
0.638783
0
0
0
0
0
0
26
0.098859
f7bb92af288264a3c094d6c7636074324c8ab56d
12,847
py
Python
gcp/docker/infrastructure/rapids_lib.py
ethem-kinginthenorth/cloud-ml-examples
e434d2bdbf2adf058dc436f992a56585537dc8ab
[ "Apache-2.0" ]
1
2022-03-23T05:10:45.000Z
2022-03-23T05:10:45.000Z
gcp/docker/infrastructure/rapids_lib.py
ethem-kinginthenorth/cloud-ml-examples
e434d2bdbf2adf058dc436f992a56585537dc8ab
[ "Apache-2.0" ]
null
null
null
gcp/docker/infrastructure/rapids_lib.py
ethem-kinginthenorth/cloud-ml-examples
e434d2bdbf2adf058dc436f992a56585537dc8ab
[ "Apache-2.0" ]
null
null
null
# os import sys, os, time, logging # CPU DS stack import pandas as pd import numpy as np import sklearn # GPU DS stack [ rapids ] import gcsfs # scaling library import dask # data ingestion [ CPU ] from pyarrow import orc as pyarrow_orc # ML models from sklearn import ensemble import xgboost # data set splits from sklearn.model_selection import train_test_split as sklearn_train_test_split # device query ##hack try: import cudf, cuml from cuml.preprocessing.model_selection import train_test_split as cuml_train_test_split import pynvml import cupy except: print("Caught import failures -- probably missing GPU") # memory query import psutil # i/o import logging, json, pprint default_sagemaker_paths = { 'base': '/opt/ml', 'code': '/opt/ml/code', 'data': '/opt/ml/input', 'train_data': '/opt/ml/input/data/training', 'hyperparams': '/opt/ml/input/config/hyperparameters.json', 'model': '/opt/ml/model', 'output': '/opt/ml/output', } class RapidsCloudML(object): def __init__(self, cloud_type='AWS', model_type='XGBoost', data_type='ORC', compute_type='single-GPU', n_workers=-1, verbose_estimator=False, CSP_paths=default_sagemaker_paths): self.CSP_paths = CSP_paths self.cloud_type = cloud_type self.model_type = model_type self.data_type = data_type self.compute_type = compute_type self.verbose_estimator = verbose_estimator self.n_workers = self.parse_compute(n_workers) self.query_memory() def _read_orc(self, filename): if ('CPU' in self.compute_type): if (filename.startswith('gs://')): fs = gcsfs.GCSFileSystem() with fs.open(filename, mode='rb') as file: dataset = pyarrow_orc.ORCFile(file).read().to_pandas() else: with open(filename, mode='rb') as file: dataset = pyarrow_orc.ORCFile(file).read().to_pandas() elif ('GPU' in self.compute_type): dataset = cudf.read_orc(filename) return dataset def _read_csv(self, filename, col_labels): if ('CPU' in self.compute_type): dataset = pd.read_csv(filename, names=col_labels) elif ('GPU' in self.compute_type): dataset = cudf.read_csv(filename, names=col_labels) return dataset def load_data(self, filename='dataset.orc', col_labels=None, y_label='ArrDelayBinary'): target_filename = self.CSP_paths['train_data'] + '/' + filename self.log_to_file(f'\n> loading dataset from {target_filename}...\n') with PerfTimer() as ingestion_timer: if 'ORC' in self.data_type: dataset = self._read_orc(target_filename) elif 'CSV' in self.data_type: dataset = self._read_csv(target_filename, names=col_labels) self.log_to_file(f'ingestion completed in {ingestion_timer.duration}') self.log_to_file(f'dataset descriptors: {dataset.shape}\n {dataset.dtypes}\n {dataset.columns}\n') return dataset, col_labels, y_label, ingestion_timer.duration def split_data(self, dataset, y_label, train_size=.8, random_state=0, shuffle=True): """ split dataset into train and test subset NOTE: assumes the first column of the dataset is the classification labels ! in the case of sklearn, we manually filter this column in the split call ! 
in the case of cuml, the filtering happens internally """ self.log_to_file('\tsplitting train and test data') start_time = time.perf_counter() with PerfTimer() as split_timer: if 'CPU' in self.compute_type: X_train, X_test, y_train, y_test = sklearn_train_test_split(dataset.loc[:, dataset.columns != y_label], dataset[y_label], train_size=train_size, shuffle=shuffle, random_state=random_state) elif 'GPU' in self.compute_type: X_train, X_test, y_train, y_test = cuml_train_test_split(X=dataset, y=y_label, train_size=train_size, shuffle=shuffle, random_state=random_state) self.log_to_file(f'\t> split completed in {split_timer.duration}') return X_train, X_test, y_train, y_test, split_timer.duration def train_model(self, X_train, y_train, model_params): self.log_to_file(f'\ttraining {self.model_type} estimator w/ hyper-params') pprint.pprint(model_params, indent=10) print(f"model type: {self.model_type}\n compute type: {self.compute_type}\n dataset dtype: {type(X_train)}") try: if self.model_type == 'XGBoost': trained_model, training_time = self.fit_xgboost(X_train, y_train, model_params) elif self.model_type == 'RandomForest': trained_model, training_time = self.fit_random_forest(X_train, y_train, model_params) except Exception as error: self.log_to_file('!error during model training: ' + str(error)) raise self.log_to_file(f'\t> finished training in {training_time:.4f} s') return trained_model, training_time # train dlmc.xgboost model def fit_xgboost(self, X_train, y_train, model_params): with PerfTimer() as train_timer: train_DMatrix = xgboost.DMatrix(data=X_train, label=y_train) trained_model = xgboost.train(dtrain=train_DMatrix, params=model_params, num_boost_round=model_params['num_boost_round'], verbose_eval=self.verbose_estimator) return trained_model, train_timer.duration # fit_xgboost_multi_GPU () # fit_random_forest_multi_GPU () # train cuml.random-forest model def fit_random_forest(self, X_train, y_train, model_params): if 'CPU' in self.compute_type: rf_model = sklearn.ensemble.RandomForestClassifier(n_estimators=model_params['n_estimators'], max_depth=model_params['max_depth'], max_features=model_params['max_features'], n_jobs=int(self.n_workers), verbose=self.verbose_estimator) elif 'GPU' in self.compute_type: rf_model = cuml.ensemble.RandomForestClassifier(n_estimators=model_params['n_estimators'], max_depth=model_params['max_depth'], n_bins=model_params['n_bins'], max_features=model_params['max_features'], verbose=self.verbose_estimator) with PerfTimer() as train_timer: trained_model = rf_model.fit(X_train, y_train) return trained_model, train_timer.duration def evaluate_test_perf(self, trained_model, X_test, y_test): self.log_to_file(f'\tinferencing on test set') with PerfTimer() as inference_timer: try: if self.model_type == 'XGBoost': test_DMatrix = xgboost.DMatrix(data=X_test, label=y_test) test_accuracy = 1 - float(trained_model.eval(test_DMatrix).split(':')[1]) elif self.model_type == 'RandomForest': # y_test = cudf.DataFrame({'label': y_test.astype('int32') }) test_accuracy = trained_model.score(X_test, y_test.astype('int32')) except Exception as error: self.log_to_file('!error during inference: ' + str(error)) raise self.log_to_file(f'\t> finished inference in {inference_timer.duration:.4f} s') return test_accuracy, inference_timer.duration # TODO: FIL inference [ ? 
] # evaluate_perf_FIL(self, trained_model, X_test, y_test ): # TODO: global_best_model.save() def save_best_model(self, global_best_model=None): pass # ------------------------------------------------------ # end of data science logic # ------------------------------------------------------ def parse_compute(self, n_workers=None): if 'CPU' in self.compute_type or 'GPU' in self.compute_type: available_devices = self.query_compute() if n_workers == -1: n_workers = available_devices assert (n_workers <= available_devices) self.log_to_file(f'compute type: {self.compute_type}, n_workers: {n_workers}') else: raise Exception('unsupported compute type') return n_workers def query_compute(self): available_devices = None if 'CPU' in self.compute_type: available_devices = os.cpu_count() self.log_to_file(f'detected {available_devices} CPUs') elif 'GPU' in self.compute_type: available_devices = cupy.cuda.runtime.getDeviceCount() self.log_to_file(f'detected {available_devices} GPUs') return available_devices # TODO: enumerate all visible GPUs [ ? ] def query_memory(self): def print_device_memory(memory, device_ID=-1): memory_free_GB = np.array(memory.free) / np.array(10e8) memory_used_GB = np.array(memory.used) / np.array(10e8) memory_total_GB = np.array(memory.total) / np.array(10e8) if device_ID != -1: self.log_to_file(f'device ID = {device_ID}') self.log_to_file(f'memory free, used, total: {memory_free_GB}, {memory_used_GB}, {memory_total_GB}') if 'CPU' in self.compute_type: print_device_memory(psutil.virtual_memory()) elif 'GPU' in self.compute_type: pynvml.nvmlInit() for iGPU in range(self.n_workers): handle = pynvml.nvmlDeviceGetHandleByIndex(iGPU) print_device_memory(pynvml.nvmlDeviceGetMemoryInfo(handle)) def set_up_logging(self): logging_path = self.CSP_paths['output'] + '/log.txt' logging.basicConfig(filename=logging_path, level=logging.INFO) def log_to_file(self, text): logging.info(text) print(text) def environment_check(self): self.check_dirs() if self.cloud_type == 'AWS': try: self.list_files('/opt/ml') self.log_to_file(os.environ['SM_NUM_GPUS']) self.log_to_file(os.environ['SM_TRAINING_ENV']) self.log_to_file(os.environ['SM_CHANNEL_TRAIN']) self.log_to_file(os.environ['SM_HPS']) except: pass else: pass def check_dirs(self): self.log_to_file('\n> checking for sagemaker paths...\n') directories_to_check = self.CSP_paths for iDir, val in directories_to_check.items(): self.log_to_file(f'{val}, exists : {os.path.exists(val)}') self.log_to_file(f'working directory = {os.getcwd()}') def list_files(self, startpath): print(f'\n> listing contents of {startpath}\n') for root, dirs, files in os.walk(startpath): level = root.replace(startpath, '').count(os.sep) indent = ' ' * 4 * (level) print('{}{}/'.format(indent, os.path.basename(root))) subindent = ' ' * 4 * (level + 1) for f in files: print('{}{}'.format(subindent, f)) # perf_counter = highest available timer resolution class PerfTimer: def __init__(self): self.start = None self.duration = None def __enter__(self): self.start = time.perf_counter() return self def __exit__(self, *args): self.duration = time.perf_counter() - self.start ''' https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier.fit n_estimators=100, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, 
random_state=None, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None '''
38.57958
138
0.599829
11,281
0.878104
0
0
0
0
0
0
3,211
0.249942
f7bc4cc67a214b3d1cc41c823e3eb37e1f5d2531
5,011
py
Python
docs/making_widgets_from_scratch/line_clock.py
Rahuum/glooey
932edca1c8fdd710f1941038e47ac8d25a31a1a8
[ "MIT" ]
86
2016-11-28T12:34:28.000Z
2022-03-17T13:49:49.000Z
docs/making_widgets_from_scratch/line_clock.py
Rahuum/glooey
932edca1c8fdd710f1941038e47ac8d25a31a1a8
[ "MIT" ]
57
2017-03-07T10:11:52.000Z
2022-01-16T19:35:33.000Z
docs/making_widgets_from_scratch/line_clock.py
Rahuum/glooey
932edca1c8fdd710f1941038e47ac8d25a31a1a8
[ "MIT" ]
9
2017-03-15T18:55:50.000Z
2022-02-17T14:52:49.000Z
#!/usr/bin/env python3 import pyglet import glooey import autoprop import datetime from pyglet.gl import * from vecrec import Vector, Rect @autoprop class LineClock(glooey.Widget): custom_radius = 50 custom_color = 'green' custom_hour_hand_width = 3 custom_minute_hand_width = 2 custom_second_hand_width = 1 custom_face_border_width = 3 def __init__(self): super().__init__() # User-controlled attributes: self._radius = self.custom_radius self._color = self.custom_color # Internal attributes: self._face = None self._hands = { 'hour': glooey.drawing.Rectangle(), 'min': glooey.drawing.Rectangle(), 'sec': glooey.drawing.Rectangle(), } def get_radius(self): return self._radius def set_radius(self, radius): self._radius = radius self._repack() def get_color(self): return self._color def set_color(self, color): self._color = color self._draw() def on_update(self, dt): self._draw() def do_attach(self): # Update the clock ten times a second. pyglet.clock.schedule_interval(self.on_update, 1/10) def do_detach(self): pyglet.clock.unschedule(self.on_update) def do_claim(self): width = height = 2 * self.radius return width, height def do_regroup(self): if self._face is not None: self.batch.migrate( self._face, GL_TRIANGLE_STRIP, self.group, self.batch) for k in self._hands: self._hands[k].batch = self.batch self._hands[k].group = HandGroup(self) def do_draw(self): self.do_draw_face() self.do_draw_hands() def do_draw_face(self): N = 48 vertices = [] for i in range(N + 2): direction = Vector.from_degrees(360 * i / N) radius = self._radius - (i % 2 * self.custom_face_border_width) vertex = self.rect.center + radius * direction vertices += vertex.tuple # Insert duplicate vertices at the beginning and end of the list, # otherwise this triangle strip will end up connected to any other # triangle strips in the scene. vertices = vertices[:2] + vertices + vertices[-2:] num_vertices = len(vertices) // 2 color = glooey.drawing.Color.from_anything(self._color) colors = num_vertices * color.rgb # The vertex list for the face may or may not exist yet, e.g. if the # clock is being drawn for the first time or was previously being # hidden. So create the vertex list if we need to, otherwise just # update its coordinates. if self._face is None: self._face = self.batch.add( num_vertices, GL_TRIANGLE_STRIP, self.group, ('v2f', vertices), ('c3B', colors), ) else: self._face.vertices = vertices self._face.colors = colors def do_draw_hands(self): # We're hard-coding the radii of the hands here. Probably it would be # better to make separate attributes for these, but I think that would # start to detract from the clarity of the example. rects = { 'hour': Rect.from_size(self.custom_hour_hand_width, self.radius/2), 'min': Rect.from_size(self.custom_minute_hand_width, self.radius), 'sec': Rect.from_size(self.custom_second_hand_width, self.radius), } # The clock hands all start pointing towards 12:00, and the rotations # are clockwise, so 90° is 3:00, 180° is 6:00, 270° is 9:00, etc. 
now = datetime.datetime.now() angles = { 'hour': 360 * now.hour / 12, 'min': 360 * now.minute / 60, 'sec': 360 * now.second / 60, } for k in self._hands: rects[k].bottom = 0 rects[k].center_x = 0 self._hands[k].rect = rects[k] self._hands[k].group.angle = angles[k] self._hands[k].color = self._color self._hands[k].show() def do_undraw(self): if self._face is not None: self._face.delete() self._face = None for k in self._hands: self._hands[k].hide() class HandGroup(pyglet.graphics.Group): def __init__(self, clock): super().__init__(parent=clock.group) self.clock = clock self.angle = 0 def set_state(self): x, y = self.clock.rect.center clockwise = -1 glPushMatrix() glLoadIdentity() glTranslatef(x, y, 0) glRotatef(self.angle, 0, 0, clockwise) def unset_state(self): glPopMatrix() window = pyglet.window.Window() gui = glooey.Gui(window) gui.add(LineClock()) pyglet.app.run()
28.151685
79
0.580124
4,759
0.949142
0
0
4,340
0.865576
0
0
900
0.179497
f7bd078884fa7f447ad7081c6426bb1a2e21941b
625
py
Python
forms_builder/forms/migrations/0004_auto_20180727_1256.py
maqmigh/django-forms-builder
1a0068d1d07498f4a2e160c46ec85b9a5f2ddd98
[ "BSD-2-Clause" ]
null
null
null
forms_builder/forms/migrations/0004_auto_20180727_1256.py
maqmigh/django-forms-builder
1a0068d1d07498f4a2e160c46ec85b9a5f2ddd98
[ "BSD-2-Clause" ]
null
null
null
forms_builder/forms/migrations/0004_auto_20180727_1256.py
maqmigh/django-forms-builder
1a0068d1d07498f4a2e160c46ec85b9a5f2ddd98
[ "BSD-2-Clause" ]
null
null
null
# coding=utf-8 # Generated by Django 2.0.7 on 2018-07-27 10:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('forms', '0003_auto_20180522_0820'), ] operations = [ migrations.AlterField( model_name='field', name='help_text', field=models.CharField(blank=True, max_length=2000, verbose_name='Help text'), ), migrations.AlterField( model_name='form', name='slug', field=models.SlugField(max_length=100, unique=True, verbose_name='Slug'), ), ]
25
90
0.5968
517
0.8272
0
0
0
0
0
0
140
0.224
f7bd2e55648aaa2a1a246e97711c0fc010416b3b
5,711
py
Python
scripts/sighan/generate.py
piglaker/SpecialEdition
172688ef111e1b5c62bdb1ba0a523a2654201b90
[ "Apache-2.0" ]
2
2022-01-06T07:41:50.000Z
2022-01-22T14:18:51.000Z
scripts/sighan/generate.py
piglaker/SpecialEdition
172688ef111e1b5c62bdb1ba0a523a2654201b90
[ "Apache-2.0" ]
null
null
null
scripts/sighan/generate.py
piglaker/SpecialEdition
172688ef111e1b5c62bdb1ba0a523a2654201b90
[ "Apache-2.0" ]
null
null
null
import os import re import sys import json #upper import sys.path.append("../../") from utils import levenshtein from utils.io import load_json, write_to def strQ2B(ustring): """全角转半角""" rstring = "" for uchar in ustring: inside_code=ord(uchar) if inside_code == 12288: #全角空格直接转换 inside_code = 32 elif (inside_code >= 65281 and inside_code <= 65374): #全角字符(除空格)根据关系转化 inside_code -= 65248 rstring += chr(inside_code) return rstring def get_sighan_from_json(): all_data = { "train":None, "dev":None, "test":None, "test14":None, "test15":None, } data_dir = "../../data/rawdata/sighan/csc/" train_file1 = os.path.join(data_dir, "train_dev.json") train_file2 = os.path.join(data_dir, "train131415.json") test14_file = os.path.join(data_dir, "test14.json") test15_file = os.path.join(data_dir, "test15.json") #test15_file = "../../data/rawdata/sighan/enchanted/test15.enc.json" all_data["train"] = load_json(train_file1) all_data["train"].extend(load_json(train_file2)) all_data["train"] = all_data["train"] all_data["valid14"] = load_json(test14_file) all_data["valid"] = load_json(test15_file) #all_data["test"].extend(load_json(test15_file)) return all_data def preprocess(sentence): s = strQ2B(sentence) back_num = re.findall('\d+', s) back_eng = re.findall(r'[a-zA-Z]+', s) #s = re.sub(r'[a-zA-Z]+', 'e', s) #s = re.sub('\d+', 'n', s) return s def json2list(data, need_preprocess): source, target = [], [] for i, element in enumerate(data): if need_preprocess: source.append(preprocess(element["original_text"])) target.append(preprocess(element["correct_text"])) assert len(preprocess(element["original_text"])) == len(preprocess(element["correct_text"])), preprocess(element["original_text"])+preprocess(element["correct_text"]) else: print("ERROR: ABORT !") exit(0) source.append(strQ2B((element["original_text"]))) target.append(strQ2B((element["correct_text"]))) return source, target def generate(need_preprocess=True): """ split raw data(train.json) to preprocessed target """ #file = open("../../data/rawdata/ctc2021/train.json", 'r', encoding='utf-8') data = get_sighan_from_json() train_source, train_target = json2list(data["train"], need_preprocess) valid14_source, valid14_target = json2list(data["valid14"], need_preprocess) valid_source, valid_target = json2list(data["valid"], need_preprocess) print(train_source[:3], train_target[:3]) print(len(train_source), len(train_target)) print(valid_source[:3], valid_target[:3]) print(len(valid_source), len(valid_target)) need_remove = {} # cluster all need_remove for i, sample in enumerate(valid_source): for j, char in enumerate(sample): tgt = valid_target[i][j] if char != tgt: need_remove[ (char, tgt) ] = 0 for i, sample in enumerate(valid14_source): for j, char in enumerate(sample): tgt = valid14_target[i][j] if char != tgt: need_remove[ (char, tgt) ] = 0 #remove remove_count = 0 new_train_source, new_train_target = [], [] for i, sample in enumerate(train_source): skip = False for j, char in enumerate(sample): tgt = train_target[i][j] if char != tgt: key = (char, tgt) if key in need_remove: skip = True remove_count += 1 break if not skip: new_train_source.append(sample) new_train_target.append(train_target[i]) print("Total Skip: ", remove_count) train_source, train_target = new_train_source, new_train_target #f_src = levenstein.tokenize(source, vocab_file_path="vocab.txt") train_through = levenshtein.convert_from_sentpair_through(train_source, train_target, train_source) valid14_through = levenshtein.convert_from_sentpair_through(valid14_source, valid14_target, 
valid14_source) valid_through = levenshtein.convert_from_sentpair_through(valid_source, valid_target, valid_source) #print(train_through[0], valid_through[0]) #output_name = "enchanted" #output_name = "raw" output_name = "holy" write_to("../../data/rawdata/sighan/" + output_name + "/train.src", "\n".join(train_source)) write_to("../../data/rawdata/sighan/"+output_name+"/train.tgt", "\n".join(train_target)) #write_to("../../data/rawdata/sighan/std/train.through", "\n".join(train_through)) write_to("../../data/rawdata/sighan/"+output_name+"/valid14.src", "\n".join(valid14_source)) write_to("../../data/rawdata/sighan/"+output_name+"/valid14.tgt", "\n".join(valid14_target)) #write_to("../../data/rawdata/sighan/std/valid14.through", "\n".join(valid14_through)) write_to("../../data/rawdata/sighan/"+output_name+"/test.src", "\n".join(valid_source)) write_to("../../data/rawdata/sighan/"+output_name+"/test.tgt", "\n".join(valid_target)) #write_to("../../data/rawdata/sighan/std/test.through", "\n".join(valid_through)) write_to("../../data/rawdata/sighan/"+output_name+"/valid.src", "\n".join(valid_source)) write_to("../../data/rawdata/sighan/"+output_name+"/valid.tgt", "\n".join(valid_target)) #write_to("../../data/rawdata/sighan/std/valid.through", "\n".join(valid_through[:500])) if __name__ == "__main__": generate()
33.994048
179
0.629487
0
0
0
0
0
0
0
0
1,693
0.293567
f7bde64d861ea84f6a0483cdddf17127e95c800d
67
py
Python
keras_retinanet/backend/__init__.py
mj-haghighi/keras-retinanet
644c2f8da799889a2a3f6cc833478256cbe32c23
[ "Apache-2.0" ]
null
null
null
keras_retinanet/backend/__init__.py
mj-haghighi/keras-retinanet
644c2f8da799889a2a3f6cc833478256cbe32c23
[ "Apache-2.0" ]
null
null
null
keras_retinanet/backend/__init__.py
mj-haghighi/keras-retinanet
644c2f8da799889a2a3f6cc833478256cbe32c23
[ "Apache-2.0" ]
null
null
null
# from .backend import * # noqa: F401,F403 from .sbackend import *
33.5
43
0.701493
0
0
0
0
0
0
0
0
43
0.641791
f7bf187ba4675f05a89f42e9783052fe7bcd13c5
647
py
Python
docs/_docs/bash/az3166_patch_binary.py
skolbin-ssi/azure-iot-developer-kit
24035c8870e9c342d055bcd586529441078af0a0
[ "MIT" ]
43
2017-10-03T23:03:23.000Z
2019-04-27T18:57:16.000Z
docs/_docs/bash/az3166_patch_binary.py
skolbin-ssi/azure-iot-developer-kit
24035c8870e9c342d055bcd586529441078af0a0
[ "MIT" ]
114
2017-09-20T02:51:28.000Z
2019-05-06T06:13:14.000Z
docs/_docs/bash/az3166_patch_binary.py
skolbin-ssi/azure-iot-developer-kit
24035c8870e9c342d055bcd586529441078af0a0
[ "MIT" ]
48
2017-09-19T08:18:52.000Z
2019-04-19T11:44:32.000Z
# ---------------------------------------------------------------------------- # Copyright (C) Microsoft. All rights reserved. # Licensed under the MIT license. # ---------------------------------------------------------------------------- import os import binascii import struct import shutil import inspect import sys def binary_hook(binf, outf): with open(binf,'rb') as f: appbin = f.read() with open('boot.bin', 'rb') as f: bootbin = f.read() with open(outf ,'wb') as f: f.write(bootbin + ('\xFF' * (0xc000 - len(bootbin))) + appbin) if __name__ == '__main__': binary_hook(sys.argv[1], sys.argv[2])
29.409091
78
0.482226
0
0
0
0
0
0
0
0
276
0.426584
f7bfccc428289385cc22ed6c618de770f292647a
590
py
Python
setup.py
FireXStuff/firex-bundle-ci
05ef1d9017b3553e8f4249da9a96e313f0ad7047
[ "BSD-3-Clause" ]
1
2021-01-08T19:50:33.000Z
2021-01-08T19:50:33.000Z
setup.py
FireXStuff/firex-bundle-ci
05ef1d9017b3553e8f4249da9a96e313f0ad7047
[ "BSD-3-Clause" ]
null
null
null
setup.py
FireXStuff/firex-bundle-ci
05ef1d9017b3553e8f4249da9a96e313f0ad7047
[ "BSD-3-Clause" ]
null
null
null
import versioneer from setuptools import setup setup(name='firex-bundle-ci', version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), description='FireX CI services', url='https://github.com/FireXStuff/firex-bundle-ci.git', author='Core FireX Team', author_email='firex-dev@gmail.com', license='BSD-3-Clause', packages=['firex_bundle_ci'], zip_safe=True, install_requires=[ "firexapp", "firex-keeper", "lxml", "xunitmerge", "unittest-xml-reporting" ], )
26.818182
62
0.60678
0
0
0
0
0
0
0
0
222
0.376271
f7c03e8c3283127463ae5c11c8faf6e12bf38615
1,951
py
Python
meta_middleware/meta_middleware/middleware.py
kevin-wyx/ProxyFS
76d9478c9e87c18950f2e4659b397a397fb1ac69
[ "Apache-2.0" ]
null
null
null
meta_middleware/meta_middleware/middleware.py
kevin-wyx/ProxyFS
76d9478c9e87c18950f2e4659b397a397fb1ac69
[ "Apache-2.0" ]
null
null
null
meta_middleware/meta_middleware/middleware.py
kevin-wyx/ProxyFS
76d9478c9e87c18950f2e4659b397a397fb1ac69
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2016 SwiftStack, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class MetaMiddleware(object): def __init__(self, app, conf): self.app = app def __call__(self, env, start_response): hToDel = list() vToAdd = list() for h in env: if h.upper() == 'HTTP_X_PROXYFS_BIMODAL': hToDel.append(h) vToAdd.append(env[h]) for h in hToDel: del env[h] for v in vToAdd: env['HTTP_X_ACCOUNT_SYSMETA_PROXYFS_BIMODAL'] = v # only last one, if multiple, will determine value def meta_response(status, response_headers, exc_info=None): hvToDel = list() vToAdd = list() for (h,v) in response_headers: if h.upper() == 'X-ACCOUNT-SYSMETA-PROXYFS-BIMODAL': hvToDel.append((h,v)) vToAdd.append(v) for hv in hvToDel: response_headers.remove(hv) for v in vToAdd: response_headers.append(('X-ProxyFS-BiModal',v)) # potentially multiple instances of same header return start_response(status, response_headers, exc_info) return self.app(env, meta_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def meta_filter(app): return MetaMiddleware(app, conf) return meta_filter
34.22807
112
0.633521
1,164
0.596617
0
0
0
0
0
0
786
0.40287
f7c20796c689531f3a41f3738826f84aead341b4
1,397
py
Python
distpy/util/__init__.py
CU-NESS/distpy
279ba7e46726a85246566401fca19b8739d18d08
[ "Apache-2.0" ]
null
null
null
distpy/util/__init__.py
CU-NESS/distpy
279ba7e46726a85246566401fca19b8739d18d08
[ "Apache-2.0" ]
null
null
null
distpy/util/__init__.py
CU-NESS/distpy
279ba7e46726a85246566401fca19b8739d18d08
[ "Apache-2.0" ]
null
null
null
""" Introduces utilities used throughout the package, including: - interfaces for making objects `distpy.util.Savable.Savable` and `distpy.util.Loadable.Loadable` in binary hdf5 files using h5py - helper methods for using h5py to save and load variables and arrays (`h5py_extensions`) - type category definitions (`distpy.util.TypeCategories`) - functions for making univariate histograms, bivariate histograms, and triangle plots (`distpy.util.TrianglePlot`) - a class that uses strings to represent an `distpy.util.Expression.Expression` that can be modified and have arguments passed to it before being evaluated - a class that represents **File**: $DISTPY/distpy/util/\\_\\_init\\_\\_.py **Author**: Keith Tauscher **Date**: 14 May 2021 """ from distpy.util.Savable import Savable from distpy.util.Loadable import Loadable from distpy.util.TypeCategories import bool_types, int_types, float_types,\ real_numerical_types, complex_numerical_types, numerical_types,\ sequence_types from distpy.util.h5py_extensions import create_hdf5_dataset, get_hdf5_value,\ HDF5Link, save_dictionary, load_dictionary from distpy.util.TrianglePlot import univariate_histogram,\ confidence_contour_2D, bivariate_histogram, triangle_plot from distpy.util.Expression import Expression from distpy.util.SparseSquareBlockDiagonalMatrix import\ SparseSquareBlockDiagonalMatrix
43.65625
79
0.800286
0
0
0
0
0
0
0
0
763
0.54617
f7c31602d3ba09f1a3970f8ce071305eb086135d
74
py
Python
Crypto-hardRSA/flag.py
JSW2020/hsctf-2019-freshmen
5282d6d51153aadd62f42673aa3d487f8d7ef45b
[ "MIT" ]
16
2019-12-09T15:53:08.000Z
2021-12-07T00:34:30.000Z
Crypto-hardRSA/flag.py
JSW2020/hsctf-2019-freshmen
5282d6d51153aadd62f42673aa3d487f8d7ef45b
[ "MIT" ]
null
null
null
Crypto-hardRSA/flag.py
JSW2020/hsctf-2019-freshmen
5282d6d51153aadd62f42673aa3d487f8d7ef45b
[ "MIT" ]
7
2019-12-09T11:53:52.000Z
2021-11-14T04:09:04.000Z
flag = "flag{b3453333-9da9-49ae-b4ed-0017c392d58e}" e1 = 65537 e2 = 368273
24.666667
51
0.743243
0
0
0
0
0
0
0
0
44
0.594595
f7c3c4d630855f49542b120f69474ebe854a401b
310
py
Python
toast/decorators/__init__.py
joshuaskelly/Toast
741bc17992fd654c9834b080189eb800202ce999
[ "MIT" ]
null
null
null
toast/decorators/__init__.py
joshuaskelly/Toast
741bc17992fd654c9834b080189eb800202ce999
[ "MIT" ]
null
null
null
toast/decorators/__init__.py
joshuaskelly/Toast
741bc17992fd654c9834b080189eb800202ce999
[ "MIT" ]
null
null
null
class call_if(object): def __init__(self, cond): self.condition = cond def __call__(self, func): def inner(*args, **kwargs): if getattr(args[0], self.condition): return func(*args, **kwargs) else: return None return inner
28.181818
48
0.525806
310
1
0
0
0
0
0
0
0
0
f7c417316d84349935d37272663f36b5a52c49ff
1,165
py
Python
drogher/package/fedex.py
thisisnotmyuserid/drogher
f8ea5e34dad6a2e9f22608b4ae4a6f7032133e45
[ "BSD-3-Clause" ]
13
2017-04-24T07:49:30.000Z
2020-09-22T13:13:13.000Z
drogher/package/fedex.py
thisisnotmyuserid/drogher
f8ea5e34dad6a2e9f22608b4ae4a6f7032133e45
[ "BSD-3-Clause" ]
null
null
null
drogher/package/fedex.py
thisisnotmyuserid/drogher
f8ea5e34dad6a2e9f22608b4ae4a6f7032133e45
[ "BSD-3-Clause" ]
4
2018-09-08T05:31:57.000Z
2022-02-10T17:42:31.000Z
import itertools from .base import Package class FedEx(Package): shipper = 'FedEx' class FedExExpress(FedEx): barcode_pattern = r'^\d{34}$' @property def tracking_number(self): return self.barcode[20:22].lstrip('0') + self.barcode[22:] @property def valid_checksum(self): chars, check_digit = self.tracking_number[:-1], self.tracking_number[-1] total = 0 for digit, char in zip(itertools.cycle([1, 3, 7]), reversed(chars)): total += int(char) * digit return total % 11 % 10 == int(check_digit) class FedExGround96(FedEx): barcode_pattern = r'^96\d{20}$' @property def tracking_number(self): return self.barcode[7:] @property def valid_checksum(self): chars, check_digit = self.tracking_number[:-1], self.tracking_number[-1] odd = even = 0 for i, char in enumerate(reversed(chars)): if i & 0x1: odd += int(char) else: even += int(char) check = ((even * 3) + odd) % 10 if check != 0: check = 10 - check return check == int(check_digit)
25.326087
80
0.574249
1,112
0.954506
0
0
922
0.791416
0
0
34
0.029185
f7c4b93a5f9fe2cd51baa68e74a1491e4f04cbf5
1,535
py
Python
nipy/labs/spatial_models/tests/test_bsa_io.py
arokem/nipy
d6b2e862c65558bb5747c36140fd6261a7e1ecfe
[ "BSD-3-Clause" ]
null
null
null
nipy/labs/spatial_models/tests/test_bsa_io.py
arokem/nipy
d6b2e862c65558bb5747c36140fd6261a7e1ecfe
[ "BSD-3-Clause" ]
null
null
null
nipy/labs/spatial_models/tests/test_bsa_io.py
arokem/nipy
d6b2e862c65558bb5747c36140fd6261a7e1ecfe
[ "BSD-3-Clause" ]
null
null
null
from __future__ import with_statement from nose.tools import assert_true from os.path import exists import numpy as np from nibabel import Nifti1Image from numpy.testing import assert_equal from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset from ..bsa_io import make_bsa_image from nibabel.tmpdirs import InTemporaryDirectory def test_parcel_intra_from_3d_images_list(): """Test that a parcellation is generated, starting from a list of 3D images """ # Generate an image shape = (5, 5, 5) contrast_id = 'plop' mask_image = Nifti1Image(np.ones(shape), np.eye(4)) #mask_images = [mask_image for _ in range(5)] with InTemporaryDirectory() as dir_context: data_image = ['image_%d.nii' % i for i in range(5)] for datim in data_image: surrogate_3d_dataset(mask=mask_image, out_image_file=datim) #run the algo landmark, hrois = make_bsa_image( mask_image, data_image, threshold=10., smin=0, sigma=1., prevalence_threshold=0, prevalence_pval=0.5, write_dir=dir_context, algorithm='density', contrast_id=contrast_id) assert_equal(landmark, None) assert_equal(len(hrois), 5) assert_true(exists('density_%s.nii' % contrast_id)) assert_true(exists('prevalence_%s.nii' % contrast_id)) assert_true(exists('AR_%s.nii' % contrast_id)) assert_true(exists('CR_%s.nii' % contrast_id)) if __name__ == "__main__": import nose nose.run(argv=['', __file__])
34.111111
79
0.699674
0
0
0
0
0
0
0
0
258
0.168078
f7c5189c4c9985714dd619cfadbc0baf92efab39
5,099
py
Python
MFSDA/MFSDA_run.py
bpaniagua/MFSDA_Python
d7e439fe670d5e2731c9ec722919a74f67b01e30
[ "Apache-2.0" ]
3
2020-08-10T08:57:36.000Z
2021-04-04T01:12:50.000Z
MFSDA/MFSDA_run.py
bpaniagua/MFSDA_Python
d7e439fe670d5e2731c9ec722919a74f67b01e30
[ "Apache-2.0" ]
17
2018-08-03T14:25:52.000Z
2022-02-06T18:19:39.000Z
MFSDA/MFSDA_run.py
bpaniagua/MFSDA_Python
d7e439fe670d5e2731c9ec722919a74f67b01e30
[ "Apache-2.0" ]
13
2017-11-14T17:22:32.000Z
2020-12-10T16:55:58.000Z
#!/usr/bin/env python-real # -*- coding: utf-8 -*- """ Run script: multivariate functional shape data analysis (MFSDA). Author: Chao Huang (chaohuang.stat@gmail.com) Last update: 2017-08-14 """ import sys,os sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),os.path.join('Resources','Libraries'))) import numpy as np from scipy import stats from statsmodels.sandbox.stats.multicomp import fdrcorrection0 from stat_read_x import read_x from stat_lpks import lpks from stat_sif import sif from stat_wald_ht import wald_ht from stat_bstrp_pvalue import bstrp_pvalue import MFSDA_stat as mfsda import timeit import vtk import argparse import os import json """installed all the libraries above""" def main(): parser = argparse.ArgumentParser(description='Multivariate Functional Shape Data Analysis (MFSDA)') parser.add_argument('--shapeData', type=str, help='Text file list with vtk filenames, 1 file per line', required=True) parser.add_argument('--coordData', type=str, help='filename, .vtk shape template', required=True) parser.add_argument('--outputDir', help='output directory', default='./output') args = parser.parse_args() start_all = timeit.default_timer() run_script(args) stop_all = timeit.default_timer() delta_time_all = str(stop_all - start_all) print("The total elapsed time is " + delta_time_all) def run_script(args): """ Run the commandline script for MFSDA. """ """+++++++++++++++++++++++++++++++++++""" """Step 1. load dataset """ print("loading data ......") print("+++++++Read the surface shape data+++++++") fh = open(args.shapeData, 'rU') y_design = [] nshape = 0 numpoints = -1 header = fh.readline() toks = header.split(sep=',') covs_tmp = [] for line in fh.readlines(): toks = line.strip().split(sep=',') # Read VTK file vtkfilename = toks[0].rstrip() print("Reading {}".format(vtkfilename)) reader = vtk.vtkPolyDataReader() reader.SetFileName(vtkfilename) reader.Update() shapedata = reader.GetOutput() shapedatapoints = shapedata.GetPoints() y_design.append([]) if numpoints == -1: numpoints = shapedatapoints.GetNumberOfPoints() if numpoints != shapedatapoints.GetNumberOfPoints(): print("WARNING! The number of points is not the same for the shape:", vtkfilename) for i in range(shapedatapoints.GetNumberOfPoints()): p = shapedatapoints.GetPoint(i) y_design[nshape].append(p) nshape += 1 # Build covariate matrix covs_tmp.append(toks[1:]) y_design = np.array(y_design) y_design.reshape(nshape, numpoints, 3) y_design = np.array(y_design) y_design.reshape(nshape, numpoints, 3) print("The dimension of shape matrix is " + str(y_design.shape)) print("+++++++Read the sphere coordinate data+++++++") print("Reading", args.coordData) reader = vtk.vtkPolyDataReader() reader.SetFileName(args.coordData) reader.Update() coordData = reader.GetOutput() shapedatapoints = coordData.GetPoints() if numpoints != shapedatapoints.GetNumberOfPoints(): print("WARNING! The template does not have the same number of points as the shapes") coord_mat = [] for i in range(shapedatapoints.GetNumberOfPoints()): p = shapedatapoints.GetPoint(i) coord_mat.append(p) coord_mat = np.array(coord_mat) # Set up design matrix design_data = np.array(covs_tmp,dtype=float) # read the covariate type var_type = getCovariateType(design_data) """+++++++++++++++++++++++++++++++++++""" """Step 2. 
Statistical analysis: including (1) smoothing and (2) hypothesis testing""" gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type) """+++++++++++++++++++++++++++++++++++""" """Step3. Save all the results""" if not os.path.exists(args.outputDir): os.makedirs(args.outputDir) pvalues = {} pvalues['Gpvals'] = gpvals.tolist() pvalues['clu_pvals'] = clu_pvals.tolist() pvalues['Lpvals_fdr'] = lpvals_fdr.tolist() with open(os.path.join(args.outputDir,'pvalues.json'), 'w') as outfile: json.dump(pvalues, outfile) efit = {} efit['efitBetas'] = efit_beta.tolist() efit['efitYdesign'] = efity_design.tolist() efit['efitEtas'] = efit_eta.tolist() with open(os.path.join(args.outputDir,'efit.json'), 'w') as outfile: json.dump(efit, outfile) def getCovariateType(design_data): (row,column)=design_data.shape cov_types=[] for c in range(column): cov_col=design_data[:,c] cov_type = 0. #int for i in range(len(cov_col)): if int(cov_col[i])!=cov_col[i]: cov_type = 1. #double break cov_types.append(cov_type) cov_types = np.array(cov_types) return cov_types if __name__ == '__main__': main()
29.818713
130
0.642871
0
0
0
0
0
0
0
0
1,325
0.259855
f7c72117e015e7f0761f5162d10f3d3cf0ddb74f
1,671
py
Python
modules/mongodb_atlas/mongodb_atlas.py
riddopic/opta
25fa6435fdc7e2ea9c7963ed74100fffb0743063
[ "Apache-2.0" ]
595
2021-05-21T22:30:48.000Z
2022-03-31T15:40:25.000Z
modules/mongodb_atlas/mongodb_atlas.py
riddopic/opta
25fa6435fdc7e2ea9c7963ed74100fffb0743063
[ "Apache-2.0" ]
463
2021-05-24T21:32:59.000Z
2022-03-31T17:12:33.000Z
modules/mongodb_atlas/mongodb_atlas.py
riddopic/opta
25fa6435fdc7e2ea9c7963ed74100fffb0743063
[ "Apache-2.0" ]
29
2021-05-21T22:27:52.000Z
2022-03-28T16:43:45.000Z
import os from typing import TYPE_CHECKING from modules.base import ModuleProcessor from opta.core.terraform import get_terraform_outputs from opta.exceptions import UserErrors if TYPE_CHECKING: from opta.layer import Layer from opta.module import Module class MongodbAtlasProcessor(ModuleProcessor): def __init__(self, module: "Module", layer: "Layer"): if module.data["type"] != "mongodb-atlas": raise Exception( f"The module {module.name} was expected to be of type mongodb-atlas" ) super(MongodbAtlasProcessor, self).__init__(module, layer) def pre_hook(self, module_idx: int) -> None: required_env_set = set(["MONGODB_ATLAS_PUBLIC_KEY", "MONGODB_ATLAS_PRIVATE_KEY"]) if not required_env_set.issubset(set(os.environ.keys())): raise UserErrors( "Opta did not find environment variable(s), please set them and retry: {}".format( required_env_set - set(os.environ.keys()) ) ) super(MongodbAtlasProcessor, self).pre_hook(module_idx) def process(self, module_idx: int) -> None: self.module.data["cloud_provider"] = self.layer.cloud.upper() if self.module.data["cloud_provider"] == "LOCAL": self.module.data["cloud_provider"] = "AWS" # For local, always spin up in AWS self.module.data["region"] = "US_EAST_1" base_layer = self.layer.root() root_outputs = get_terraform_outputs(base_layer) self.module.data["public_nat_ips"] = root_outputs["public_nat_ips"] super(MongodbAtlasProcessor, self).process(module_idx)
37.133333
98
0.663076
1,403
0.839617
0
0
0
0
0
0
376
0.225015
f7c92906bdd05fb9011ed12eacbe0ac0a33b671e
502
py
Python
python/tests/testdata/region_HU.py
kevin-brown/python-phonenumbers
e4ae191e6fae47581eb40d3d23c7e2b7d422c326
[ "Apache-2.0" ]
1
2019-08-06T03:19:28.000Z
2019-08-06T03:19:28.000Z
python/tests/testdata/region_HU.py
kevin-brown/python-phonenumbers
e4ae191e6fae47581eb40d3d23c7e2b7d422c326
[ "Apache-2.0" ]
null
null
null
python/tests/testdata/region_HU.py
kevin-brown/python-phonenumbers
e4ae191e6fae47581eb40d3d23c7e2b7d422c326
[ "Apache-2.0" ]
2
2018-02-09T13:52:15.000Z
2019-09-10T08:36:25.000Z
"""Auto-generated file, do not edit by hand. HU metadata""" from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_HU = PhoneMetadata(id='HU', country_code=36, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='30\\d{7}', possible_length=(9,)), mobile=PhoneNumberDesc(national_number_pattern='30\\d{7}', example_number='301234567', possible_length=(9,)), national_prefix='06', national_prefix_for_parsing='06')
55.777778
113
0.776892
0
0
0
0
0
0
0
0
106
0.211155
f7c994df8beeb9e54af1a6918047db78eb8494b2
1,389
py
Python
lambdas/budget-handler/lambda_handler.py
weAllWeGot/personal_financial_engine
37c89e49aa68d6db48c10d6663135f4992a72171
[ "Apache-2.0" ]
2
2018-08-18T16:41:43.000Z
2020-12-20T21:29:49.000Z
lambdas/budget-handler/lambda_handler.py
weallwegot/personal_financial_engine
37c89e49aa68d6db48c10d6663135f4992a72171
[ "Apache-2.0" ]
12
2018-07-25T16:56:48.000Z
2019-10-22T01:16:23.000Z
lambdas/budget-handler/lambda_handler.py
weAllWeGot/personal_financial_engine
37c89e49aa68d6db48c10d6663135f4992a72171
[ "Apache-2.0" ]
4
2018-12-07T23:50:12.000Z
2021-04-16T20:49:08.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import boto3 import csv import json import logging from budget_retrieval import get_budget from budget_placement import place_budget def respond(err, res=None): return { 'statusCode': '400' if err else '200', 'body': err.message if err else json.dumps(res), 'headers': { 'Content-Type': 'application/json', "Access-Control-Allow-Origin": "*", # Required for CORS support to work "Access-Control-Allow-Credentials": True }, } def lambda_handler(event: dict, context: dict) -> dict: '''Demonstrates a simple HTTP endpoint using API Gateway. You have full access to the request and response payload, including headers and status code. ''' path = event['path'] user_uid = event['requestContext']['authorizer']['claims']['sub'] body = json.loads(event['body']) path = '/retrieve' if body['RetrieveOrPlace'].endswith('retrieve') else '/place' entity = 'budget' if body['Entity'].endswith('budget') else 'account' print(path) if path.endswith('/retrieve'): response = get_budget(user_uid, entity) elif path.endswith('/place'): response = place_budget(user_uid, body, entity) return respond(err=None, res=response) # with open('event.json') as f: # e = json.load(f) # lambda_handler(e, {})
28.346939
84
0.645068
0
0
0
0
0
0
0
0
609
0.438445
f7ca0211e8a92052407acbaa028f0ad46e74b5f9
1,451
py
Python
src/documenteer/stackdocs/doxygentag.py
lsst-sqre/sphinxkit
a9475d0722b0f6f89fd1c4c54eafad0564667b0b
[ "MIT" ]
3
2019-04-18T02:47:06.000Z
2021-11-09T03:49:12.000Z
src/documenteer/stackdocs/doxygentag.py
lsst-sqre/sphinxkit
a9475d0722b0f6f89fd1c4c54eafad0564667b0b
[ "MIT" ]
29
2016-12-15T01:02:05.000Z
2022-03-07T12:06:40.000Z
src/documenteer/stackdocs/doxygentag.py
lsst-sqre/sphinxkit
a9475d0722b0f6f89fd1c4c54eafad0564667b0b
[ "MIT" ]
2
2016-09-12T17:44:06.000Z
2016-12-15T00:37:05.000Z
"""Utilities for working with Doxygen tag files. """ __all__ = ["get_tag_entity_names"] import xml.etree.ElementTree as ET from pathlib import Path from typing import List, Optional, Sequence, Union try: from sphinxcontrib.doxylink import doxylink except ImportError: print( "sphinxcontrib.doxylink is missing. Install documenteer with the " "pipelines extra:\n\n pip install documenteer[pipelines]" ) def get_tag_entity_names( tag_path: Union[str, Path], kinds: Optional[Sequence[str]] = None ) -> List[str]: """Get the list of API names in a Doxygen tag file. Parameters ---------- tag_path : `str` or `~pathlib.Path` File path of the Doxygen tag file. kinds : sequence of `str`, optional If provided, a sequence of API kinds to include in the listing. Doxygen types are: - namespace - struct - class - file - define - group - variable - typedef - enumeration - function Returns ------- names : `list` of `str` List of API names. """ doc = ET.parse(str(tag_path)) symbol_map = doxylink.SymbolMap(doc) keys = [] for key in symbol_map._mapping.keys(): entry = symbol_map[key] if kinds: if entry.kind in kinds: keys.append(key) else: keys.append(key) keys.sort() return keys
24.183333
74
0.598208
0
0
0
0
0
0
0
0
770
0.530669
f7cadf89eeb52e1e8b7bf3ad6d819d4964e7f62f
1,263
py
Python
src/gamesbyexample/shellgame.py
skinzor/PythonStdioGames
75f27af19d7f1d555b0fd85fbcf215f07660b93f
[ "MIT" ]
1
2019-11-30T17:04:09.000Z
2019-11-30T17:04:09.000Z
src/gamesbyexample/shellgame.py
skinzor/PythonStdioGames
75f27af19d7f1d555b0fd85fbcf215f07660b93f
[ "MIT" ]
null
null
null
src/gamesbyexample/shellgame.py
skinzor/PythonStdioGames
75f27af19d7f1d555b0fd85fbcf215f07660b93f
[ "MIT" ]
null
null
null
# Shell Game, by Al Sweigart al@inventwithpython.com # A random gambling game. import random, time, sys print('''SHELL GAME By Al Sweigart al@inventwithpython.com Try to find the diamond! Press Enter to continue...''') input() CUPS = ['diamond', 'pocket lint', 'nothing'] while True: print() print('Shuffling the cups', end='') random.shuffle(CUPS) # This happens instantly. # We add fake pauses to make it seem more interesting: time.sleep(0.3) print('.', end='') time.sleep(0.3) print('.', end='') time.sleep(0.3) print('.', end='') time.sleep(0.3) print() while True: print('Okay! Pick a cup 1-{}'.format(len(CUPS))) pickedCup = input() if pickedCup.isdecimal() and 1 <= int(pickedCup) <= len(CUPS): break print('Type a number between 1 and {}.'.format(len(CUPS))) print() if CUPS[int(pickedCup) - 1] == 'diamond': print('You found the cup with the diamond!') else: print('Nope! You picked the cup that had {} in it.'.format(CUPS[int(pickedCup) - 1])) print('Would you like to play again? Y/N') response = input().upper() if not response.startswith('Y'): print('Thanks for playing!') sys.exit()
26.3125
93
0.599367
0
0
0
0
0
0
0
0
538
0.42597
f7cbba72cbee5b92ee9bed0dc914113ae1d6f2e4
1,242
py
Python
main.py
mathew4STAR/GPT-3_based_AI
7c5ffcd26ebbd64ee1f6fa02ec4a8529c795b809
[ "MIT" ]
null
null
null
main.py
mathew4STAR/GPT-3_based_AI
7c5ffcd26ebbd64ee1f6fa02ec4a8529c795b809
[ "MIT" ]
null
null
null
main.py
mathew4STAR/GPT-3_based_AI
7c5ffcd26ebbd64ee1f6fa02ec4a8529c795b809
[ "MIT" ]
null
null
null
import pyttsx3 import speech_recognition as sr import openai as op import os op.api_key = os.getenv("OPENAI_API_KEY") engine = pyttsx3.init() engine.setProperty('rate', 150) engine.setProperty('volume', 1.0) voices = engine.getProperty('voices') engine.setProperty('voice', voices[1].id) def tell(text): engine.say(text) engine.runAndWait() def takecommand(): r = sr.Recognizer() with sr.Microphone() as source: print("Listening...") r.pause_threshold = 1 audio = r.listen(source) try: print("Recognizing...") query = r.recognize_google(audio, language='en-in') print(query) except Exception as e: print("Please repeat") return "Nothing" return query while True: query = takecommand() response = op.Completion.create( engine="text-davinci-001", prompt="The following is a conversation with an AI friend. The friend is helpful, creative, clever, and very friendly.\n\nHuman: " + query + "\nAI: ", temperature=0.9, max_tokens=150, top_p=1, frequency_penalty=0, presence_penalty=0.6, ) presponse= response["choices"][0]["text"] print(presponse) tell(presponse)
24.84
154
0.638486
0
0
0
0
0
0
0
0
270
0.217391
f7cdafc3fcc754a52e3ada458ff7a926e8981f1d
71,088
py
Python
sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/compute/v20200930/_inputs.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = [ 'CreationDataArgs', 'DataDiskImageEncryptionArgs', 'DisallowedArgs', 'DiskSkuArgs', 'EncryptionImagesArgs', 'EncryptionSetIdentityArgs', 'EncryptionSettingsCollectionArgs', 'EncryptionSettingsElementArgs', 'EncryptionArgs', 'ExtendedLocationArgs', 'GalleryApplicationVersionPublishingProfileArgs', 'GalleryArtifactVersionSourceArgs', 'GalleryDataDiskImageArgs', 'GalleryImageFeatureArgs', 'GalleryImageIdentifierArgs', 'GalleryImageVersionPublishingProfileArgs', 'GalleryImageVersionStorageProfileArgs', 'GalleryOSDiskImageArgs', 'ImageDiskReferenceArgs', 'ImagePurchasePlanArgs', 'KeyForDiskEncryptionSetArgs', 'KeyVaultAndKeyReferenceArgs', 'KeyVaultAndSecretReferenceArgs', 'OSDiskImageEncryptionArgs', 'PrivateLinkServiceConnectionStateArgs', 'PurchasePlanArgs', 'RecommendedMachineConfigurationArgs', 'ResourceRangeArgs', 'SharingProfileArgs', 'SnapshotSkuArgs', 'SourceVaultArgs', 'TargetRegionArgs', 'UserArtifactManageArgs', 'UserArtifactSourceArgs', ] @pulumi.input_type class CreationDataArgs: def __init__(__self__, *, create_option: pulumi.Input[Union[str, 'DiskCreateOption']], gallery_image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None, image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None, logical_sector_size: Optional[pulumi.Input[int]] = None, source_resource_id: Optional[pulumi.Input[str]] = None, source_uri: Optional[pulumi.Input[str]] = None, storage_account_id: Optional[pulumi.Input[str]] = None, upload_size_bytes: Optional[pulumi.Input[float]] = None): """ Data used when creating a disk. :param pulumi.Input[Union[str, 'DiskCreateOption']] create_option: This enumerates the possible sources of a disk's creation. :param pulumi.Input['ImageDiskReferenceArgs'] gallery_image_reference: Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared galley image version from which to create a disk. :param pulumi.Input['ImageDiskReferenceArgs'] image_reference: Disk source information. :param pulumi.Input[int] logical_sector_size: Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default. :param pulumi.Input[str] source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot or disk. :param pulumi.Input[str] source_uri: If createOption is Import, this is the URI of a blob to be imported into a managed disk. :param pulumi.Input[str] storage_account_id: Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk. :param pulumi.Input[float] upload_size_bytes: If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). 
""" pulumi.set(__self__, "create_option", create_option) if gallery_image_reference is not None: pulumi.set(__self__, "gallery_image_reference", gallery_image_reference) if image_reference is not None: pulumi.set(__self__, "image_reference", image_reference) if logical_sector_size is not None: pulumi.set(__self__, "logical_sector_size", logical_sector_size) if source_resource_id is not None: pulumi.set(__self__, "source_resource_id", source_resource_id) if source_uri is not None: pulumi.set(__self__, "source_uri", source_uri) if storage_account_id is not None: pulumi.set(__self__, "storage_account_id", storage_account_id) if upload_size_bytes is not None: pulumi.set(__self__, "upload_size_bytes", upload_size_bytes) @property @pulumi.getter(name="createOption") def create_option(self) -> pulumi.Input[Union[str, 'DiskCreateOption']]: """ This enumerates the possible sources of a disk's creation. """ return pulumi.get(self, "create_option") @create_option.setter def create_option(self, value: pulumi.Input[Union[str, 'DiskCreateOption']]): pulumi.set(self, "create_option", value) @property @pulumi.getter(name="galleryImageReference") def gallery_image_reference(self) -> Optional[pulumi.Input['ImageDiskReferenceArgs']]: """ Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared galley image version from which to create a disk. """ return pulumi.get(self, "gallery_image_reference") @gallery_image_reference.setter def gallery_image_reference(self, value: Optional[pulumi.Input['ImageDiskReferenceArgs']]): pulumi.set(self, "gallery_image_reference", value) @property @pulumi.getter(name="imageReference") def image_reference(self) -> Optional[pulumi.Input['ImageDiskReferenceArgs']]: """ Disk source information. """ return pulumi.get(self, "image_reference") @image_reference.setter def image_reference(self, value: Optional[pulumi.Input['ImageDiskReferenceArgs']]): pulumi.set(self, "image_reference", value) @property @pulumi.getter(name="logicalSectorSize") def logical_sector_size(self) -> Optional[pulumi.Input[int]]: """ Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default. """ return pulumi.get(self, "logical_sector_size") @logical_sector_size.setter def logical_sector_size(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "logical_sector_size", value) @property @pulumi.getter(name="sourceResourceId") def source_resource_id(self) -> Optional[pulumi.Input[str]]: """ If createOption is Copy, this is the ARM id of the source snapshot or disk. """ return pulumi.get(self, "source_resource_id") @source_resource_id.setter def source_resource_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_resource_id", value) @property @pulumi.getter(name="sourceUri") def source_uri(self) -> Optional[pulumi.Input[str]]: """ If createOption is Import, this is the URI of a blob to be imported into a managed disk. """ return pulumi.get(self, "source_uri") @source_uri.setter def source_uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_uri", value) @property @pulumi.getter(name="storageAccountId") def storage_account_id(self) -> Optional[pulumi.Input[str]]: """ Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk. 
""" return pulumi.get(self, "storage_account_id") @storage_account_id.setter def storage_account_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "storage_account_id", value) @property @pulumi.getter(name="uploadSizeBytes") def upload_size_bytes(self) -> Optional[pulumi.Input[float]]: """ If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). """ return pulumi.get(self, "upload_size_bytes") @upload_size_bytes.setter def upload_size_bytes(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "upload_size_bytes", value) @pulumi.input_type class DataDiskImageEncryptionArgs: def __init__(__self__, *, lun: pulumi.Input[int], disk_encryption_set_id: Optional[pulumi.Input[str]] = None): """ Contains encryption settings for a data disk image. :param pulumi.Input[int] lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. :param pulumi.Input[str] disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set. """ pulumi.set(__self__, "lun", lun) if disk_encryption_set_id is not None: pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id) @property @pulumi.getter def lun(self) -> pulumi.Input[int]: """ This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. """ return pulumi.get(self, "lun") @lun.setter def lun(self, value: pulumi.Input[int]): pulumi.set(self, "lun", value) @property @pulumi.getter(name="diskEncryptionSetId") def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]: """ A relative URI containing the resource ID of the disk encryption set. """ return pulumi.get(self, "disk_encryption_set_id") @disk_encryption_set_id.setter def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_encryption_set_id", value) @pulumi.input_type class DisallowedArgs: def __init__(__self__, *, disk_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Describes the disallowed disk types. :param pulumi.Input[Sequence[pulumi.Input[str]]] disk_types: A list of disk types. """ if disk_types is not None: pulumi.set(__self__, "disk_types", disk_types) @property @pulumi.getter(name="diskTypes") def disk_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of disk types. """ return pulumi.get(self, "disk_types") @disk_types.setter def disk_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "disk_types", value) @pulumi.input_type class DiskSkuArgs: def __init__(__self__, *, name: Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]] = None): """ The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. :param pulumi.Input[Union[str, 'DiskStorageAccountTypes']] name: The sku name. """ if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]]: """ The sku name. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[Union[str, 'DiskStorageAccountTypes']]]): pulumi.set(self, "name", value) @pulumi.input_type class EncryptionImagesArgs: def __init__(__self__, *, data_disk_images: Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]] = None, os_disk_image: Optional[pulumi.Input['OSDiskImageEncryptionArgs']] = None): """ Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact. :param pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]] data_disk_images: A list of encryption specifications for data disk images. :param pulumi.Input['OSDiskImageEncryptionArgs'] os_disk_image: Contains encryption settings for an OS disk image. """ if data_disk_images is not None: pulumi.set(__self__, "data_disk_images", data_disk_images) if os_disk_image is not None: pulumi.set(__self__, "os_disk_image", os_disk_image) @property @pulumi.getter(name="dataDiskImages") def data_disk_images(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]]: """ A list of encryption specifications for data disk images. """ return pulumi.get(self, "data_disk_images") @data_disk_images.setter def data_disk_images(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DataDiskImageEncryptionArgs']]]]): pulumi.set(self, "data_disk_images", value) @property @pulumi.getter(name="osDiskImage") def os_disk_image(self) -> Optional[pulumi.Input['OSDiskImageEncryptionArgs']]: """ Contains encryption settings for an OS disk image. """ return pulumi.get(self, "os_disk_image") @os_disk_image.setter def os_disk_image(self, value: Optional[pulumi.Input['OSDiskImageEncryptionArgs']]): pulumi.set(self, "os_disk_image", value) @pulumi.input_type class EncryptionSetIdentityArgs: def __init__(__self__, *, type: Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]] = None): """ The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used to encrypt disks. :param pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']] type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys. """ if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]]: """ The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption Sets can be updated with Identity type None during migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]]): pulumi.set(self, "type", value) @pulumi.input_type class EncryptionSettingsCollectionArgs: def __init__(__self__, *, enabled: pulumi.Input[bool], encryption_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]] = None, encryption_settings_version: Optional[pulumi.Input[str]] = None): """ Encryption settings for disk or snapshot :param pulumi.Input[bool] enabled: Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. :param pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]] encryption_settings: A collection of encryption settings, one for each disk volume. :param pulumi.Input[str] encryption_settings_version: Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption. """ pulumi.set(__self__, "enabled", enabled) if encryption_settings is not None: pulumi.set(__self__, "encryption_settings", encryption_settings) if encryption_settings_version is not None: pulumi.set(__self__, "encryption_settings_version", encryption_settings_version) @property @pulumi.getter def enabled(self) -> pulumi.Input[bool]: """ Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. """ return pulumi.get(self, "enabled") @enabled.setter def enabled(self, value: pulumi.Input[bool]): pulumi.set(self, "enabled", value) @property @pulumi.getter(name="encryptionSettings") def encryption_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]: """ A collection of encryption settings, one for each disk volume. """ return pulumi.get(self, "encryption_settings") @encryption_settings.setter def encryption_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]): pulumi.set(self, "encryption_settings", value) @property @pulumi.getter(name="encryptionSettingsVersion") def encryption_settings_version(self) -> Optional[pulumi.Input[str]]: """ Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption. """ return pulumi.get(self, "encryption_settings_version") @encryption_settings_version.setter def encryption_settings_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "encryption_settings_version", value) @pulumi.input_type class EncryptionSettingsElementArgs: def __init__(__self__, *, disk_encryption_key: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']] = None, key_encryption_key: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']] = None): """ Encryption settings for one disk volume. 
:param pulumi.Input['KeyVaultAndSecretReferenceArgs'] disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key :param pulumi.Input['KeyVaultAndKeyReferenceArgs'] key_encryption_key: Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. """ if disk_encryption_key is not None: pulumi.set(__self__, "disk_encryption_key", disk_encryption_key) if key_encryption_key is not None: pulumi.set(__self__, "key_encryption_key", key_encryption_key) @property @pulumi.getter(name="diskEncryptionKey") def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]: """ Key Vault Secret Url and vault id of the disk encryption key """ return pulumi.get(self, "disk_encryption_key") @disk_encryption_key.setter def disk_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]): pulumi.set(self, "disk_encryption_key", value) @property @pulumi.getter(name="keyEncryptionKey") def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]: """ Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. """ return pulumi.get(self, "key_encryption_key") @key_encryption_key.setter def key_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]): pulumi.set(self, "key_encryption_key", value) @pulumi.input_type class EncryptionArgs: def __init__(__self__, *, disk_encryption_set_id: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[Union[str, 'EncryptionType']]] = None): """ Encryption at rest settings for disk or snapshot :param pulumi.Input[str] disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling encryption at rest. :param pulumi.Input[Union[str, 'EncryptionType']] type: The type of key used to encrypt the data of the disk. """ if disk_encryption_set_id is not None: pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="diskEncryptionSetId") def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]: """ ResourceId of the disk encryption set to use for enabling encryption at rest. """ return pulumi.get(self, "disk_encryption_set_id") @disk_encryption_set_id.setter def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_encryption_set_id", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[Union[str, 'EncryptionType']]]: """ The type of key used to encrypt the data of the disk. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[Union[str, 'EncryptionType']]]): pulumi.set(self, "type", value) @pulumi.input_type class ExtendedLocationArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]] = None): """ The complex type of the extended location. :param pulumi.Input[str] name: The name of the extended location. :param pulumi.Input[Union[str, 'ExtendedLocationTypes']] type: The type of the extended location. """ if name is not None: pulumi.set(__self__, "name", name) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the extended location. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]: """ The type of the extended location. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]): pulumi.set(self, "type", value) @pulumi.input_type class GalleryApplicationVersionPublishingProfileArgs: def __init__(__self__, *, source: pulumi.Input['UserArtifactSourceArgs'], enable_health_check: Optional[pulumi.Input[bool]] = None, end_of_life_date: Optional[pulumi.Input[str]] = None, exclude_from_latest: Optional[pulumi.Input[bool]] = None, manage_actions: Optional[pulumi.Input['UserArtifactManageArgs']] = None, replica_count: Optional[pulumi.Input[int]] = None, storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None, target_regions: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]] = None): """ The publishing profile of a gallery image version. :param pulumi.Input['UserArtifactSourceArgs'] source: The source image from which the Image Version is going to be created. :param pulumi.Input[bool] enable_health_check: Optional. Whether or not this application reports health. :param pulumi.Input[str] end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable. :param pulumi.Input[bool] exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. :param pulumi.Input[int] replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. :param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable. :param pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable. """ pulumi.set(__self__, "source", source) if enable_health_check is not None: pulumi.set(__self__, "enable_health_check", enable_health_check) if end_of_life_date is not None: pulumi.set(__self__, "end_of_life_date", end_of_life_date) if exclude_from_latest is not None: pulumi.set(__self__, "exclude_from_latest", exclude_from_latest) if manage_actions is not None: pulumi.set(__self__, "manage_actions", manage_actions) if replica_count is not None: pulumi.set(__self__, "replica_count", replica_count) if storage_account_type is not None: pulumi.set(__self__, "storage_account_type", storage_account_type) if target_regions is not None: pulumi.set(__self__, "target_regions", target_regions) @property @pulumi.getter def source(self) -> pulumi.Input['UserArtifactSourceArgs']: """ The source image from which the Image Version is going to be created. """ return pulumi.get(self, "source") @source.setter def source(self, value: pulumi.Input['UserArtifactSourceArgs']): pulumi.set(self, "source", value) @property @pulumi.getter(name="enableHealthCheck") def enable_health_check(self) -> Optional[pulumi.Input[bool]]: """ Optional. Whether or not this application reports health. 
""" return pulumi.get(self, "enable_health_check") @enable_health_check.setter def enable_health_check(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_health_check", value) @property @pulumi.getter(name="endOfLifeDate") def end_of_life_date(self) -> Optional[pulumi.Input[str]]: """ The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable. """ return pulumi.get(self, "end_of_life_date") @end_of_life_date.setter def end_of_life_date(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "end_of_life_date", value) @property @pulumi.getter(name="excludeFromLatest") def exclude_from_latest(self) -> Optional[pulumi.Input[bool]]: """ If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. """ return pulumi.get(self, "exclude_from_latest") @exclude_from_latest.setter def exclude_from_latest(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "exclude_from_latest", value) @property @pulumi.getter(name="manageActions") def manage_actions(self) -> Optional[pulumi.Input['UserArtifactManageArgs']]: return pulumi.get(self, "manage_actions") @manage_actions.setter def manage_actions(self, value: Optional[pulumi.Input['UserArtifactManageArgs']]): pulumi.set(self, "manage_actions", value) @property @pulumi.getter(name="replicaCount") def replica_count(self) -> Optional[pulumi.Input[int]]: """ The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. """ return pulumi.get(self, "replica_count") @replica_count.setter def replica_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "replica_count", value) @property @pulumi.getter(name="storageAccountType") def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]: """ Specifies the storage account type to be used to store the image. This property is not updatable. """ return pulumi.get(self, "storage_account_type") @storage_account_type.setter def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]): pulumi.set(self, "storage_account_type", value) @property @pulumi.getter(name="targetRegions") def target_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]: """ The target regions where the Image Version is going to be replicated to. This property is updatable. """ return pulumi.get(self, "target_regions") @target_regions.setter def target_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]): pulumi.set(self, "target_regions", value) @pulumi.input_type class GalleryArtifactVersionSourceArgs: def __init__(__self__, *, id: Optional[pulumi.Input[str]] = None, uri: Optional[pulumi.Input[str]] = None): """ The gallery artifact version source. :param pulumi.Input[str] id: The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. :param pulumi.Input[str] uri: The uri of the gallery artifact version source. Currently used to specify vhd/blob source. """ if id is not None: pulumi.set(__self__, "id", id) if uri is not None: pulumi.set(__self__, "uri", uri) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: """ The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. 
""" return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @property @pulumi.getter def uri(self) -> Optional[pulumi.Input[str]]: """ The uri of the gallery artifact version source. Currently used to specify vhd/blob source. """ return pulumi.get(self, "uri") @uri.setter def uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "uri", value) @pulumi.input_type class GalleryDataDiskImageArgs: def __init__(__self__, *, lun: pulumi.Input[int], host_caching: Optional[pulumi.Input['HostCaching']] = None, source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None): """ This is the data disk image. :param pulumi.Input[int] lun: This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. :param pulumi.Input['HostCaching'] host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite' :param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source. """ pulumi.set(__self__, "lun", lun) if host_caching is not None: pulumi.set(__self__, "host_caching", host_caching) if source is not None: pulumi.set(__self__, "source", source) @property @pulumi.getter def lun(self) -> pulumi.Input[int]: """ This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. """ return pulumi.get(self, "lun") @lun.setter def lun(self, value: pulumi.Input[int]): pulumi.set(self, "lun", value) @property @pulumi.getter(name="hostCaching") def host_caching(self) -> Optional[pulumi.Input['HostCaching']]: """ The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite' """ return pulumi.get(self, "host_caching") @host_caching.setter def host_caching(self, value: Optional[pulumi.Input['HostCaching']]): pulumi.set(self, "host_caching", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]: """ The gallery artifact version source. """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]): pulumi.set(self, "source", value) @pulumi.input_type class GalleryImageFeatureArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ A feature for gallery image. :param pulumi.Input[str] name: The name of the gallery image feature. :param pulumi.Input[str] value: The value of the gallery image feature. """ if name is not None: pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the gallery image feature. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ The value of the gallery image feature. 
""" return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class GalleryImageIdentifierArgs: def __init__(__self__, *, offer: pulumi.Input[str], publisher: pulumi.Input[str], sku: pulumi.Input[str]): """ This is the gallery image definition identifier. :param pulumi.Input[str] offer: The name of the gallery image definition offer. :param pulumi.Input[str] publisher: The name of the gallery image definition publisher. :param pulumi.Input[str] sku: The name of the gallery image definition SKU. """ pulumi.set(__self__, "offer", offer) pulumi.set(__self__, "publisher", publisher) pulumi.set(__self__, "sku", sku) @property @pulumi.getter def offer(self) -> pulumi.Input[str]: """ The name of the gallery image definition offer. """ return pulumi.get(self, "offer") @offer.setter def offer(self, value: pulumi.Input[str]): pulumi.set(self, "offer", value) @property @pulumi.getter def publisher(self) -> pulumi.Input[str]: """ The name of the gallery image definition publisher. """ return pulumi.get(self, "publisher") @publisher.setter def publisher(self, value: pulumi.Input[str]): pulumi.set(self, "publisher", value) @property @pulumi.getter def sku(self) -> pulumi.Input[str]: """ The name of the gallery image definition SKU. """ return pulumi.get(self, "sku") @sku.setter def sku(self, value: pulumi.Input[str]): pulumi.set(self, "sku", value) @pulumi.input_type class GalleryImageVersionPublishingProfileArgs: def __init__(__self__, *, end_of_life_date: Optional[pulumi.Input[str]] = None, exclude_from_latest: Optional[pulumi.Input[bool]] = None, replica_count: Optional[pulumi.Input[int]] = None, storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None, target_regions: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]] = None): """ The publishing profile of a gallery image Version. :param pulumi.Input[str] end_of_life_date: The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property is updatable. :param pulumi.Input[bool] exclude_from_latest: If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. :param pulumi.Input[int] replica_count: The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. :param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable. :param pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]] target_regions: The target regions where the Image Version is going to be replicated to. This property is updatable. """ if end_of_life_date is not None: pulumi.set(__self__, "end_of_life_date", end_of_life_date) if exclude_from_latest is not None: pulumi.set(__self__, "exclude_from_latest", exclude_from_latest) if replica_count is not None: pulumi.set(__self__, "replica_count", replica_count) if storage_account_type is not None: pulumi.set(__self__, "storage_account_type", storage_account_type) if target_regions is not None: pulumi.set(__self__, "target_regions", target_regions) @property @pulumi.getter(name="endOfLifeDate") def end_of_life_date(self) -> Optional[pulumi.Input[str]]: """ The end of life date of the gallery image version. This property can be used for decommissioning purposes. 
This property is updatable. """ return pulumi.get(self, "end_of_life_date") @end_of_life_date.setter def end_of_life_date(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "end_of_life_date", value) @property @pulumi.getter(name="excludeFromLatest") def exclude_from_latest(self) -> Optional[pulumi.Input[bool]]: """ If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. """ return pulumi.get(self, "exclude_from_latest") @exclude_from_latest.setter def exclude_from_latest(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "exclude_from_latest", value) @property @pulumi.getter(name="replicaCount") def replica_count(self) -> Optional[pulumi.Input[int]]: """ The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. """ return pulumi.get(self, "replica_count") @replica_count.setter def replica_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "replica_count", value) @property @pulumi.getter(name="storageAccountType") def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]: """ Specifies the storage account type to be used to store the image. This property is not updatable. """ return pulumi.get(self, "storage_account_type") @storage_account_type.setter def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]): pulumi.set(self, "storage_account_type", value) @property @pulumi.getter(name="targetRegions") def target_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]: """ The target regions where the Image Version is going to be replicated to. This property is updatable. """ return pulumi.get(self, "target_regions") @target_regions.setter def target_regions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TargetRegionArgs']]]]): pulumi.set(self, "target_regions", value) @pulumi.input_type class GalleryImageVersionStorageProfileArgs: def __init__(__self__, *, data_disk_images: Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]] = None, os_disk_image: Optional[pulumi.Input['GalleryOSDiskImageArgs']] = None, source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None): """ This is the storage profile of a Gallery Image Version. :param pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]] data_disk_images: A list of data disk images. :param pulumi.Input['GalleryOSDiskImageArgs'] os_disk_image: This is the OS disk image. :param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source. """ if data_disk_images is not None: pulumi.set(__self__, "data_disk_images", data_disk_images) if os_disk_image is not None: pulumi.set(__self__, "os_disk_image", os_disk_image) if source is not None: pulumi.set(__self__, "source", source) @property @pulumi.getter(name="dataDiskImages") def data_disk_images(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]]: """ A list of data disk images. """ return pulumi.get(self, "data_disk_images") @data_disk_images.setter def data_disk_images(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GalleryDataDiskImageArgs']]]]): pulumi.set(self, "data_disk_images", value) @property @pulumi.getter(name="osDiskImage") def os_disk_image(self) -> Optional[pulumi.Input['GalleryOSDiskImageArgs']]: """ This is the OS disk image. 
""" return pulumi.get(self, "os_disk_image") @os_disk_image.setter def os_disk_image(self, value: Optional[pulumi.Input['GalleryOSDiskImageArgs']]): pulumi.set(self, "os_disk_image", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]: """ The gallery artifact version source. """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]): pulumi.set(self, "source", value) @pulumi.input_type class GalleryOSDiskImageArgs: def __init__(__self__, *, host_caching: Optional[pulumi.Input['HostCaching']] = None, source: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']] = None): """ This is the OS disk image. :param pulumi.Input['HostCaching'] host_caching: The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite' :param pulumi.Input['GalleryArtifactVersionSourceArgs'] source: The gallery artifact version source. """ if host_caching is not None: pulumi.set(__self__, "host_caching", host_caching) if source is not None: pulumi.set(__self__, "source", source) @property @pulumi.getter(name="hostCaching") def host_caching(self) -> Optional[pulumi.Input['HostCaching']]: """ The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite' """ return pulumi.get(self, "host_caching") @host_caching.setter def host_caching(self, value: Optional[pulumi.Input['HostCaching']]): pulumi.set(self, "host_caching", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]: """ The gallery artifact version source. """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input['GalleryArtifactVersionSourceArgs']]): pulumi.set(self, "source", value) @pulumi.input_type class ImageDiskReferenceArgs: def __init__(__self__, *, id: pulumi.Input[str], lun: Optional[pulumi.Input[int]] = None): """ The source image used for creating the disk. :param pulumi.Input[str] id: A relative uri containing either a Platform Image Repository or user image reference. :param pulumi.Input[int] lun: If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. """ pulumi.set(__self__, "id", id) if lun is not None: pulumi.set(__self__, "lun", lun) @property @pulumi.getter def id(self) -> pulumi.Input[str]: """ A relative uri containing either a Platform Image Repository or user image reference. """ return pulumi.get(self, "id") @id.setter def id(self, value: pulumi.Input[str]): pulumi.set(self, "id", value) @property @pulumi.getter def lun(self) -> Optional[pulumi.Input[int]]: """ If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. """ return pulumi.get(self, "lun") @lun.setter def lun(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "lun", value) @pulumi.input_type class ImagePurchasePlanArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, product: Optional[pulumi.Input[str]] = None, publisher: Optional[pulumi.Input[str]] = None): """ Describes the gallery image definition purchase plan. This is used by marketplace images. :param pulumi.Input[str] name: The plan ID. :param pulumi.Input[str] product: The product ID. :param pulumi.Input[str] publisher: The publisher ID. 
""" if name is not None: pulumi.set(__self__, "name", name) if product is not None: pulumi.set(__self__, "product", product) if publisher is not None: pulumi.set(__self__, "publisher", publisher) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The plan ID. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def product(self) -> Optional[pulumi.Input[str]]: """ The product ID. """ return pulumi.get(self, "product") @product.setter def product(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "product", value) @property @pulumi.getter def publisher(self) -> Optional[pulumi.Input[str]]: """ The publisher ID. """ return pulumi.get(self, "publisher") @publisher.setter def publisher(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "publisher", value) @pulumi.input_type class KeyForDiskEncryptionSetArgs: def __init__(__self__, *, key_url: pulumi.Input[str], source_vault: Optional[pulumi.Input['SourceVaultArgs']] = None): """ Key Vault Key Url to be used for server side encryption of Managed Disks and Snapshots :param pulumi.Input[str] key_url: Fully versioned Key Url pointing to a key in KeyVault :param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription. """ pulumi.set(__self__, "key_url", key_url) if source_vault is not None: pulumi.set(__self__, "source_vault", source_vault) @property @pulumi.getter(name="keyUrl") def key_url(self) -> pulumi.Input[str]: """ Fully versioned Key Url pointing to a key in KeyVault """ return pulumi.get(self, "key_url") @key_url.setter def key_url(self, value: pulumi.Input[str]): pulumi.set(self, "key_url", value) @property @pulumi.getter(name="sourceVault") def source_vault(self) -> Optional[pulumi.Input['SourceVaultArgs']]: """ Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption Set subscription. 
""" return pulumi.get(self, "source_vault") @source_vault.setter def source_vault(self, value: Optional[pulumi.Input['SourceVaultArgs']]): pulumi.set(self, "source_vault", value) @pulumi.input_type class KeyVaultAndKeyReferenceArgs: def __init__(__self__, *, key_url: pulumi.Input[str], source_vault: pulumi.Input['SourceVaultArgs']): """ Key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap the encryptionKey :param pulumi.Input[str] key_url: Url pointing to a key or secret in KeyVault :param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret """ pulumi.set(__self__, "key_url", key_url) pulumi.set(__self__, "source_vault", source_vault) @property @pulumi.getter(name="keyUrl") def key_url(self) -> pulumi.Input[str]: """ Url pointing to a key or secret in KeyVault """ return pulumi.get(self, "key_url") @key_url.setter def key_url(self, value: pulumi.Input[str]): pulumi.set(self, "key_url", value) @property @pulumi.getter(name="sourceVault") def source_vault(self) -> pulumi.Input['SourceVaultArgs']: """ Resource id of the KeyVault containing the key or secret """ return pulumi.get(self, "source_vault") @source_vault.setter def source_vault(self, value: pulumi.Input['SourceVaultArgs']): pulumi.set(self, "source_vault", value) @pulumi.input_type class KeyVaultAndSecretReferenceArgs: def __init__(__self__, *, secret_url: pulumi.Input[str], source_vault: pulumi.Input['SourceVaultArgs']): """ Key Vault Secret Url and vault id of the encryption key :param pulumi.Input[str] secret_url: Url pointing to a key or secret in KeyVault :param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret """ pulumi.set(__self__, "secret_url", secret_url) pulumi.set(__self__, "source_vault", source_vault) @property @pulumi.getter(name="secretUrl") def secret_url(self) -> pulumi.Input[str]: """ Url pointing to a key or secret in KeyVault """ return pulumi.get(self, "secret_url") @secret_url.setter def secret_url(self, value: pulumi.Input[str]): pulumi.set(self, "secret_url", value) @property @pulumi.getter(name="sourceVault") def source_vault(self) -> pulumi.Input['SourceVaultArgs']: """ Resource id of the KeyVault containing the key or secret """ return pulumi.get(self, "source_vault") @source_vault.setter def source_vault(self, value: pulumi.Input['SourceVaultArgs']): pulumi.set(self, "source_vault", value) @pulumi.input_type class OSDiskImageEncryptionArgs: def __init__(__self__, *, disk_encryption_set_id: Optional[pulumi.Input[str]] = None): """ Contains encryption settings for an OS disk image. :param pulumi.Input[str] disk_encryption_set_id: A relative URI containing the resource ID of the disk encryption set. """ if disk_encryption_set_id is not None: pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id) @property @pulumi.getter(name="diskEncryptionSetId") def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]: """ A relative URI containing the resource ID of the disk encryption set. 
""" return pulumi.get(self, "disk_encryption_set_id") @disk_encryption_set_id.setter def disk_encryption_set_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_encryption_set_id", value) @pulumi.input_type class PrivateLinkServiceConnectionStateArgs: def __init__(__self__, *, actions_required: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None): """ A collection of information about the state of the connection between service consumer and provider. :param pulumi.Input[str] actions_required: A message indicating if changes on the service provider require any updates on the consumer. :param pulumi.Input[str] description: The reason for approval/rejection of the connection. :param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. """ if actions_required is not None: pulumi.set(__self__, "actions_required", actions_required) if description is not None: pulumi.set(__self__, "description", description) if status is not None: pulumi.set(__self__, "status", status) @property @pulumi.getter(name="actionsRequired") def actions_required(self) -> Optional[pulumi.Input[str]]: """ A message indicating if changes on the service provider require any updates on the consumer. """ return pulumi.get(self, "actions_required") @actions_required.setter def actions_required(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "actions_required", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ The reason for approval/rejection of the connection. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]: """ Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. """ return pulumi.get(self, "status") @status.setter def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]): pulumi.set(self, "status", value) @pulumi.input_type class PurchasePlanArgs: def __init__(__self__, *, name: pulumi.Input[str], product: pulumi.Input[str], publisher: pulumi.Input[str], promotion_code: Optional[pulumi.Input[str]] = None): """ Used for establishing the purchase context of any 3rd Party artifact through MarketPlace. :param pulumi.Input[str] name: The plan ID. :param pulumi.Input[str] product: Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element. :param pulumi.Input[str] publisher: The publisher ID. :param pulumi.Input[str] promotion_code: The Offer Promotion Code. """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "product", product) pulumi.set(__self__, "publisher", publisher) if promotion_code is not None: pulumi.set(__self__, "promotion_code", promotion_code) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ The plan ID. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def product(self) -> pulumi.Input[str]: """ Specifies the product of the image from the marketplace. 
This is the same value as Offer under the imageReference element. """ return pulumi.get(self, "product") @product.setter def product(self, value: pulumi.Input[str]): pulumi.set(self, "product", value) @property @pulumi.getter def publisher(self) -> pulumi.Input[str]: """ The publisher ID. """ return pulumi.get(self, "publisher") @publisher.setter def publisher(self, value: pulumi.Input[str]): pulumi.set(self, "publisher", value) @property @pulumi.getter(name="promotionCode") def promotion_code(self) -> Optional[pulumi.Input[str]]: """ The Offer Promotion Code. """ return pulumi.get(self, "promotion_code") @promotion_code.setter def promotion_code(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "promotion_code", value) @pulumi.input_type class RecommendedMachineConfigurationArgs: def __init__(__self__, *, memory: Optional[pulumi.Input['ResourceRangeArgs']] = None, v_cpus: Optional[pulumi.Input['ResourceRangeArgs']] = None): """ The properties describe the recommended machine configuration for this Image Definition. These properties are updatable. :param pulumi.Input['ResourceRangeArgs'] memory: Describes the resource range. :param pulumi.Input['ResourceRangeArgs'] v_cpus: Describes the resource range. """ if memory is not None: pulumi.set(__self__, "memory", memory) if v_cpus is not None: pulumi.set(__self__, "v_cpus", v_cpus) @property @pulumi.getter def memory(self) -> Optional[pulumi.Input['ResourceRangeArgs']]: """ Describes the resource range. """ return pulumi.get(self, "memory") @memory.setter def memory(self, value: Optional[pulumi.Input['ResourceRangeArgs']]): pulumi.set(self, "memory", value) @property @pulumi.getter(name="vCPUs") def v_cpus(self) -> Optional[pulumi.Input['ResourceRangeArgs']]: """ Describes the resource range. """ return pulumi.get(self, "v_cpus") @v_cpus.setter def v_cpus(self, value: Optional[pulumi.Input['ResourceRangeArgs']]): pulumi.set(self, "v_cpus", value) @pulumi.input_type class ResourceRangeArgs: def __init__(__self__, *, max: Optional[pulumi.Input[int]] = None, min: Optional[pulumi.Input[int]] = None): """ Describes the resource range. :param pulumi.Input[int] max: The maximum number of the resource. :param pulumi.Input[int] min: The minimum number of the resource. """ if max is not None: pulumi.set(__self__, "max", max) if min is not None: pulumi.set(__self__, "min", min) @property @pulumi.getter def max(self) -> Optional[pulumi.Input[int]]: """ The maximum number of the resource. """ return pulumi.get(self, "max") @max.setter def max(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max", value) @property @pulumi.getter def min(self) -> Optional[pulumi.Input[int]]: """ The minimum number of the resource. """ return pulumi.get(self, "min") @min.setter def min(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min", value) @pulumi.input_type class SharingProfileArgs: def __init__(__self__, *, permissions: Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]] = None): """ Profile for gallery sharing to subscription or tenant :param pulumi.Input[Union[str, 'GallerySharingPermissionTypes']] permissions: This property allows you to specify the permission of sharing gallery. 
<br><br> Possible values are: <br><br> **Private** <br><br> **Groups** """ if permissions is not None: pulumi.set(__self__, "permissions", permissions) @property @pulumi.getter def permissions(self) -> Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]]: """ This property allows you to specify the permission of sharing gallery. <br><br> Possible values are: <br><br> **Private** <br><br> **Groups** """ return pulumi.get(self, "permissions") @permissions.setter def permissions(self, value: Optional[pulumi.Input[Union[str, 'GallerySharingPermissionTypes']]]): pulumi.set(self, "permissions", value) @pulumi.input_type class SnapshotSkuArgs: def __init__(__self__, *, name: Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]] = None): """ The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is an optional parameter for incremental snapshot and the default behavior is the SKU will be set to the same sku as the previous snapshot :param pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']] name: The sku name. """ if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]]: """ The sku name. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[Union[str, 'SnapshotStorageAccountTypes']]]): pulumi.set(self, "name", value) @pulumi.input_type class SourceVaultArgs: def __init__(__self__, *, id: Optional[pulumi.Input[str]] = None): """ The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} :param pulumi.Input[str] id: Resource Id """ if id is not None: pulumi.set(__self__, "id", id) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: """ Resource Id """ return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @pulumi.input_type class TargetRegionArgs: def __init__(__self__, *, name: pulumi.Input[str], encryption: Optional[pulumi.Input['EncryptionImagesArgs']] = None, regional_replica_count: Optional[pulumi.Input[int]] = None, storage_account_type: Optional[pulumi.Input[Union[str, 'StorageAccountType']]] = None): """ Describes the target region information. :param pulumi.Input[str] name: The name of the region. :param pulumi.Input['EncryptionImagesArgs'] encryption: Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact. :param pulumi.Input[int] regional_replica_count: The number of replicas of the Image Version to be created per region. This property is updatable. :param pulumi.Input[Union[str, 'StorageAccountType']] storage_account_type: Specifies the storage account type to be used to store the image. This property is not updatable. """ pulumi.set(__self__, "name", name) if encryption is not None: pulumi.set(__self__, "encryption", encryption) if regional_replica_count is not None: pulumi.set(__self__, "regional_replica_count", regional_replica_count) if storage_account_type is not None: pulumi.set(__self__, "storage_account_type", storage_account_type) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ The name of the region. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def encryption(self) -> Optional[pulumi.Input['EncryptionImagesArgs']]: """ Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact. """ return pulumi.get(self, "encryption") @encryption.setter def encryption(self, value: Optional[pulumi.Input['EncryptionImagesArgs']]): pulumi.set(self, "encryption", value) @property @pulumi.getter(name="regionalReplicaCount") def regional_replica_count(self) -> Optional[pulumi.Input[int]]: """ The number of replicas of the Image Version to be created per region. This property is updatable. """ return pulumi.get(self, "regional_replica_count") @regional_replica_count.setter def regional_replica_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "regional_replica_count", value) @property @pulumi.getter(name="storageAccountType") def storage_account_type(self) -> Optional[pulumi.Input[Union[str, 'StorageAccountType']]]: """ Specifies the storage account type to be used to store the image. This property is not updatable. """ return pulumi.get(self, "storage_account_type") @storage_account_type.setter def storage_account_type(self, value: Optional[pulumi.Input[Union[str, 'StorageAccountType']]]): pulumi.set(self, "storage_account_type", value) @pulumi.input_type class UserArtifactManageArgs: def __init__(__self__, *, install: pulumi.Input[str], remove: pulumi.Input[str], update: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] install: Required. The path and arguments to install the gallery application. This is limited to 4096 characters. :param pulumi.Input[str] remove: Required. The path and arguments to remove the gallery application. This is limited to 4096 characters. :param pulumi.Input[str] update: Optional. The path and arguments to update the gallery application. If not present, then update operation will invoke remove command on the previous version and install command on the current version of the gallery application. This is limited to 4096 characters. """ pulumi.set(__self__, "install", install) pulumi.set(__self__, "remove", remove) if update is not None: pulumi.set(__self__, "update", update) @property @pulumi.getter def install(self) -> pulumi.Input[str]: """ Required. The path and arguments to install the gallery application. This is limited to 4096 characters. """ return pulumi.get(self, "install") @install.setter def install(self, value: pulumi.Input[str]): pulumi.set(self, "install", value) @property @pulumi.getter def remove(self) -> pulumi.Input[str]: """ Required. The path and arguments to remove the gallery application. This is limited to 4096 characters. """ return pulumi.get(self, "remove") @remove.setter def remove(self, value: pulumi.Input[str]): pulumi.set(self, "remove", value) @property @pulumi.getter def update(self) -> Optional[pulumi.Input[str]]: """ Optional. The path and arguments to update the gallery application. If not present, then update operation will invoke remove command on the previous version and install command on the current version of the gallery application. This is limited to 4096 characters. 
""" return pulumi.get(self, "update") @update.setter def update(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "update", value) @pulumi.input_type class UserArtifactSourceArgs: def __init__(__self__, *, media_link: pulumi.Input[str], default_configuration_link: Optional[pulumi.Input[str]] = None): """ The source image from which the Image Version is going to be created. :param pulumi.Input[str] media_link: Required. The mediaLink of the artifact, must be a readable storage page blob. :param pulumi.Input[str] default_configuration_link: Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob. """ pulumi.set(__self__, "media_link", media_link) if default_configuration_link is not None: pulumi.set(__self__, "default_configuration_link", default_configuration_link) @property @pulumi.getter(name="mediaLink") def media_link(self) -> pulumi.Input[str]: """ Required. The mediaLink of the artifact, must be a readable storage page blob. """ return pulumi.get(self, "media_link") @media_link.setter def media_link(self, value: pulumi.Input[str]): pulumi.set(self, "media_link", value) @property @pulumi.getter(name="defaultConfigurationLink") def default_configuration_link(self) -> Optional[pulumi.Input[str]]: """ Optional. The defaultConfigurationLink of the artifact, must be a readable storage page blob. """ return pulumi.get(self, "default_configuration_link") @default_configuration_link.setter def default_configuration_link(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_configuration_link", value)
42.138708
389
0.668763
68,890
0.969081
0
0
69,536
0.978168
0
0
32,797
0.461358
f7cdc28f8dbf0a5fa40122f9a836204bf7e9435a
500
py
Python
bgp_adjacencies/BGP_check_job.py
KamyarZiabari/solutions_examples
3dfa80d276ab13d1e489142a3fcbe2bd8ab0eba2
[ "Apache-2.0" ]
59
2019-03-08T15:08:14.000Z
2021-12-23T15:59:03.000Z
bgp_adjacencies/BGP_check_job.py
CiscoTestAutomation/genie_solutions
69c96f57dce466bcd767bd1ea6326aaf6a63fbcf
[ "Apache-2.0" ]
8
2019-04-05T04:29:17.000Z
2021-04-12T15:37:51.000Z
bgp_adjacencies/BGP_check_job.py
CiscoTestAutomation/genie_solutions
69c96f57dce466bcd767bd1ea6326aaf6a63fbcf
[ "Apache-2.0" ]
37
2019-03-15T21:35:38.000Z
2022-03-22T01:49:59.000Z
# To run the job:
# pyats run job BGP_check_job.py --testbed-file <testbed_file.yaml>

# Description: This job file checks that all BGP neighbors are in Established state

import os

# All run() must be inside a main function
def main(runtime):
    # Find the location of the script in relation to the job file
    bgp_tests = os.path.join(os.path.dirname(__file__), 'BGP_Neighbors_Established.py')

    # Execute the testscript
    runtime.tasks.run(testscript=bgp_tests)
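The job file only dispatches the testscript; the actual checks live in BGP_Neighbors_Established.py, which is not part of this record. Below is a rough, hypothetical skeleton of what such a pyATS testscript can look like, written with standard aetest constructs; the section names and the Genie learn('bgp') call are assumptions about that file, not its real contents.

# Hypothetical aetest skeleton only - not the upstream BGP_Neighbors_Established.py.
from pyats import aetest


class CommonSetup(aetest.CommonSetup):
    @aetest.subsection
    def connect_to_devices(self, testbed):
        # 'testbed' is populated from the --testbed-file argument given to the job.
        for device in testbed.devices.values():
            device.connect()


class BGP_Neighbors_Established(aetest.Testcase):
    @aetest.test
    def check_bgp_sessions(self, testbed):
        for device in testbed.devices.values():
            bgp = device.learn('bgp')  # Genie Ops snapshot (requires genie to be installed)
            # Walk bgp.info and call self.failed(...) if any neighbor's session state
            # is not Established; exact keys depend on the Genie BGP model.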
38.461538
83
0.708
0
0
0
0
0
0
0
0
324
0.648
f7cddf9b0d9e1e72530d863ce9c077212cea7e97
858
py
Python
tvae/utils/logging.py
ReallyAnonNeurips2021/TopographicVAE
97ba47c039f7eab05ce9e17c3faea0a6ec86f1eb
[ "MIT" ]
57
2021-09-02T13:20:43.000Z
2022-03-17T18:35:55.000Z
tvae/utils/logging.py
ReallyAnonNeurips2021/TopographicVAE
97ba47c039f7eab05ce9e17c3faea0a6ec86f1eb
[ "MIT" ]
2
2021-09-07T13:06:40.000Z
2022-03-04T11:54:22.000Z
tvae/utils/logging.py
ReallyAnonNeurips2021/TopographicVAE
97ba47c039f7eab05ce9e17c3faea0a6ec86f1eb
[ "MIT" ]
8
2021-09-07T14:48:25.000Z
2022-03-12T05:44:32.000Z
import os


def get_dirs():
    cwd = os.path.dirname(os.path.realpath(__file__))
    local_savedir = cwd
    local_datadir = cwd
    local_wandbdir = cwd
    return local_savedir, local_datadir, local_wandbdir


def configure_logging(config, name, model):
    if config['wandb_on']:
        import wandb
        wandb.init(name=name,
                   project='YOUR_PROJECT_NAME',
                   entity='YOUR_ENTITY_NAME',
                   dir=config['wandb_dir'],
                   config=config)
        wandb.watch(model)

        def log(key, val):
            print(f"{key}: {val}")
            wandb.log({key: val})

        checkpoint_path = os.path.join(wandb.run.dir, 'checkpoint.tar')
    else:
        def log(key, val):
            print(f"{key}: {val}")

        checkpoint_path = './checkpoint.tar'
    return log, checkpoint_path
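configure_logging hands back both the log callable and the checkpoint path, so the training loop never needs to know whether Weights & Biases is enabled. A small usage sketch, assuming the module is importable as tvae.utils.logging (per this record's path) and using a throwaway model:

# Usage sketch with wandb disabled; the Linear model is only a stand-in.
import torch
from torch import nn
from tvae.utils.logging import get_dirs, configure_logging

savedir, datadir, wandbdir = get_dirs()
config = {'wandb_on': False, 'wandb_dir': wandbdir}  # the only keys configure_logging reads
model = nn.Linear(4, 2)

log, checkpoint_path = configure_logging(config, name='demo-run', model=model)
log('train/loss', 0.42)          # prints, and would also call wandb.log when enabled
torch.save(model.state_dict(), checkpoint_path)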
24.514286
71
0.56993
0
0
0
0
0
0
0
0
122
0.142191
f7cde5f2b92aa7e388bad877341add7fc6bed0cb
521
py
Python
create_lesson_plan/admin.py
rishabhranawat/CrowdPlatform
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
[ "MIT" ]
1
2020-07-23T21:35:40.000Z
2020-07-23T21:35:40.000Z
create_lesson_plan/admin.py
rishabhranawat/CrowdPlatform
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
[ "MIT" ]
9
2021-02-08T20:32:35.000Z
2022-03-02T14:58:07.000Z
create_lesson_plan/admin.py
rishabhranawat/CrowdPlatform
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
[ "MIT" ]
null
null
null
from django.contrib import admin from create_lesson_plan.models import * admin.site.register(lesson) admin.site.register(lesson_plan) admin.site.register(Engage_Urls) admin.site.register(Explain_Urls) admin.site.register(Evaluate_Urls) admin.site.register(MCQ) admin.site.register(FITB) admin.site.register(Engage_Images) admin.site.register(Explain_Images) admin.site.register(Evaluate_Images) admin.site.register(Document) admin.site.register(Image) admin.site.register(TestScore) admin.site.register(OfflineDocument)
28.944444
39
0.84261
0
0
0
0
0
0
0
0
0
0
f7ce40df7d33d5f39e5868a59d46a085bed7cd64
3,408
py
Python
src/models/modules/visual_bert_classifier.py
inzva/emotion-recognition-drawings
56435f42d76c10c10fa58149ccbcc8d05efccdc0
[ "MIT" ]
10
2021-11-20T19:01:08.000Z
2022-01-16T09:06:12.000Z
src/models/modules/visual_bert_classifier.py
inzva/emotion-recognition-drawings
56435f42d76c10c10fa58149ccbcc8d05efccdc0
[ "MIT" ]
2
2021-12-11T12:28:03.000Z
2021-12-13T21:09:53.000Z
src/models/modules/visual_bert_classifier.py
inzva/emotion-recognition-drawings
56435f42d76c10c10fa58149ccbcc8d05efccdc0
[ "MIT" ]
null
null
null
import torch
from torch import nn
from transformers import BertTokenizer, VisualBertModel, VisualBertConfig
import numpy as np


class VisualBertClassifier(nn.Module):
    def __init__(self,
                 visual_bert_model,
                 num_classes: int = 8,
                 initial_visual_embedding_dim: int = 96,
                 final_dropout_rate: float = 0.1):
        """
        pooler_output (torch.FloatTensor of shape (batch_size, hidden_size)) —
        Last layer hidden-state of the first token of the sequence (classification token)
        after further processing through the layers used for the auxiliary pretraining task.
        E.g. for BERT-family of models, this returns the classification token after
        processing through a linear layer and a tanh activation function. The linear layer
        weights are trained from the next sentence prediction (classification) objective
        during pretraining.
        @param initial_visual_embedding_dim:
        """
        super().__init__()
        self.visual_embedding_projection = nn.Linear(initial_visual_embedding_dim, 2048)
        self.visual_bert = visual_bert_model
        self.final_dropout = nn.Dropout(final_dropout_rate)
        self.out = nn.Linear(768, num_classes)

    def forward(self,
                text_input_ids,
                text_token_type_ids,
                text_attention_mask,
                visual_embeds,
                visual_token_type_ids,
                visual_attention_mask):
        # Project the low-dimensional face/body embeddings up to the 2048-d visual space expected by VisualBERT.
        visual_embeds = self.visual_embedding_projection(visual_embeds)
        output = self.visual_bert(input_ids=text_input_ids,
                                  token_type_ids=text_token_type_ids,
                                  attention_mask=text_attention_mask,
                                  visual_embeds=visual_embeds,
                                  visual_token_type_ids=visual_token_type_ids,
                                  visual_attention_mask=visual_attention_mask)
        output = self.final_dropout(output.pooler_output)
        output = self.out(output)
        return output


if __name__ == '__main__':
    bert_text_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    inputs = bert_text_tokenizer("What is the man eating?", return_tensors="pt")
    text_input_ids = inputs.data['input_ids'].to('cuda')
    text_token_type_ids = inputs.data['token_type_ids'].to('cuda')
    text_attention_mask = inputs.data['attention_mask'].to('cuda')

    sample_face_body_embedding_path = "/home/gsoykan20/Desktop/self_development/emotion-recognition-drawings/data/emoreccom_face_body_embeddings_96d/train/0_3_4.jpg.npy"
    sample_face_body_embedding = np.load(sample_face_body_embedding_path)
    visual_embeds = torch.from_numpy(sample_face_body_embedding)
    visual_embeds = visual_embeds.to('cuda')
    visual_embeds = torch.unsqueeze(visual_embeds, 0)
    visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long).to('cuda')
    visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float).to('cuda')
    # Fixed: the constructor requires a VisualBertModel instance; the original call passed no arguments.
    # "uclanlp/visualbert-vqa-coco-pre" is an assumed example checkpoint whose visual_embedding_dim (2048)
    # matches the projection above; any compatible VisualBertModel instance works here.
    visual_bert_model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
    classifier = VisualBertClassifier(visual_bert_model)
    classifier.to('cuda')
    classifier.forward(text_input_ids,
                       text_token_type_ids,
                       text_attention_mask,
                       visual_embeds,
                       visual_token_type_ids,
                       visual_attention_mask)
46.054054
169
0.667254
1,978
0.580059
0
0
0
0
0
0
893
0.261877
f7cfecaa2797756809c5e754e4b6bf4f05823087
1,006
py
Python
narrative2vec/logging_instance/pose.py
code-iai/narrative2vec
948071d09838ea41ee9749325af6804427a060d2
[ "MIT" ]
null
null
null
narrative2vec/logging_instance/pose.py
code-iai/narrative2vec
948071d09838ea41ee9749325af6804427a060d2
[ "MIT" ]
null
null
null
narrative2vec/logging_instance/pose.py
code-iai/narrative2vec
948071d09838ea41ee9749325af6804427a060d2
[ "MIT" ]
null
null
null
from narrative2vec.logging_instance.logging_instance import LoggingInstance, _get_first_rdf_query_result from narrative2vec.logging_instance.reasoning_task import ReasoningTask from narrative2vec.ontology.neemNarrativeDefinitions import QUATERNION from narrative2vec.ontology.ontologyHandler import get_knowrob_uri class Pose(LoggingInstance): def get_translation(self): read_translation = self._get_property_('translation') return read_translation.strip().split() def get_quaternion(self): read_orientation = self._get_property_(QUATERNION) return read_orientation.strip().split() def get_reasoning_task__id(self): reasoning_task_property = self._graph_.subjects(get_knowrob_uri('parameter2'), self.context) reasoning_task = _get_first_rdf_query_result(reasoning_task_property) if reasoning_task and not reasoning_task.startswith('file://'): return ReasoningTask(reasoning_task, self._graph_).get_id() return ''
43.73913
104
0.781312
689
0.684891
0
0
0
0
0
0
36
0.035785
f7d0423ade6b86198698a9b5f2ef5a03964e0231
288
py
Python
kobra/settings/development.py
karservice/kobra
2019fd3be499c06d2527e80576fd6ff03d8fe151
[ "MIT" ]
4
2016-08-28T16:00:20.000Z
2018-01-31T18:22:43.000Z
kobra/settings/development.py
karservice/kobra
2019fd3be499c06d2527e80576fd6ff03d8fe151
[ "MIT" ]
25
2016-08-15T20:57:59.000Z
2022-02-10T18:14:48.000Z
kobra/settings/development.py
karservice/kobra
2019fd3be499c06d2527e80576fd6ff03d8fe151
[ "MIT" ]
1
2017-02-06T17:13:16.000Z
2017-02-06T17:13:16.000Z
# -*- coding: utf-8 -*- from . import * SECRET_KEY = env.str('KOBRA_SECRET_KEY', 'Unsafe_development_key._Never_use_in_production.') DEBUG = env.bool('KOBRA_DEBUG_MODE', True) DATABASES = { 'default': env.db_url('KOBRA_DATABASE_URL', 'sqlite:///db.sqlite3') }
24
72
0.652778
0
0
0
0
0
0
0
0
160
0.555556
f7d06f7dd5791848e16c5019b980180600add19a
4,153
py
Python
foobot_grapher.py
jpwright/foobot-slack
ffc1cf8490d08433d76bb62cbf7440c765089784
[ "MIT" ]
1
2018-02-17T14:29:41.000Z
2018-02-17T14:29:41.000Z
foobot_grapher.py
jpwright/foobot-slack
ffc1cf8490d08433d76bb62cbf7440c765089784
[ "MIT" ]
null
null
null
foobot_grapher.py
jpwright/foobot-slack
ffc1cf8490d08433d76bb62cbf7440c765089784
[ "MIT" ]
null
null
null
#!/usr/bin/env python from pyfoobot import Foobot import requests import matplotlib matplotlib.use('Agg') import matplotlib.dates import matplotlib.pyplot import datetime from imgurpython import ImgurClient import ConfigParser def getSensorReadings(notify): config = ConfigParser.ConfigParser() config.read("config.txt") settings = { 'foobot_api_key': '', 'foobot_email': '', 'foobot_password': '', 'imgur_id': '', 'imgur_secret': '', 'slack_webhook': '', 'averaging_period': 15, 'periods_to_graph': 12, 'threshold_pm': 25.0, 'threshold_temperature': 26.5, 'threshold_humidity': 60.0, 'threshold_co2': 30000.0, 'threshold_tvoc': 500.0 } for settings_key in settings: try: value_to_set = config.get('default', settings_key) settings[settings_key] = value_to_set except: pass imgur_supported = False if (len(settings['imgur_id']) > 0 and len(settings['imgur_secret']) > 0): imgur_supported = True imgur = ImgurClient(settings['imgur_id'], settings['imgur_secret']) fb = Foobot(settings['foobot_api_key'], settings['foobot_email'], settings['foobot_password']) devices = fb.devices() device = devices[0] measurement_interval = 60*(int(settings['averaging_period']) * int(settings['periods_to_graph'])) data = device.data_period(measurement_interval, 0) alerts = [] labels = ["PM2.5", "Temperature", "Humidity", "CO2", "tVOC"] units = ["ug/m3", "C", "%", "ppm", "ppb"] max_vals = [0, 0, 0, 0, 0] sums = [0, 0, 0, 0, 0] datapoints = [[], [], [], [], []] timeseries = [] thresholds = [ float(settings['threshold_pm']), float(settings['threshold_temperature']), float(settings['threshold_humidity']), float(settings['threshold_co2']), float(settings['threshold_tvoc']) ] num_averaging_samples = int(len(data['datapoints']) / int(settings['periods_to_graph'])) for i in range(0, len(data['datapoints'])): datapoint = data['datapoints'][i] time = datapoint[0] pm = datapoint[1] tmp = datapoint[2] hum = datapoint[3] co2 = datapoint[4] voc = datapoint[5] allpollu = datapoint[6] for j in range(0, 5): datapoints[j].append(datapoint[j+1]) if (i >= (len(data['datapoints']) - num_averaging_samples)): sums[j] += datapoint[j+1] if datapoint[j] > max_vals[j]: max_vals[j] = datapoint[j+1] timeseries.append(datetime.datetime.fromtimestamp(time)) hours = matplotlib.dates.HourLocator() minutes = matplotlib.dates.MinuteLocator(interval = 10) hoursFmt = matplotlib.dates.DateFormatter('%-I:%M') if notify: for i in range(0, 5): sums[i] = sums[i] / num_averaging_samples if sums[i] > thresholds[i]: print("Sending alert for "+labels[i]) fig, ax = matplotlib.pyplot.subplots() ax.plot(timeseries, datapoints[i]) ax.xaxis.set_major_locator(hours) ax.xaxis.set_major_formatter(hoursFmt) ax.grid(True) matplotlib.pyplot.xlabel("Time") matplotlib.pyplot.ylabel(labels[i] + " ("+units[i]+")") fig.autofmt_xdate() matplotlib.pyplot.savefig("figure.png") if imgur_supported: image = imgur.upload_from_path("figure.png", anon=True) else: image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"} payload = '{"text": "Warning: '+labels[i]+' levels at '+"{0:.2f}".format(sums[i])+' '+units[i]+'.", "attachments": [{"fallback": "Graph.", "image_url": "'+image["link"]+'"}]}' r = requests.post("https://hooks.slack.com/services/"+settings['slack_webhook'], data={"payload": payload}) else: fig, axarr = matplotlib.pyplot.subplots(1,5) for i in range(0, 5): ax = axarr[i] ax.plot(timeseries, datapoints[i]) ax.xaxis.set_major_locator(hours) ax.xaxis.set_major_formatter(hoursFmt) ax.grid(True) ax.set_xlabel("Time") ax.set_title(labels[i] + " ("+units[i]+")") 
fig.autofmt_xdate() fig.set_size_inches(18, 4) matplotlib.pyplot.savefig("figure.png", bbox_inches='tight') if (imgur_supported): image = imgur.upload_from_path("figure.png", anon=True) else: image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"} return image["link"] if __name__ == "__main__": getSensorReadings(True)
27.503311
179
0.675415
0
0
0
0
0
0
0
0
1,039
0.250181
f7d2351d64f6c5df1c1015aaa80a18aa25236a08
239
py
Python
safexl/__init__.py
ThePoetCoder/safexl
d2fb91ad45d33b6f51946e99c78e7fcf7564e82e
[ "MIT" ]
6
2020-08-28T16:00:28.000Z
2022-01-17T14:48:04.000Z
safexl/__init__.py
ThePoetCoder/safexl
d2fb91ad45d33b6f51946e99c78e7fcf7564e82e
[ "MIT" ]
null
null
null
safexl/__init__.py
ThePoetCoder/safexl
d2fb91ad45d33b6f51946e99c78e7fcf7564e82e
[ "MIT" ]
null
null
null
# Copyright (c) 2020 safexl from safexl.toolkit import * import safexl.xl_constants as xl_constants import safexl.colors as colors __author__ = "Eric Smith" __email__ = "ThePoetCoder@gmail.com" __license__ = "MIT" __version__ = "0.0.7"
19.916667
42
0.76569
0
0
0
0
0
0
0
0
75
0.313808
f7d2cd873463ee3cda95ca64c29e31dbdad2cad2
2,989
py
Python
musicdb/restapi/migrations/0001_initial.py
alexebaker/django-music_database
cffa2574d894509b0eec7c71bd821cc0fd2f2cf7
[ "MIT" ]
null
null
null
musicdb/restapi/migrations/0001_initial.py
alexebaker/django-music_database
cffa2574d894509b0eec7c71bd821cc0fd2f2cf7
[ "MIT" ]
7
2020-06-05T18:23:50.000Z
2022-03-11T23:24:27.000Z
musicdb/restapi/migrations/0001_initial.py
alexebaker/django-music_database
cffa2574d894509b0eec7c71bd821cc0fd2f2cf7
[ "MIT" ]
null
null
null
# Generated by Django 2.0.4 on 2018-05-01 05:22 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Album', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100)), ('year', models.CharField(max_length=4)), ], ), migrations.CreateModel( name='AlbumArt', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ImageField(upload_to='album_art')), ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='album_art', to='restapi.Album')), ], ), migrations.CreateModel( name='Artist', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200)), ], ), migrations.CreateModel( name='Genre', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='Style', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='Track', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100)), ('duration', models.TimeField()), ('position', models.CharField(max_length=3)), ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tracks', to='restapi.Album')), ], ), migrations.AddField( model_name='album', name='artist', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='albums', to='restapi.Artist'), ), migrations.AddField( model_name='album', name='genres', field=models.ManyToManyField(blank=True, related_name='albums', to='restapi.Genre'), ), migrations.AddField( model_name='album', name='styles', field=models.ManyToManyField(blank=True, related_name='albums', to='restapi.Style'), ), ]
38.320513
136
0.556708
2,863
0.957845
0
0
0
0
0
0
395
0.132151
f7d39269257b5bc266bf53edfc897cb41af5201f
402
py
Python
ballot_source/sources/migrations/0004_auto_20200824_1444.py
Ballot-Drop/ballot-source
5dd9692ca5e9237a6073833a81771a17ad2c1dc9
[ "MIT" ]
3
2020-09-05T06:02:08.000Z
2020-09-28T23:44:05.000Z
ballot_source/sources/migrations/0004_auto_20200824_1444.py
Ballot-Drop/ballot-source
5dd9692ca5e9237a6073833a81771a17ad2c1dc9
[ "MIT" ]
18
2020-08-28T18:09:54.000Z
2020-09-19T17:36:08.000Z
ballot_source/sources/migrations/0004_auto_20200824_1444.py
Ballot-Drop/ballot-source
5dd9692ca5e9237a6073833a81771a17ad2c1dc9
[ "MIT" ]
null
null
null
# Generated by Django 3.0.9 on 2020-08-24 20:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('sources', '0003_sourcedetail_last_pull'), ] operations = [ migrations.AlterField( model_name='sourcedetail', name='diff', field=models.TextField(blank=True, null=True), ), ]
21.157895
58
0.606965
309
0.768657
0
0
0
0
0
0
105
0.261194
f7d411b7a1e10f51b58ab6692c180f5bbcd91a28
2,007
py
Python
src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py
Idate96/Mimetic-Fem
75ad3b982ef7ed7c6198f526d19dc460dec28f4d
[ "MIT" ]
null
null
null
src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py
Idate96/Mimetic-Fem
75ad3b982ef7ed7c6198f526d19dc460dec28f4d
[ "MIT" ]
null
null
null
src/tests/Yi/tests/inner_product_between_lobatto_and_gauss.py
Idate96/Mimetic-Fem
75ad3b982ef7ed7c6198f526d19dc460dec28f4d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ (SHORT NAME EXPLANATION) >>>DOCTEST COMMANDS (THE TEST ANSWER) @author: Yi Zhang. Created on Mon Jul 10 20:12:27 2017 Department of Aerodynamics Faculty of Aerospace Engineering TU Delft #SUMMARY---------------- #INPUTS----------------- #ESSENTIAL: #OPTIONAL: #OUTPUTS---------------- #EXAMPLES--------------- #NOTES------------------ """ # -*- coding: utf-8 -*- """ (SHORT NAME EXPLANATION) >>>DOCTEST COMMANDS (THE TEST ANSWER) @author: Yi Zhang (张仪). Created on Thu Jul 6 16:00:33 2017 Department of Aerodynamics Faculty of Aerospace Engineering TU Delft #SUMMARY---------------- #INPUTS----------------- #ESSENTIAL: #OPTIONAL: #OUTPUTS---------------- #EXAMPLES--------------- #NOTES------------------ """ from function_space import FunctionSpace import numpy as np from mesh import CrazyMesh from forms import Form from hodge import hodge from coboundaries import d from assemble import assemble from _assembling import assemble_, integral1d_ import matplotlib.pyplot as plt from quadrature import extended_gauss_quad from scipy.integrate import quad from sympy import Matrix import scipy.io from scipy import sparse import scipy as sp from inner_product import inner # %% exact solution define # u^{(1)} = { u, v }^T def u(x,y): return +np.cos(np.pi*x) * np.sin(np.pi*y) def v(x,y): return -np.sin(np.pi*x) * np.cos(np.pi*y) def r_u(x,y): return -2* np.pi**2 * np.cos(np.pi*x) * np.sin(np.pi*y) def r_v(x,y): return 2* np.pi**2 * np.sin(np.pi*x) * np.cos(np.pi*y) # %% define the mesh mesh = CrazyMesh( 2, (2, 2), ((-1, 1), (-1, 1)), 0.05 ) func_space_gauss1 = FunctionSpace(mesh, '1-gauss', (5, 5), is_inner=False) func_space_lobatto1 = FunctionSpace(mesh, '1-lobatto', (5, 5), is_inner=False) form_1_gauss = Form(func_space_gauss1) form_1_lobatto = Form(func_space_lobatto1) M = inner(form_1_lobatto.basis,form_1_gauss.basis)
22.3
78
0.619332
0
0
0
0
0
0
0
0
944
0.468486
f7d511ad2e6640e470287dff8220becb4fa1880a
1,871
py
Python
src/quality_control/bin/createSpotDetectionQCHTML.py
WoutDavid/ST-nextflow-pipeline
8de3da218ec4f10f183e1163fe782c19fd8dd841
[ "MIT" ]
null
null
null
src/quality_control/bin/createSpotDetectionQCHTML.py
WoutDavid/ST-nextflow-pipeline
8de3da218ec4f10f183e1163fe782c19fd8dd841
[ "MIT" ]
null
null
null
src/quality_control/bin/createSpotDetectionQCHTML.py
WoutDavid/ST-nextflow-pipeline
8de3da218ec4f10f183e1163fe782c19fd8dd841
[ "MIT" ]
null
null
null
import json from bs4 import BeautifulSoup import pandas as pd import sys # Argparsing argument_index = 1 template = sys.argv[argument_index] argument_index +=1 recall_json = sys.argv[argument_index] argument_index +=1 recall_plot = sys.argv[argument_index] argument_index +=1 precision_jsons_list = [sys.argv[i] for i in range(argument_index, len(sys.argv))] precision_rows_list = [] # convert jsons back to dicts for html conversion for json_path in precision_jsons_list: with open(json_path, 'r') as json_file: data = json.load(json_file) precision_rows_list.append(data) precision_df = pd.DataFrame(precision_rows_list) precision_df = precision_df.sort_values(by='Round #') precision_html_table = precision_df.to_html(index=False) # Same for recall json recall_rows_list = [] with open(recall_json, 'r') as json_file: data=json.load(json_file) recall_rows_list.append(data) recall_df = pd.DataFrame(recall_rows_list) recall_html_table = recall_df.to_html(index=False) # Create html with open(template, 'r') as template_file: contents = template_file.read() template_soup = BeautifulSoup(contents, features="html.parser") p_list = template_soup.find_all('p') p_index = 0 # Read recall table tag recall_soup = BeautifulSoup(recall_html_table, features="html.parser") table_tag = recall_soup.find('table') p_list[p_index].insert_after(table_tag) p_index+=1 image_tag = template_soup.new_tag('img') image_tag['src']= f"./recall/{recall_plot}" image_tag['width']= 700 image_tag['height']= 500 p_list[p_index].insert_after(image_tag) p_index+=1 precision_soup = BeautifulSoup(precision_html_table, features="html.parser") table_tag = precision_soup.find('table') p_list[p_index].insert_after(table_tag) p_index+=1 with open('spot_detection_qc_report.html', 'w') as result_file: result_file.write(str( template_soup ))
27.115942
82
0.772314
0
0
0
0
0
0
0
0
277
0.148049
f7d56596394f7bfd79f8b0a1466fae7aaa135fac
2,104
py
Python
test/torch/mpc/test_fss.py
NicoSerranoP/PySyft
87fcd566c46fce4c16d363c94396dd26bd82a016
[ "Apache-2.0" ]
3
2020-11-24T05:15:57.000Z
2020-12-07T09:52:45.000Z
test/torch/mpc/test_fss.py
NicoSerranoP/PySyft
87fcd566c46fce4c16d363c94396dd26bd82a016
[ "Apache-2.0" ]
1
2020-09-29T00:24:31.000Z
2020-09-29T00:24:31.000Z
test/torch/mpc/test_fss.py
NicoSerranoP/PySyft
87fcd566c46fce4c16d363c94396dd26bd82a016
[ "Apache-2.0" ]
1
2021-09-04T16:27:41.000Z
2021-09-04T16:27:41.000Z
import pytest import torch as th from syft.frameworks.torch.mpc.fss import DPF, DIF, n @pytest.mark.parametrize("op", ["eq", "le"]) def test_fss_class(op): class_ = {"eq": DPF, "le": DIF}[op] th_op = {"eq": th.eq, "le": th.le}[op] gather_op = {"eq": "__add__", "le": "__xor__"}[op] # single value primitive = class_.keygen(n_values=1) alpha, s_00, s_01, *CW = primitive mask = th.randint(0, 2 ** n, alpha.shape) k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)] x = th.tensor([0]) x_masked = x + k0[0] + k1[0] y0 = class_.eval(0, x_masked, *k0[1:]) y1 = class_.eval(1, x_masked, *k1[1:]) assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all() # 1D tensor primitive = class_.keygen(n_values=3) alpha, s_00, s_01, *CW = primitive mask = th.randint(0, 2 ** n, alpha.shape) k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)] x = th.tensor([0, 2, -2]) x_masked = x + k0[0] + k1[0] y0 = class_.eval(0, x_masked, *k0[1:]) y1 = class_.eval(1, x_masked, *k1[1:]) assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all() # 2D tensor primitive = class_.keygen(n_values=4) alpha, s_00, s_01, *CW = primitive mask = th.randint(0, 2 ** n, alpha.shape) k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)] x = th.tensor([[0, 2], [-2, 0]]) x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape) y0 = class_.eval(0, x_masked, *k0[1:]) y1 = class_.eval(1, x_masked, *k1[1:]) assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all() # 3D tensor primitive = class_.keygen(n_values=8) alpha, s_00, s_01, *CW = primitive mask = th.randint(0, 2 ** n, alpha.shape) k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)] x = th.tensor([[[0, 2], [-2, 0]], [[0, 2], [-2, 0]]]) x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape) y0 = class_.eval(0, x_masked, *k0[1:]) y1 = class_.eval(1, x_masked, *k1[1:]) assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
32.875
70
0.551331
0
0
0
0
2,013
0.956749
0
0
101
0.048004
f7d62d0a50f28ea90ec1747700a205b806ed75b7
2,684
py
Python
allennlp/tests/data/tokenizers/pretrained_transformer_tokenizer_test.py
donna-legal/allennlp
fd1e3cfaed07ec3ba03b922d12eee47f8be16837
[ "Apache-2.0" ]
1
2020-01-28T07:52:28.000Z
2020-01-28T07:52:28.000Z
allennlp/tests/data/tokenizers/pretrained_transformer_tokenizer_test.py
donna-legal/allennlp
fd1e3cfaed07ec3ba03b922d12eee47f8be16837
[ "Apache-2.0" ]
null
null
null
allennlp/tests/data/tokenizers/pretrained_transformer_tokenizer_test.py
donna-legal/allennlp
fd1e3cfaed07ec3ba03b922d12eee47f8be16837
[ "Apache-2.0" ]
null
null
null
from allennlp.common.testing import AllenNlpTestCase from allennlp.data.tokenizers import PretrainedTransformerTokenizer class TestPretrainedTransformerTokenizer(AllenNlpTestCase): def test_splits_roberta(self): tokenizer = PretrainedTransformerTokenizer("roberta-base") sentence = "A, <mask> AllenNLP sentence." expected_tokens = ["<s>", "A", ",", "<mask>", "Allen", "N", "LP", "Ġsentence", ".", "</s>"] tokens = [t.text for t in tokenizer.tokenize(sentence)] assert tokens == expected_tokens # sentence pair sentence_1 = "A, <mask> AllenNLP sentence." sentence_2 = "A sentence." expected_tokens = [ "<s>", "A", ",", "<mask>", "Allen", "N", "LP", "Ġsentence", ".", "</s>", "</s>", "A", "Ġsentence", ".", "</s>", ] tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)] assert tokens == expected_tokens def test_splits_cased_bert(self): tokenizer = PretrainedTransformerTokenizer("bert-base-cased") sentence = "A, [MASK] AllenNLP sentence." expected_tokens = [ "[CLS]", "A", ",", "[MASK]", "Allen", "##NL", "##P", "sentence", ".", "[SEP]", ] tokens = [t.text for t in tokenizer.tokenize(sentence)] assert tokens == expected_tokens # sentence pair sentence_1 = "A, [MASK] AllenNLP sentence." sentence_2 = "A sentence." expected_tokens = [ "[CLS]", "A", ",", "[MASK]", "Allen", "##NL", "##P", "sentence", ".", "[SEP]", "A", "sentence", ".", "[SEP]", ] tokens = [t.text for t in tokenizer.tokenize_sentence_pair(sentence_1, sentence_2)] assert tokens == expected_tokens def test_splits_uncased_bert(self): sentence = "A, [MASK] AllenNLP sentence." expected_tokens = [ "[CLS]", "a", ",", "[MASK]", "allen", "##nl", "##p", "sentence", ".", "[SEP]", ] tokenizer = PretrainedTransformerTokenizer("bert-base-uncased") tokens = [t.text for t in tokenizer.tokenize(sentence)] assert tokens == expected_tokens
28.252632
99
0.462742
2,563
0.953852
0
0
0
0
0
0
594
0.221064
f7d6ae2f3cb3eec3b7e8a4d67b500afb529fc556
2,928
py
Python
openmdao/api.py
ryanfarr01/blue
a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff
[ "Apache-2.0" ]
null
null
null
openmdao/api.py
ryanfarr01/blue
a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff
[ "Apache-2.0" ]
null
null
null
openmdao/api.py
ryanfarr01/blue
a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff
[ "Apache-2.0" ]
null
null
null
"""Key OpenMDAO classes can be imported from here.""" # Core from openmdao.core.problem import Problem from openmdao.core.group import Group from openmdao.core.parallel_group import ParallelGroup from openmdao.core.explicitcomponent import ExplicitComponent from openmdao.core.implicitcomponent import ImplicitComponent from openmdao.core.indepvarcomp import IndepVarComp from openmdao.core.analysis_error import AnalysisError # Components from openmdao.components.deprecated_component import Component from openmdao.components.exec_comp import ExecComp from openmdao.components.linear_system_comp import LinearSystemComp from openmdao.components.meta_model import MetaModel from openmdao.components.multifi_meta_model import MultiFiMetaModel # Solvers from openmdao.solvers.linear.linear_block_gs import LinearBlockGS from openmdao.solvers.linear.linear_block_jac import LinearBlockJac from openmdao.solvers.linear.direct import DirectSolver from openmdao.solvers.linear.petsc_ksp import PetscKSP from openmdao.solvers.linear.linear_runonce import LinearRunOnce from openmdao.solvers.linear.scipy_iter_solver import ScipyIterativeSolver from openmdao.solvers.linesearch.backtracking import ArmijoGoldsteinLS from openmdao.solvers.linesearch.backtracking import BoundsEnforceLS from openmdao.solvers.nonlinear.nonlinear_block_gs import NonlinearBlockGS from openmdao.solvers.nonlinear.nonlinear_block_jac import NonlinearBlockJac from openmdao.solvers.nonlinear.newton import NewtonSolver from openmdao.solvers.nonlinear.nonlinear_runonce import NonLinearRunOnce # Surrogate Models from openmdao.surrogate_models.kriging import KrigingSurrogate, FloatKrigingSurrogate from openmdao.surrogate_models.multifi_cokriging import MultiFiCoKrigingSurrogate, \ FloatMultiFiCoKrigingSurrogate from openmdao.surrogate_models.nearest_neighbor import NearestNeighbor from openmdao.surrogate_models.response_surface import ResponseSurface from openmdao.surrogate_models.surrogate_model import SurrogateModel, \ MultiFiSurrogateModel # Vectors from openmdao.vectors.default_vector import DefaultVector try: from openmdao.vectors.petsc_vector import PETScVector except ImportError: PETScVector = None # Developer Tools from openmdao.devtools.problem_viewer.problem_viewer import view_model from openmdao.devtools.viewconns import view_connections # Derivative Specification from openmdao.jacobians.assembled_jacobian import AssembledJacobian, \ DenseJacobian, COOJacobian, CSRJacobian, CSCJacobian # Drivers try: from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver except ImportError: pass from openmdao.drivers.scipy_optimizer import ScipyOptimizer # System-Building Tools from openmdao.utils.options_dictionary import OptionsDictionary # Recorders from openmdao.recorders.sqlite_recorder import SqliteRecorder from openmdao.recorders.openmdao_server_recorder import OpenMDAOServerRecorder
41.828571
85
0.873634
0
0
0
0
0
0
0
0
193
0.065915
f7d8750cdaa9ce35d0790079eee8be949cbd02ee
1,443
py
Python
code-buddy.py
xl3ehindTim/Code-buddy
e04b7b4327a0b3ff2790d22aef93dca6fce021f4
[ "MIT" ]
8
2019-11-29T09:20:11.000Z
2020-11-02T10:55:35.000Z
code-buddy.py
xl3ehindTim/Code-buddy
e04b7b4327a0b3ff2790d22aef93dca6fce021f4
[ "MIT" ]
2
2019-12-02T13:48:01.000Z
2019-12-02T17:00:56.000Z
code-buddy.py
xl3ehindTim/Code-buddy
e04b7b4327a0b3ff2790d22aef93dca6fce021f4
[ "MIT" ]
3
2019-11-29T10:03:44.000Z
2020-10-01T10:23:55.000Z
import os
from getArgs import getArgs
from modules import python, javascript, html, php, bootstrap, cca
# from folder import file

# code-buddy.py create (file type) (directory name)

# Checks for "create"
if getArgs(1) == "create":
    # Checks for which file type
    projectType = getArgs(2)
    # Checks for file name
    if projectType == "python":
        name = getArgs(3)
        python.createPythonProject(name)
        print("Folder created successfully")
    elif projectType == "javascript":
        name = getArgs(3)
        javascript.createJavascriptProject(name)
        print("Folder created successfully")
    elif projectType == "html":
        name = getArgs(3)
        html.createHtmlProject(name)
        print("Folder created successfully")
    elif projectType == "php":
        name = getArgs(3)
        php.createPhpProject(name)
        print("Folder created successfully")
    elif projectType == "bootstrap":
        name = getArgs(3)
        bootstrap.createPhpProject(name)
        print("Folder created successfully")
    elif projectType == "cca":  # fixed: missing colon here caused a SyntaxError
        name = getArgs(3)
        cca.createCcaProject(name)
        print("Folder created successfully")
    # If not valid file type
    else:
        print(f"argument {getArgs(2)} is unknown, try: 'python, javascript, html, php, bootstrap or cca'")
else:
    # If invalid "create"
    print(f"argument {getArgs(1)} is unknown, use 'create' to create a folder")
33.55814
101
0.644491
0
0
0
0
0
0
0
0
576
0.399168
f7d8d7b6d6bbc7f8a6c1802ec8a9bedc82cb072a
5,799
py
Python
compyle/tests/test_ext_module.py
manish364824/compyle
cc97dd0a0e7b12f904b3f1c0f20aa06a41779c61
[ "BSD-3-Clause" ]
1
2020-11-23T12:13:04.000Z
2020-11-23T12:13:04.000Z
compyle/tests/test_ext_module.py
manish364824/compyle
cc97dd0a0e7b12f904b3f1c0f20aa06a41779c61
[ "BSD-3-Clause" ]
null
null
null
compyle/tests/test_ext_module.py
manish364824/compyle
cc97dd0a0e7b12f904b3f1c0f20aa06a41779c61
[ "BSD-3-Clause" ]
null
null
null
from contextlib import contextmanager from distutils.sysconfig import get_config_var from io import open as io_open import os from os.path import join, exists import shutil import sys import tempfile from textwrap import dedent from multiprocessing import Pool from unittest import TestCase, main try: from unittest import mock except ImportError: import mock from ..ext_module import get_md5, ExtModule, get_ext_extension, get_unicode def _check_write_source(root): """Used to create an ExtModule and test if a file was opened. It returns the number of times "open" was called. """ m = mock.mock_open() orig_side_effect = m.side_effect def _side_effect(*args, **kw): with io_open(*args, **kw) as fp: fp.write(get_unicode("junk")) return orig_side_effect(*args, **kw) m.side_effect = _side_effect with mock.patch('compyle.ext_module.io.open', m, create=True): s = ExtModule("print('hello')", root=root) s.write_source() return m.call_count def _check_compile(root): with mock.patch('shutil.copy') as m: s = ExtModule("print('hello')", root=root) s.write_and_build() if m.called: # If it was called, do the copy to mimic the action. shutil.copy(*m.call_args[0]) return m.call_count class TestMiscExtMod(TestCase): def test_md5(self): data = "hello world" # Two calls with same data produce same result self.assertEqual(get_md5(data), get_md5(data)) # Two calls with different data produce different md5sums. self.assertNotEqual(get_md5(data), get_md5(data + ' ')) class TestExtModule(TestCase): def setUp(self): self.root = tempfile.mkdtemp() self.data = dedent('''\ # cython: language_level=3 def f(): return "hello world" ''') def tearDown(self): if sys.platform.startswith('win'): try: shutil.rmtree(self.root) except WindowsError: pass else: shutil.rmtree(self.root) def test_constructor(self): data = self.data s = ExtModule(data, root=self.root) self.assertTrue(exists(join(self.root, 'build'))) self.assertEqual(s.hash, get_md5(data)) self.assertEqual(s.code, data) expect_name = 'm_%s' % (s.hash) self.assertEqual(s.name, expect_name) self.assertEqual(s.src_path, join(self.root, expect_name + '.pyx')) self.assertEqual(s.ext_path, join(self.root, expect_name + get_ext_extension())) s.write_source() self.assertTrue(exists(s.src_path)) self.assertEqual(data, open(s.src_path).read()) def test_default_root(self): try: data = self.data s = ExtModule(data) s.write_source() self.assertTrue(exists(join(s.root, 'build'))) self.assertEqual(s.hash, get_md5(data)) self.assertEqual(s.code, data) self.assertTrue(exists(s.src_path)) self.assertEqual(data, open(s.src_path).read()) finally: os.unlink(s.src_path) def test_load_module(self): data = self.data s = ExtModule(data, root=self.root) mod = s.load() self.assertEqual(mod.f(), "hello world") self.assertTrue(exists(s.ext_path)) def _create_dummy_module(self): code = "# cython: language_level=3\ndef hello(): return 'hello'" modname = 'test_rebuild.py' f = join(self.root, modname) with open(f, 'w') as fp: fp.write(code) return f @contextmanager def _add_root_to_sys_path(self): import sys if self.root not in sys.path: sys.path.insert(0, self.root) try: yield finally: sys.path.remove(self.root) def test_rebuild_when_dependencies_change(self): # Given. data = self.data depends = ["test_rebuild"] s = ExtModule(data, root=self.root, depends=depends) fname = self._create_dummy_module() f_stat = os.stat(fname) with self._add_root_to_sys_path(): # When self.assertTrue(s.should_recompile()) s.write_and_build() # Then. 
self.assertFalse(s.should_recompile()) # Now lets re-create the module and try again. # When. fname = self._create_dummy_module() # Update the timestamp to make it newer, otherwise we need to # sleep. os.utime(fname, (f_stat.st_atime, f_stat.st_mtime + 10)) # Then. self.assertTrue(s.should_recompile()) def test_that_multiple_writes_do_not_occur_for_same_source(self): # Given n_proc = 5 p = Pool(n_proc) # When # Note that _create_extension cannot be defined here or even in the # class as a nested function or instance method cannot be pickled. result = p.map(_check_write_source, [self.root]*n_proc) p.close() # Then # The file should have been opened only once. self.assertEqual(sum(result), 1) def test_that_multiple_compiles_do_not_occur_for_same_source(self): # Given n_proc = 5 p = Pool(n_proc) # When # Note that _check_compile cannot be defined here or even in the # class as a nested function or instance method cannot be pickled. result = p.map(_check_compile, [self.root]*n_proc) p.close() # Then # The shutil.copy should have been run only once. self.assertEqual(sum(result), 1) if __name__ == '__main__': main()
29.436548
76
0.607174
4,431
0.764097
218
0.037593
238
0.041042
0
0
1,166
0.201069
f7db3778ef11768f9b2aff72c3bc714173c0ef05
5,286
py
Python
tma/collector/xhn.py
hebpmo/TMA
b07747d3112e822ff92dd2ba4589d2288adab154
[ "MIT" ]
2
2020-02-15T18:31:39.000Z
2020-03-18T13:30:58.000Z
tma/collector/xhn.py
hebpmo/TMA
b07747d3112e822ff92dd2ba4589d2288adab154
[ "MIT" ]
null
null
null
tma/collector/xhn.py
hebpmo/TMA
b07747d3112e822ff92dd2ba4589d2288adab154
[ "MIT" ]
1
2021-02-13T19:14:39.000Z
2021-02-13T19:14:39.000Z
# -*- coding: UTF-8 -*- """ collector.xhn - 新华网数据采集 官网:http://www.xinhuanet.com/ 接口分析: 1. 获取文章列表 http://qc.wa.news.cn/nodeart/list?nid=115093&pgnum=1&cnt=10000 新华全媒体头条 http://www.xinhuanet.com/politics/qmtt/index.htm ==================================================================== """ import requests import re from datetime import datetime from bs4 import BeautifulSoup from zb.crawlers.utils import get_header import traceback import pandas as pd from tqdm import tqdm import tma home_url = "http://www.xinhuanet.com/" def get_website_map(): wzdt_url = "http://www.xinhuanet.com/wzdt2014.htm" html = requests.get(wzdt_url, headers=get_header()) bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml') map_raw = bsobj.find('div', {'class': "content_left"}) raise NotImplementedError def get_special_topics(pgnum=1): """获取专题列表""" url = "http://qc.wa.news.cn/nodeart/list?" \ "nid=115093&pgnum=%s&cnt=200" % str(pgnum) res = requests.get(url).text res = res.replace("null", "\'\'") res = eval(res) assert res['status'] == 0, "获取文章列表失败" data = res['data']['list'] specials = [] for a in data: special = { "Abstract": a['Abstract'], "Author": a['Author'], "LinkUrl": a['LinkUrl'], "PubTime": a['PubTime'], "Title": a['Title'], "allPics": a['allPics'], } specials.append(special) return specials def get_article_detail(article_url): """获取新华网article_url中的文章内容 :param article_url: 文章url :return: { "url": article_url, "title": title, "pub_time": pub_time, "source": source, "content": content } """ # article_url = "http://www.xinhuanet.com/fortune/2018-06/20/c_129897476.htm" html = requests.get(article_url, headers=get_header()) bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml') # 解析标题 cols = bsobj.find('div', {"class": "h-news"}).text.strip().split("\r\n") title = cols[0].strip() pub_time = cols[1].strip() source = cols[-1].strip() # 解析内容 content = bsobj.find('div', {"id": "p-detail"}).text.strip() content = content.replace("\u3000\u3000", "") content = [x.strip() for x in content.split("\n")] content = [x for x in content if x != ""] content = "\n".join(content) return { "url": article_url, "title": title, "pub_time": pub_time, "source": source, "content": content } class HomePage(object): """新华网首页""" def __init__(self): self.home_url = "http://www.xinhuanet.com/" @staticmethod def _get_date_from_url(url): pat = re.compile("(\d{4}-\d{2}[/-]\d{2})") res = pat.findall(url) if res is not None and len(res) == 1: return res[0].replace('/', "-") else: return None def get_article_list(self, d=None): """获取首页的头条文章列表""" html = requests.get(self.home_url, headers=get_header()) bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml') a_list = [] for a in bsobj.find_all("a"): try: url = a['href'] title = a.text.strip() date_ = self._get_date_from_url(url) a_list.append([url, title, date_]) except: if tma.DEBUG: traceback.print_exc() continue a_list = [a for a in a_list if a[0] != "" and a[0].strip("/") != "http://xhgy.xinhuanet.com" and a[0].startswith("http") and a[1] != "" and a[1] != "视频MP4地址" and "c_" in a[0] and a[2] != "" # and 'photo' not in a[0] # and 'video' not in a[0] ] # 根据url去重 df = pd.DataFrame(a_list, columns=['url', 'title', 'date']) df.drop_duplicates('url', inplace=True) res = [list(x) for x in list(df.values)] if d is None: date_list = [datetime.now().date().__str__()] else: date_list = d res = [a for a in res if a[2] in date_list] res = sorted(res, key=lambda x: x[2], reverse=True) return res def get_articles(self, d=None): """获取首页文章内容 :param d: list 限定获取文章的日期,默认是当日日期,可以指定多个离散的日期 :return: 
list """ # 获取首页文章列表URL、按发布日期过滤、按URL去重 res = self.get_article_list(d) a_list = [a[0] for a in res] a_list = list(set(a_list)) articles = [] for a in tqdm(a_list, ncols=100, desc="xhn.get_articles"): try: article = get_article_detail(a) articles.append(article) except: if tma.DEBUG: traceback.print_exc() return articles class Fortune(object): def __init__(self): self.url1 = "http://www.xinhuanet.com/fortune/" self.url2 = "http://www.xinhuanet.com/fortune/caiyan.htm" self.url3 = "http://www.xinhuanet.com/fortune/cfx.htm" self.url4 = "http://www.xinhuanet.com/fortune/bcxc.htm"
28.26738
81
0.531782
2,889
0.518299
0
0
256
0.045928
0
0
1,917
0.343918
f7dbb6eabf0492827bece2fbca9d7d345965609a
995
py
Python
tests/test_onetv.py
unlocKing/plugins
e5cee730c22a049cfd0e3873389c82e8ab5f7c41
[ "BSD-2-Clause" ]
2
2021-09-02T21:29:48.000Z
2021-09-20T07:05:08.000Z
tests/test_onetv.py
unlocKing/plugins
e5cee730c22a049cfd0e3873389c82e8ab5f7c41
[ "BSD-2-Clause" ]
null
null
null
tests/test_onetv.py
unlocKing/plugins
e5cee730c22a049cfd0e3873389c82e8ab5f7c41
[ "BSD-2-Clause" ]
null
null
null
import unittest from plugins.onetv import OneTV class TestPluginPerviyKanal(unittest.TestCase): def test_can_handle_url(self): regex_test_list = [ "https://media.1tv.ru/embed/ctcmedia/ctc-che.html?start=auto", "https://media.1tv.ru/embed/ctcmedia/ctc-dom.html?start=auto", "https://media.1tv.ru/embed/ctcmedia/ctc-love.html?start=auto", "https://stream.1tv.ru/live", "https://www.1tv.ru/embedlive?start=auto", "https://www.1tv.ru/live", "https://www.chetv.ru/online/", "https://www.ctc.ru/online/", "https://www.ctclove.ru/online/", "https://domashniy.ru/online", "https://ren.tv/live", "https://media.1tv.ru/embed/nmg/nmg-ren.html", "https://www.5-tv.ru/live/", "https://media.1tv.ru/embed/nmg/nmg-5tv.html", ] for url in regex_test_list: self.assertTrue(OneTV.can_handle_url(url))
36.851852
75
0.577889
943
0.947739
0
0
0
0
0
0
535
0.537688
f7dd193790b7ae7797daf8c7c2f3ca9a0623ed89
405
py
Python
tests/test_plugins/pytester_example_dir/test_file_1.py
MORSECorp/snappiershot
acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e
[ "Apache-2.0" ]
27
2020-10-15T18:36:25.000Z
2022-03-02T19:11:44.000Z
tests/test_plugins/pytester_example_dir/test_file_1.py
MORSECorp/snappiershot
acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e
[ "Apache-2.0" ]
33
2020-10-15T15:03:37.000Z
2022-03-24T21:00:34.000Z
tests/test_plugins/pytester_example_dir/test_file_1.py
MORSECorp/snappiershot
acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e
[ "Apache-2.0" ]
5
2020-10-15T16:30:00.000Z
2022-03-30T15:07:28.000Z
""" This is a test file used for testing the pytest plugin. """ def test_function_passed(snapshot): """ The snapshot for this function is expected to exist. """ snapshot.assert_match(3 + 4j) def test_function_new(snapshot): """ The snapshot for this function is expected to exist, but only one assertion is expected. """ snapshot.assert_match(3 + 4j) snapshot.assert_match(3 + 4j)
31.153846
100
0.708642
0
0
0
0
0
0
0
0
219
0.540741
f7de06300594a810a1f4175db45d6b833ced1a94
7,940
py
Python
src/compas/geometry/pointclouds/pointcloud.py
Sam-Bouten/compas
011c7779ded9b69bb602568b470bb0443e336f62
[ "MIT" ]
null
null
null
src/compas/geometry/pointclouds/pointcloud.py
Sam-Bouten/compas
011c7779ded9b69bb602568b470bb0443e336f62
[ "MIT" ]
null
null
null
src/compas/geometry/pointclouds/pointcloud.py
Sam-Bouten/compas
011c7779ded9b69bb602568b470bb0443e336f62
[ "MIT" ]
null
null
null
from __future__ import print_function from __future__ import absolute_import from __future__ import division from random import uniform from compas.geometry import transform_points from compas.geometry import centroid_points from compas.geometry import bounding_box from compas.geometry import Primitive from compas.geometry import Point __all__ = ['Pointcloud'] class Pointcloud(Primitive): """Class for working with pointclouds. Parameters ---------- points : sequence[point] A sequence of points to add to the cloud. **kwargs : dict[str, Any], optional Additional keyword arguments collected in a dict. Attributes ---------- points : list[:class:`compas.geometry.Point`] The points of the cloud. Examples -------- >>> """ def __init__(self, points, **kwargs): super(Pointcloud, self).__init__(**kwargs) self._points = None self.points = points @property def DATASCHEMA(self): from schema import Schema from compas.data import is_float3 return Schema({ 'points': lambda points: all(is_float3(point) for point in points) }) @property def JSONSCHEMANAME(self): return 'pointcloud' @property def data(self): return {'points': [point.data for point in self.points]} @data.setter def data(self, data): self._points = [Point.from_data(point) for point in data['points']] @classmethod def from_data(cls, data): return cls(data['points']) # ========================================================================== # properties # ========================================================================== @property def points(self): return self._points @points.setter def points(self, points): self._points = [Point(*point) for point in points] @property def centroid(self): return centroid_points(self.points) @property def bounding_box(self): return bounding_box(self.points) # ========================================================================== # customization # ========================================================================== def __repr__(self): return 'Pointcloud({0!r})'.format(self.points) def __len__(self): return len(self.points) def __getitem__(self, key): if key > len(self) - 1: raise KeyError return self.points[key] def __setitem__(self, key, value): if key > len(self) - 1: raise KeyError self.points[key] = value def __iter__(self): return iter(self.points) def __eq__(self, other): """Is this pointcloud equal to the other pointcloud? Two pointclouds are considered equal if they have the same number of points and if the XYZ coordinates of the corresponding points are identical. Parameters ---------- other : :class:`compas.geometry.Pointcloud` | list[[float, float, float] | :class:`compas.geometry.Point`] The pointcloud to compare. Returns ------- bool True if the pointclouds are equal. False otherwise. """ if len(self) != len(other): return False A = sorted(self, key=lambda point: (point[0], point[1], point[2])) B = sorted(other, key=lambda point: (point[0], point[1], point[2])) return all(a == b for a, b in zip(A, B)) # ========================================================================== # constructors # ========================================================================== @classmethod def from_ply(cls, filepath): """Construct a pointcloud from a PLY file. Parameters ---------- filepath : str | bytes | os.PathLike Path of the PLY file. Returns ------- :class:`compas.geometry.Pointcloud` """ pass @classmethod def from_pcd(cls, filepath): """Construct a pointcloud from a PCD file. Parameters ---------- filepath : str | bytes | os.PathLike Path of the PCD file. 
Returns ------- :class:`compas.geometry.Pointcloud` """ pass @classmethod def from_bounds(cls, x, y, z, n): """Construct a point cloud within a given box. Parameters ---------- x : float | tuple[float, float] Size of the cloud in the X direction. If a single value, the size is (0, x). If a pair of values, the size is (x[0], x[1]). y : float | tuple[float, float] Size of the cloud in the Y direction. If a single value, the size is (0, y). If a pair of values, the size is (y[0], y[1]). z : float | tuple[float, float] Size of the cloud in the Z direction. If a single value, the size is (0, z). If a pair of values, the size is (z[0], z[1]). n : int The number of points in the cloud. Returns ------- :class:`compas.geometry.Pointcloud` Notes ----- The XYZ coordinates of the `n` points are radnomly chosen within the provided `x`, `y`, and `z` bounds. Thererefor, there is no guarantee that the bounds are part of the resulting coordinates. Examples -------- >>> """ try: len(x) except TypeError: xmin = 0 xmax = x else: xmin, xmax = x try: len(y) except TypeError: ymin = 0 ymax = y else: ymin, ymax = y try: len(z) except TypeError: zmin = 0 zmax = z else: zmin, zmax = z x = [uniform(xmin, xmax) for i in range(n)] y = [uniform(ymin, ymax) for i in range(n)] z = [uniform(zmin, zmax) for i in range(n)] return cls(list(map(list, zip(x, y, z)))) @classmethod def from_box(cls, box, n): """Construct a point cloud within a given box. Parameters ---------- box: :class:`compas.geometry.Box` The axis aligned bounding box of the cloud. n: int The number of points in the cloud. Returns ------- :class:`compas.geometry.Pointcloud` Examples -------- >>> from compas.geometry import Box >>> cloud = Pointcloud.from_box(Box.from_width_height_depth(10, 3, 5), 100) >>> all((-5 < x < +5) and (-2.5 < y < +2.5) and (-1.5 < z < +1.5) for x, y, z in cloud.points) True """ points = box.points x, y, z = zip(*points) xmin, xmax = min(x), max(x) ymin, ymax = min(y), max(y) zmin, zmax = min(z), max(z) x = [uniform(xmin, xmax) for i in range(n)] y = [uniform(ymin, ymax) for i in range(n)] z = [uniform(zmin, zmax) for i in range(n)] return cls(list(map(list, zip(x, y, z)))) # ========================================================================== # methods # ========================================================================== def transform(self, T): """Apply a transformation to the pointcloud. Parameters ---------- T : :class:`compas.geometry.Transformation` The transformation. Returns ------- None The cloud is modified in place. """ for index, point in enumerate(transform_points(self.points, T)): self.points[index].x = point[0] self.points[index].y = point[1] self.points[index].z = point[2]
27.957746
114
0.500756
7,571
0.953526
0
0
4,378
0.551385
0
0
4,186
0.527204
f7de36b7d46515af7a1b6676baaac3b4ccaf3705
4,366
py
Python
oa/regex.py
Worteks/OrangeAssassin
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
[ "Apache-2.0" ]
null
null
null
oa/regex.py
Worteks/OrangeAssassin
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
[ "Apache-2.0" ]
null
null
null
oa/regex.py
Worteks/OrangeAssassin
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
[ "Apache-2.0" ]
null
null
null
"""Handle regex conversions.""" from builtins import object import re import operator from functools import reduce import oa.errors # Map of perl flags and the corresponding re ones. FLAGS = { "i": re.IGNORECASE, "s": re.DOTALL, "m": re.MULTILINE, "x": re.VERBOSE, } DELIMS = { "/": "/", "{": "}", "%": "%", "<": ">", "'": "'", "~": "~", ",": ",", "!": "!", ";": ";", } # Regex substitution for Perl -> Python compatibility _CONVERTS = ( (re.compile(r""" # Python does not support local extensions so remove those. For example: # (?i:test) becomes (?:test) (?<=\(\?) # Look-behind and match (? (([adlupimsx-]*?)|(\^[?^alupimsx]*?)) # Capture the extension (?=:) # Look-ahead and match the : """, re.VERBOSE), r""), (re.compile(r""" # Python doesn't have support for expression such as \b? # Replace it with (\b)? (\\b) # Capture group that matches \b or \B (?=\?) # Look-ahead that matches ? """, re.VERBOSE | re.IGNORECASE), r"(\1)"), (re.compile(r""" # Python doesn't have support for "independent" subexpression (?>) # Replace those with non capturing groups (?:) (?<=\(\?) # Look-behind and match (? (>) # Match > """, re.VERBOSE), r":"), ) class Pattern(object): """Abstract class for rule regex matching.""" def __init__(self, pattern): self._pattern = pattern def match(self, text): raise NotImplementedError() class MatchPattern(Pattern): """This pattern does a search on the text and returns either 1 or 0.""" def match(self, text): return 1 if self._pattern.search(text) else 0 class NotMatchPattern(Pattern): """This pattern does a search on the text and returns either 1 or 0.""" def match(self, text): return 0 if self._pattern.search(text) else 1 def perl2re(pattern, match_op="=~"): """Convert a Perl type regex to a Python one.""" # We don't need to consider the pre-flags pattern = pattern.strip().lstrip("mgs") delim = pattern[0] try: rev_delim = DELIMS[delim] except KeyError: raise oa.errors.InvalidRegex("Invalid regex delimiter %r in %r" % (delim, pattern)) try: pattern, flags_str = pattern.lstrip(delim).rsplit(rev_delim, 1) except ValueError: raise oa.errors.InvalidRegex("Invalid regex %r. 
Please make sure you "
                                     "have escaped all the special characters "
                                     "when you defined the regex in "
                                     "configuration file" % pattern)

    for conv_p, repl in _CONVERTS:
        pattern = conv_p.sub(repl, pattern)

    flags = reduce(operator.or_, (FLAGS.get(flag, 0) for flag in flags_str), 0)

    try:
        if match_op == "=~":
            return MatchPattern(re.compile(pattern, flags))
        elif match_op == "!~":
            return NotMatchPattern(re.compile(pattern, flags))
    except re.error as e:
        raise oa.errors.InvalidRegex("Invalid regex %r: %s" % (pattern, e))


class Regex(object):
    """Customised regex class to work in lazy mode"""
    compiled = None

    def __init__(self, pattern, flags=0):
        self.pattern = pattern
        self.flags = flags

    def compile(self):
        from oa.config import LAZY_MODE
        if LAZY_MODE:
            return re.compile(self.pattern, self.flags)
        elif not self.compiled:
            self.compiled = re.compile(self.pattern, self.flags)
        return self.compiled

    def search(self, string):
        return self.compile().search(string)

    def match(self, string):
        return self.compile().match(string)

    def fullmatch(self, string):
        return self.compile().fullmatch(string)

    def sub(self, repl, string, count=0):
        return self.compile().sub(repl, string, count)

    def subn(self, repl, string, count=0):
        # Fixed: this previously delegated to sub(), so subn() returned only the new
        # string instead of the expected (new_string, number_of_subs) tuple.
        return self.compile().subn(repl, string, count)

    def split(self, string, maxsplit=0):
        return self.compile().split(string, maxsplit)

    def findall(self, string):
        return self.compile().findall(string)

    def finditer(self, string):
        return self.compile().finditer(string)
28.535948
81
0.574668
1,723
0.39464
0
0
0
0
0
0
1,500
0.343564
f7dec0cd3c585519d06741f3516a5564ea368e83
1,749
py
Python
test_data/barometer_kalman.py
theo-brown/ahrs
cd9c9e0bbf9db7fd67a297e1aafa8518bf17050d
[ "MIT" ]
1
2022-01-19T14:20:05.000Z
2022-01-19T14:20:05.000Z
test_data/barometer_kalman.py
theo-brown/ahrs
cd9c9e0bbf9db7fd67a297e1aafa8518bf17050d
[ "MIT" ]
null
null
null
test_data/barometer_kalman.py
theo-brown/ahrs
cd9c9e0bbf9db7fd67a297e1aafa8518bf17050d
[ "MIT" ]
null
null
null
import numpy as np import matplotlib.pyplot as plt from matplotlib.widgets import Slider from kalman_filter import KalmanFilter raw_data = np.loadtxt("barometer_data.txt") # Truncate raw data (it's super long) raw_data = raw_data[:raw_data.size//4] raw_data_step = np.loadtxt("barometer_data_step.txt") t1 = np.arange(0, raw_data.size/12.5, 1/12.5) t2 = np.arange(0, raw_data_step.size/12.5, 1/12.5) fig1 = plt.figure("Data") ax1 = fig1.add_subplot(121) ax2 = fig1.add_subplot(122) fig1.subplots_adjust(bottom=0.25) [unfiltered_raw_line] = ax1.plot(t1, raw_data) [unfiltered__step_line] = ax2.plot(t2, raw_data_step) def filter_data(data, x0, P, Q, R): filter1 = KalmanFilter(x0, P, 1, 0, 1, Q, R) x_out = np.zeros(data.size) P_out = np.zeros(data.size) for k in np.arange(1, data.size): x_out[k], P_out[k] = filter1.update(0, data[k]) return x_out, P_out P0 = 2 Q0 = 1e-4 [filtered_raw_line] = ax1.plot(t1, filter_data(raw_data, 0, P0, Q0, R=raw_data.var())[0]) [filtered_step_line] = ax2.plot(t2, filter_data(raw_data_step, 0, P0, Q0, R=raw_data.var())[0]) P_slider_ax = fig1.add_axes([0.25, 0.15, 0.65, 0.03]) Q_slider_ax = fig1.add_axes([0.25, 0.1, 0.65, 0.03]) P_slider = Slider(P_slider_ax, 'P', 0.5, 5, valinit=P0) Q_slider = Slider(Q_slider_ax, 'Q', 1e-4, 1e-3, valinit=Q0) def sliders_on_changed(val): P = P_slider.val Q = Q_slider.val x_raw_new, P_raw_new = filter_data(raw_data, 0, P, Q, R=raw_data.var()) filtered_raw_line.set_ydata(x_raw_new) x_step_new, P_step_new = filter_data(raw_data_step, 0, P, Q, R=raw_data.var()) filtered_step_line.set_ydata(x_step_new) P_slider.on_changed(sliders_on_changed) Q_slider.on_changed(sliders_on_changed) plt.show()
31.232143
95
0.704974
0
0
0
0
0
0
0
0
94
0.053745
f7df479cf0eb03f9edb6d36fe5773b716ab0594f
1,694
py
Python
number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py
joaojunior/hackerrank
a5ee0449e791535930b8659dfb7dddcf9e1237de
[ "MIT" ]
null
null
null
number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py
joaojunior/hackerrank
a5ee0449e791535930b8659dfb7dddcf9e1237de
[ "MIT" ]
null
null
null
number-of-orders-in-the-backlog/number_of_orders_in_the_backlog.py
joaojunior/hackerrank
a5ee0449e791535930b8659dfb7dddcf9e1237de
[ "MIT" ]
1
2019-06-19T00:51:02.000Z
2019-06-19T00:51:02.000Z
import heapq from typing import List class Solution: def get_number_of_backlog_orders(self, orders: List[List[int]]) -> int: sell_backlog = [] buy_backlog = [] for price, amount, order_type in orders: if order_type == 0: while amount > 0: if sell_backlog and sell_backlog[0][0] <= price: sell_price, sell_amount = heapq.heappop(sell_backlog) if sell_amount > amount: heapq.heappush(sell_backlog, (sell_price, sell_amount - amount)) amount = 0 else: amount -= sell_amount else: heapq.heappush(buy_backlog, (-price, amount)) amount = 0 else: while amount > 0: if buy_backlog and -buy_backlog[0][0] >= price: buy_price, buy_amount = heapq.heappop(buy_backlog) if buy_amount > amount: heapq.heappush(buy_backlog, (buy_price, buy_amount - amount)) amount = 0 else: amount -= buy_amount else: heapq.heappush(sell_backlog, (price, amount)) amount = 0 result = 0 for _, amount in sell_backlog: result += amount for _, amount in buy_backlog: result += amount return result % (10**9 + 7)
40.333333
78
0.432113
1,654
0.976387
0
0
0
0
0
0
0
0
f7df8183ed1dfeac2b83cb6b6b173f961a29bd8f
2,585
py
Python
scripts/plotresults.py
rafzi/DeepThings
d12e8e8ad9f9ebaa3b0d55f547c0b3c7f1baf636
[ "MIT" ]
1
2020-02-28T10:07:47.000Z
2020-02-28T10:07:47.000Z
scripts/plotresults.py
rafzi/DeepThings
d12e8e8ad9f9ebaa3b0d55f547c0b3c7f1baf636
[ "MIT" ]
null
null
null
scripts/plotresults.py
rafzi/DeepThings
d12e8e8ad9f9ebaa3b0d55f547c0b3c7f1baf636
[ "MIT" ]
2
2020-03-10T15:17:55.000Z
2020-03-17T15:37:37.000Z
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# 1: YOLOv2, 2: AlexNet, 3: VGG-16, 4: GoogLeNet
model = 4
LINEPLOT = True

dfs = pd.read_excel("t.xlsx", sheet_name=None, header=None)

if model == 1:
    ms = "YOLOv2"
elif model == 2:
    ms = "AlexNet"
elif model == 3:
    ms = "VGG-16"
elif model == 4:
    ms = "GoogLeNet"

sh = dfs[ms]
print(sh)

labels = ["1", "2", "3", "4", "5", "6"]
x = np.arange(len(labels))

plt.rcParams.update({"font.size": 11})
fig, ax = plt.subplots()
plt.subplots_adjust(top=0.95, right=0.95)


# Workaround for this: https://bugs.python.org/issue32790
def fmtFlt(f, digits):
    s = ("{:#." + str(digits) + "g}").format(f)
    sz = len(s) - 1
    if sz < digits:
        s += "0"
    if s[-1] == ".":
        s = s[:-1]
    return s


def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate(fmtFlt(height, 3),
                    xy=(rect.get_x() + 1.2*rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom', rotation=90, fontsize=9.5)


def addData(speed, fused):
    y = []
    lineindex = -4 + (speed)*(13+4)
    addindex = 1 if fused else 0
    for i in range(0, 6):
        y.append(sh[5*2 + addindex][lineindex] / sh[i*2 + addindex][lineindex])
    y = np.array(y)  # / 1000
    y = np.flip(y)
    l = ("OWP @ " if fused else "LOP @ ") + \
        ("1 GBit/s" if speed == 1 else ("100 MBit/s" if speed == 2 else "10 MBit/s"))
    color = "C1" if fused else "C0"
    if LINEPLOT:
        color = "C3" if speed == 1 else ("C4" if speed == 2 else "C1")
        #line = "o" if speed == 1 else ("v" if speed == 2 else "s")
        line = "o" if fused else "s"
        line += "--" if fused else "-"
        ax.plot(x, y, line, label=l, color=color)
    else:
        barw = 0.15
        bars = 6
        i = 2 * (-speed+4-1) + int(fused)
        #patterns = ["\\\\", "//", "||", "--", "..", "OO"]
        patterns = ["\\\\", "\\\\", "//", "//", "..", ".."]
        g = ax.bar(x + barw/2 - bars/2*barw + i * barw, y, barw,
                   label=l, color=color, hatch=patterns[i], alpha=0.99)
        #autolabel(g)


# 1: 1gbit, 2: 100mbit, 3: 10mbit
addData(1, True)
addData(1, False)
addData(2, True)
addData(2, False)
addData(3, True)
addData(3, False)

#plt.ylim(plt.ylim()*1.1)
ybot, ytop = plt.ylim()
plt.ylim(ybot, ytop*1.05)

ax.set_xlabel("Number of devices")
ax.set_ylabel("Run time speedup over one device")
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()

plt.savefig("plot_runtime.pdf")
plt.show()
26.927083
82
0.573308
0
0
0
0
0
0
0
0
695
0.268859
f7e1dfd58619e2e27eaf63ac95f9bbd2215fc5c4
565
py
Python
setup.py
oubiwann/myriad-worlds
bfbbab713e35c5700e37158a892c3a66a8c9f37a
[ "MIT" ]
3
2015-01-29T05:24:32.000Z
2021-05-10T01:47:36.000Z
setup.py
oubiwann/myriad-worlds
bfbbab713e35c5700e37158a892c3a66a8c9f37a
[ "MIT" ]
null
null
null
setup.py
oubiwann/myriad-worlds
bfbbab713e35c5700e37158a892c3a66a8c9f37a
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages

from myriad import meta
from myriad.util import dist


setup(
    name=meta.display_name,
    version=meta.version,
    description=meta.description,
    long_description=meta.long_description,
    author=meta.author,
    author_email=meta.author_email,
    url=meta.url,
    license=meta.license,
    packages=find_packages() + ["twisted.plugins"],
    package_data={
        "twisted": ['plugins/example_server.py']
    },
    install_requires=meta.requires,
    zip_safe=False
)

dist.refresh_plugin_cache()
21.730769
51
0.709735
0
0
0
0
0
0
0
0
53
0.093805
f7e2347893dbbd12b3c90e6ec6f949cb83aa2a4f
1,110
py
Python
val_resnet.py
AlexKhakhlyuk/fixedconv
bf3848c3fd60af2e617f2118064ee6f551b45d95
[ "Apache-1.1" ]
1
2020-05-05T07:20:25.000Z
2020-05-05T07:20:25.000Z
val_resnet.py
khakhlyuk/fixedconv
bf3848c3fd60af2e617f2118064ee6f551b45d95
[ "Apache-1.1" ]
null
null
null
val_resnet.py
khakhlyuk/fixedconv
bf3848c3fd60af2e617f2118064ee6f551b45d95
[ "Apache-1.1" ]
null
null
null
from subprocess import run

# python -u val_resnet.py

cuda = 0  # which gpu to use
dataset = 'cifar10'
logs_path = 'logs_resnet' + '_' + dataset
manualSeed = 99
workers = 0

for model in ['resnet20', 'preact_resnet20']:
    commands = [
        'python', '-u', 'validate_resnet.py',
        '--dataset=' + dataset,
        '--model=' + model,
        '-c=' + str(cuda),
        '--workers=' + str(workers),
        '--manualSeed=' + str(manualSeed),
        '--logs_path=' + logs_path,
    ]
    run(commands)

for model in ['resnet20', 'preact_resnet20']:
    f = True
    for k in [1, 3]:
        for ff in [False, True]:
            commands = [
                'python', '-u', 'validate_resnet.py',
                '--dataset=' + dataset,
                '--model=' + model,
                '-k=' + str(k),
                '-c=' + str(cuda),
                '--workers=' + str(workers),
                '--manualSeed=' + str(manualSeed),
                '--logs_path=' + logs_path,
            ]
            if f:
                commands.append('-f')
            if ff:
                commands.append('--ff')
            run(commands)
27.75
53
0.473874
0
0
0
0
0
0
0
0
337
0.303604
f7e3584c6b4d27959b077f55eb4556611369a6be
466
py
Python
temboo/core/Library/KhanAcademy/Badges/__init__.py
jordanemedlock/psychtruths
52e09033ade9608bd5143129f8a1bfac22d634dd
[ "Apache-2.0" ]
7
2016-03-07T02:07:21.000Z
2022-01-21T02:22:41.000Z
temboo/core/Library/KhanAcademy/Badges/__init__.py
jordanemedlock/psychtruths
52e09033ade9608bd5143129f8a1bfac22d634dd
[ "Apache-2.0" ]
null
null
null
temboo/core/Library/KhanAcademy/Badges/__init__.py
jordanemedlock/psychtruths
52e09033ade9608bd5143129f8a1bfac22d634dd
[ "Apache-2.0" ]
8
2016-06-14T06:01:11.000Z
2020-04-22T09:21:44.000Z
from temboo.Library.KhanAcademy.Badges.AllCategories import AllCategories, AllCategoriesInputSet, AllCategoriesResultSet, AllCategoriesChoreographyExecution
from temboo.Library.KhanAcademy.Badges.BadgesByCategory import BadgesByCategory, BadgesByCategoryInputSet, BadgesByCategoryResultSet, BadgesByCategoryChoreographyExecution
from temboo.Library.KhanAcademy.Badges.GetBadges import GetBadges, GetBadgesInputSet, GetBadgesResultSet, GetBadgesChoreographyExecution
116.5
171
0.909871
0
0
0
0
0
0
0
0
0
0
f7e5ec76a74f735b8085dae26118d20f0eea400d
453
py
Python
akagi/data_sources/spreadsheet_data_source.py
pauchan/akagi
7cf1f5a52b8f1ebfdc74a527bf6b26254f99343b
[ "MIT" ]
26
2017-05-18T11:52:04.000Z
2018-08-25T22:03:07.000Z
akagi/data_sources/spreadsheet_data_source.py
pauchan/akagi
7cf1f5a52b8f1ebfdc74a527bf6b26254f99343b
[ "MIT" ]
325
2017-05-08T07:22:28.000Z
2022-03-31T15:43:18.000Z
akagi/data_sources/spreadsheet_data_source.py
pauchan/akagi
7cf1f5a52b8f1ebfdc74a527bf6b26254f99343b
[ "MIT" ]
7
2017-05-02T02:06:15.000Z
2020-04-09T05:32:11.000Z
from akagi.data_source import DataSource
from akagi.data_file import DataFile


class SpreadsheetDataSource(DataSource):
    '''SpreadsheetDataSource represents data on Google Spreadsheets'''

    def __init__(self, sheet_id, sheet_range='A:Z', no_cache=False):
        self._sheet_id = sheet_id
        self._sheet_range = sheet_range

    @property
    def data_files(self):
        return [DataFile.spreadsheet(self._sheet_id, self._sheet_range)]
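# Hypothetical usage of the class above; the sheet id and range are placeholders,
# and consuming the resulting data files is assumed to follow akagi's DataSource
# convention shown in the `data_files` property.
source = SpreadsheetDataSource('your-spreadsheet-id', sheet_range='A:C')
for data_file in source.data_files:
    print(data_file)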
28.3125
72
0.732892
372
0.821192
0
0
108
0.238411
0
0
74
0.163355
f7e673c1a03cc4b207464e8a0e2d7bce749cb8ba
7,401
py
Python
stanCode_projects/my_drawing/my_drawing.py
kenhuang1204/stanCode_projects
f697a34a1c54a864c1140cb0f2f76e2d70b45698
[ "MIT" ]
null
null
null
stanCode_projects/my_drawing/my_drawing.py
kenhuang1204/stanCode_projects
f697a34a1c54a864c1140cb0f2f76e2d70b45698
[ "MIT" ]
null
null
null
stanCode_projects/my_drawing/my_drawing.py
kenhuang1204/stanCode_projects
f697a34a1c54a864c1140cb0f2f76e2d70b45698
[ "MIT" ]
null
null
null
""" File: my_drawing.py Name: 黃科諺 ---------------------- TODO: """ from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc from campy.graphics.gwindow import GWindow def main(): """ Meet Snorlax (卡比獸) of stanCode! He dreams of Python when he sleeps. Be like Snorlax. """ window = GWindow(width=300, height=300) face_outer = GOval(120, 75, x=(window.width-120)/2, y=50) face_outer.filled = True face_outer.fill_color = 'darkcyan' face_outer.color = 'darkcyan' window.add(face_outer) face_inner = GOval(100, 65, x=(window.width-100)/2, y=60) face_inner.filled = True face_inner.fill_color = 'lightsalmon' face_inner.color = 'lightsalmon' window.add(face_inner) forehead = GPolygon() forehead.add_vertex((135, 60)) forehead.add_vertex((165, 60)) forehead.add_vertex((150, 68)) forehead.filled = True forehead.fill_color = 'darkcyan' forehead.color = 'darkcyan' window.add(forehead) r_ear = GPolygon() r_ear.add_vertex((113, 35)) r_ear.add_vertex((95, 75)) r_ear.add_vertex((140, 50)) r_ear.filled = True r_ear.fill_color = 'darkcyan' r_ear.color = 'darkcyan' window.add(r_ear) l_ear = GPolygon() l_ear.add_vertex((187, 35)) l_ear.add_vertex((205, 75)) l_ear.add_vertex((160, 50)) l_ear.filled = True l_ear.fill_color = 'darkcyan' l_ear.color = 'darkcyan' window.add(l_ear) r_eye = GLine (120, 75, 140, 75) window.add(r_eye) l_eye = GLine(180, 75, 160, 75) window.add(l_eye) mouth = GLine(135, 85, 165, 85) window.add(mouth) r_tooth = GPolygon() r_tooth.add_vertex((135, 84)) r_tooth.add_vertex((139, 84)) r_tooth.add_vertex((137, 80)) r_tooth.filled = True r_tooth.fill_color = 'white' r_tooth.color = 'white' window.add(r_tooth) l_tooth = GPolygon() l_tooth.add_vertex((165, 84)) l_tooth.add_vertex((161, 84)) l_tooth.add_vertex((163, 80)) l_tooth.filled = True l_tooth.fill_color = 'white' l_tooth.color = 'white' window.add(l_tooth) r_arm = GOval(100, 45, x=25, y=98) r_arm.filled = True r_arm.fill_color = 'darkcyan' r_arm.color = 'darkcyan' window.add(r_arm) l_arm = GOval(100, 45, x=175, y=98) l_arm.filled = True l_arm.fill_color = 'darkcyan' l_arm.color = 'darkcyan' window.add(l_arm) body = GOval(200, 160, x=(window.width - 200) / 2, y=95) body.filled = True body.fill_color = 'darkcyan' body.color = 'darkcyan' window.add(body) belly = GOval(176, 120, x=(window.width - 176) / 2, y=95) belly.filled = True belly.fill_color = 'lightsalmon' window.add(belly) r_claw1 = GPolygon() r_claw1.add_vertex((38, 100)) r_claw1.add_vertex((44, 102)) r_claw1.add_vertex((40, 106)) r_claw1.filled = True r_claw1.fill_color = 'white' window.add(r_claw1) r_claw2 = GPolygon() r_claw2.add_vertex((32, 102)) r_claw2.add_vertex((38, 104)) r_claw2.add_vertex((35, 108)) r_claw2.filled = True r_claw2.fill_color = 'white' window.add(r_claw2) r_claw3 = GPolygon() r_claw3.add_vertex((28, 104)) r_claw3.add_vertex((34, 106)) r_claw3.add_vertex((31, 110)) r_claw3.filled = True r_claw3.fill_color = 'white' window.add(r_claw3) r_claw4 = GPolygon() r_claw4.add_vertex((24, 109)) r_claw4.add_vertex((30, 111)) r_claw4.add_vertex((27, 115)) r_claw4.filled = True r_claw4.fill_color = 'white' window.add(r_claw4) r_claw5 = GPolygon() r_claw5.add_vertex((19, 122)) r_claw5.add_vertex((25, 121)) r_claw5.add_vertex((28, 127)) r_claw5.filled = True r_claw5.fill_color = 'white' window.add(r_claw5) l_claw1 = GPolygon() l_claw1.add_vertex((262, 100)) l_claw1.add_vertex((256, 102)) l_claw1.add_vertex((260, 106)) l_claw1.filled = True l_claw1.fill_color = 'white' window.add(l_claw1) l_claw2 = GPolygon() l_claw2.add_vertex((268, 102)) 
l_claw2.add_vertex((262, 104)) l_claw2.add_vertex((265, 108)) l_claw2.filled = True l_claw2.fill_color = 'white' window.add(l_claw2) l_claw3 = GPolygon() l_claw3.add_vertex((272, 104)) l_claw3.add_vertex((266, 106)) l_claw3.add_vertex((269, 110)) l_claw3.filled = True l_claw3.fill_color = 'white' window.add(l_claw3) r_claw4 = GPolygon() r_claw4.add_vertex((276, 109)) r_claw4.add_vertex((270, 111)) r_claw4.add_vertex((273, 115)) r_claw4.filled = True r_claw4.fill_color = 'white' window.add(r_claw4) r_claw5 = GPolygon() r_claw5.add_vertex((281, 122)) r_claw5.add_vertex((275, 121)) r_claw5.add_vertex((272, 127)) r_claw5.filled = True r_claw5.fill_color = 'white' window.add(r_claw5) r_foot = GOval(65, 60, x=50, y=220) r_foot.filled = True r_foot.fill_color = 'lightsalmon' r_foot.color = 'lightsalmon' window.add(r_foot) r_palm = GOval(45, 40, x=65, y=235) r_palm.filled = True r_palm.fill_color = 'Chocolate' r_palm.color = 'Chocolate' window.add(r_palm) r_nail1 = GPolygon() r_nail1.add_vertex((80, 210)) r_nail1.add_vertex((88, 223)) r_nail1.add_vertex((78, 224)) r_nail1.filled = True r_nail1.fill_color = 'white' window.add(r_nail1) r_nail2 = GPolygon() r_nail2.add_vertex((52, 220)) r_nail2.add_vertex((65, 228)) r_nail2.add_vertex((57, 235)) r_nail2.filled = True r_nail2.fill_color = 'white' window.add(r_nail2) r_nail3 = GPolygon() r_nail3.add_vertex((43, 250)) r_nail3.add_vertex((54, 248)) r_nail3.add_vertex((52, 258)) r_nail3.filled = True r_nail3.fill_color = 'white' window.add(r_nail3) l_foot = GOval(65, 60, x=185, y=220) l_foot.filled = True l_foot.fill_color = 'lightsalmon' l_foot.color = 'lightsalmon' window.add(l_foot) l_palm = GOval(45, 40, x=190, y=235) l_palm.filled = True l_palm.fill_color = 'Chocolate' l_palm.color = 'Chocolate' window.add(l_palm) l_nail1 = GPolygon() l_nail1.add_vertex((220, 210)) l_nail1.add_vertex((212, 223)) l_nail1.add_vertex((222, 224)) l_nail1.filled = True l_nail1.fill_color = 'white' window.add(l_nail1) r_nail2 = GPolygon() r_nail2.add_vertex((248, 220)) r_nail2.add_vertex((235, 228)) r_nail2.add_vertex((243, 235)) r_nail2.filled = True r_nail2.fill_color = 'white' window.add(r_nail2) r_nail3 = GPolygon() r_nail3.add_vertex((257, 250)) r_nail3.add_vertex((246, 248)) r_nail3.add_vertex((248, 258)) r_nail3.filled = True r_nail3.fill_color = 'white' window.add(r_nail3) word = GLabel('stanCode', x=123, y=185) word.font = '-8-bold' window.add(word) bubble1 = GOval(10, 10, x=140, y=35) window.add(bubble1) bubble2 = GOval(15, 15, x=155, y=23) window.add(bubble2) bubble3 = GOval(20, 20, x=175, y=12) window.add(bubble3) bubble4 = GOval(95, 85, x=200, y=5) window.add(bubble4) word2 = GLabel('Python', x=207, y=50) word2.font = 'Courier-18' window.add(word2) word3 = GLabel('Python', x=220, y=80) word3.font = 'Courier-13' window.add(word3) word4 = GLabel('Python', x=242, y=60) word4.font = 'Courier-8' window.add(word4) if __name__ == '__main__': main()
28.910156
88
0.638427
0
0
0
0
0
0
0
0
681
0.091866
f7e7736eb2b76396a07e8f09a10926efaa231ede
748
py
Python
kivy/core/clipboard/clipboard_xsel.py
CharaD7/kivy
85065fe6633f5ac831c193dc84e3f636b789cc3a
[ "MIT" ]
2
2021-05-16T09:46:14.000Z
2021-11-17T11:23:15.000Z
kivy/core/clipboard/clipboard_xsel.py
CharaD7/kivy
85065fe6633f5ac831c193dc84e3f636b789cc3a
[ "MIT" ]
1
2016-11-11T13:45:42.000Z
2016-11-11T13:45:42.000Z
kivy/core/clipboard/clipboard_xsel.py
CharaD7/kivy
85065fe6633f5ac831c193dc84e3f636b789cc3a
[ "MIT" ]
2
2017-03-09T14:27:03.000Z
2019-05-03T08:36:02.000Z
'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''

__all__ = ('ClipboardXsel', )

from kivy.utils import platform
from kivy.core.clipboard._clipboard_ext import ClipboardExternalBase

if platform != 'linux':
    raise SystemError('unsupported platform for xsel clipboard')

try:
    import subprocess
    p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
    p.communicate()
except:
    raise


class ClipboardXsel(ClipboardExternalBase):
    @staticmethod
    def _clip(inout, selection):
        pipe = {'std' + inout: subprocess.PIPE}
        sel = 'b' if selection == 'clipboard' else selection[0]
        io = inout[0]
        return subprocess.Popen(
            ['xsel', '-' + sel + io], **pipe)
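# Illustrative use of the helper above (not part of the original module): for the
# 'clipboard' selection, _clip('out', 'clipboard') builds `xsel -bo` with stdout
# piped, which reads the clipboard, while _clip('in', 'clipboard') builds `xsel -bi`
# with stdin piped for writing to it.
if __name__ == '__main__':
    reader = ClipboardXsel._clip('out', 'clipboard')
    contents, _ = reader.communicate()
    print(contents)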
24.933333
80
0.67246
307
0.410428
0
0
259
0.346257
0
0
185
0.247326
f7ea40e807af6204059adeba1056db95e63b5bcf
492
py
Python
plugins/hashsum_download/girder_hashsum_download/settings.py
JKitok/girder
317962d155fc9811d25e5f33bd3e849c4ac96645
[ "Apache-2.0" ]
395
2015-01-12T19:20:13.000Z
2022-03-30T05:40:40.000Z
plugins/hashsum_download/girder_hashsum_download/settings.py
JKitok/girder
317962d155fc9811d25e5f33bd3e849c4ac96645
[ "Apache-2.0" ]
2,388
2015-01-01T20:09:19.000Z
2022-03-29T16:49:14.000Z
plugins/hashsum_download/girder_hashsum_download/settings.py
JKitok/girder
317962d155fc9811d25e5f33bd3e849c4ac96645
[ "Apache-2.0" ]
177
2015-01-04T14:47:00.000Z
2022-03-25T09:01:51.000Z
from girder.exceptions import ValidationException
from girder.utility import setting_utilities


class PluginSettings:
    AUTO_COMPUTE = 'hashsum_download.auto_compute'


@setting_utilities.default(PluginSettings.AUTO_COMPUTE)
def _defaultAutoCompute():
    return False


@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)
def _validateAutoCompute(doc):
    if not isinstance(doc['value'], bool):
        raise ValidationException('Auto-compute hash setting must be true or false.')
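# Hedged sketch (not part of this file): elsewhere in Girder, the setting registered
# above is normally read through the Setting model, roughly as below; the import path
# follows Girder 3 conventions and is an assumption here.
from girder.models.setting import Setting

auto_compute = Setting().get(PluginSettings.AUTO_COMPUTE)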
27.333333
85
0.802846
72
0.146341
0
0
316
0.642276
0
0
88
0.178862
f7ea6e1ab40e2fa5eea55fc79f11b658b6c35f7e
44,837
py
Python
forager_server/forager_server_api/views.py
jeremyephron/forager
6db1590686e0e34b2e42ff5deb70f62fcee73d7d
[ "MIT" ]
1
2020-12-01T23:25:58.000Z
2020-12-01T23:25:58.000Z
forager_server/forager_server_api/views.py
jeremyephron/forager
6db1590686e0e34b2e42ff5deb70f62fcee73d7d
[ "MIT" ]
2
2020-10-07T01:03:06.000Z
2020-10-12T19:08:55.000Z
forager_server/forager_server_api/views.py
jeremyephron/forager
6db1590686e0e34b2e42ff5deb70f62fcee73d7d
[ "MIT" ]
null
null
null
from collections import defaultdict, namedtuple from dataclasses import dataclass import distutils.util import functools import itertools import json import math import operator import os import random import uuid import shutil import logging import time from typing import List, Dict, NamedTuple, Optional from django.db.models import Q from django.http import JsonResponse from django.views.decorators.csrf import csrf_exempt from django.shortcuts import get_object_or_404, get_list_or_404 from django.conf import settings from google.cloud import storage from rest_framework.decorators import api_view import requests from expiringdict import ExpiringDict from .models import ( Dataset, DatasetItem, Category, Mode, User, Annotation, DNNModel, CategoryCount, ) BUILTIN_MODES = ["POSITIVE", "NEGATIVE", "HARD_NEGATIVE", "UNSURE"] logger = logging.getLogger(__name__) @api_view(["POST"]) @csrf_exempt def start_cluster(request): # TODO(mihirg): Remove this setting from Django; it's now managed by Terraform # (or figure out how to set it from the frontend if we need that) r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/start_cluster", ) response_data = r.json() return JsonResponse( { "status": "success", "cluster_id": response_data["cluster_id"], } ) @api_view(["GET"]) @csrf_exempt def get_cluster_status(request, cluster_id): params = {"cluster_id": cluster_id} r = requests.get( settings.EMBEDDING_SERVER_ADDRESS + "/cluster_status", params=params ) response_data = r.json() return JsonResponse(response_data) @api_view(["POST"]) @csrf_exempt def stop_cluster(request, cluster_id): params = {"cluster_id": cluster_id} requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/stop_cluster", json=params, ) return JsonResponse( { "status": "success", } ) @api_view(["POST"]) @csrf_exempt def create_model(request, dataset_name, dataset=None): payload = json.loads(request.body) model_name = payload["model_name"] cluster_id = payload["cluster_id"] bucket_name = payload["bucket"] index_id = payload["index_id"] pos_tags = parse_tag_set_from_query_v2(payload["pos_tags"]) neg_tags = parse_tag_set_from_query_v2(payload["neg_tags"]) val_pos_tags = parse_tag_set_from_query_v2(payload["val_pos_tags"]) val_neg_tags = parse_tag_set_from_query_v2(payload["val_neg_tags"]) augment_negs = bool(payload["augment_negs"]) model_kwargs = payload["model_kwargs"] resume_model_id = payload.get("resume", None) dataset = get_object_or_404(Dataset, name=dataset_name) eligible_images = DatasetItem.objects.filter(dataset=dataset, is_val=False) categories = Category.objects.filter( tag_sets_to_query(pos_tags, neg_tags, val_pos_tags, val_neg_tags) ) annotations = Annotation.objects.filter( dataset_item__in=eligible_images, category__in=categories, ) tags_by_pk = get_tags_from_annotations_v2(annotations) pos_dataset_item_pks = [] neg_dataset_item_pks = [] val_pos_dataset_item_pks = [] val_neg_dataset_item_pks = [] for pk, tags in tags_by_pk.items(): if any(t in pos_tags for t in tags): pos_dataset_item_pks.append(pk) elif any(t in neg_tags for t in tags): neg_dataset_item_pks.append(pk) elif any(t in val_pos_tags for t in tags): val_pos_dataset_item_pks.append(pk) elif any(t in val_neg_tags for t in tags): val_neg_dataset_item_pks.append(pk) # Augment with randomly sampled negatives if requested num_extra_negs = settings.BGSPLIT_NUM_NEGS_MULTIPLIER * len( pos_dataset_item_pks ) - len(neg_dataset_item_pks) if augment_negs and num_extra_negs > 0: # Uses "include" and "exclude" category sets from request all_eligible_pks = filtered_images_v2( 
request, dataset, exclude_pks=( pos_dataset_item_pks + neg_dataset_item_pks + val_pos_dataset_item_pks + val_neg_dataset_item_pks ), ) sampled_pks = random.sample( all_eligible_pks, min(len(all_eligible_pks), num_extra_negs) ) neg_dataset_item_pks.extend(sampled_pks) pos_dataset_item_internal_identifiers = list( DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list( "identifier", flat=True ) ) neg_dataset_item_internal_identifiers = list( DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list( "identifier", flat=True ) ) val_pos_dataset_item_internal_identifiers = list( DatasetItem.objects.filter(pk__in=val_pos_dataset_item_pks).values_list( "identifier", flat=True ) ) val_neg_dataset_item_internal_identifiers = list( DatasetItem.objects.filter(pk__in=val_neg_dataset_item_pks).values_list( "identifier", flat=True ) ) if resume_model_id: resume_model = get_object_or_404(DNNModel, model_id=resume_model_id) resume_model_path = resume_model.checkpoint_path else: resume_model = None resume_model_path = None params = { "pos_identifiers": pos_dataset_item_internal_identifiers, "neg_identifiers": neg_dataset_item_internal_identifiers, "val_pos_identifiers": val_pos_dataset_item_internal_identifiers, "val_neg_identifiers": val_neg_dataset_item_internal_identifiers, "augment_negs": augment_negs, "model_kwargs": model_kwargs, "model_name": model_name, "bucket": bucket_name, "cluster_id": cluster_id, "index_id": index_id, "resume_from": resume_model_path, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_job", json=params, ) response_data = r.json() if r.status_code != 200: return JsonResponse( {"status": "failure", "reason": response_data.get("reason", "")}, status=r.status_code, ) m = DNNModel( dataset=dataset, name=model_name, model_id=response_data["model_id"], category_spec={ "augment_negs": augment_negs, "pos_tags": payload["pos_tags"], "neg_tags": payload["neg_tags"], "augment_negs_include": payload.get("include", []) if augment_negs else [], "augment_negs_exclude": payload.get("exclude", []) if augment_negs else [], }, ) model_epoch = -1 + model_kwargs.get("epochs_to_run", 1) if resume_model_id: m.resume_model_id = resume_model_id if model_kwargs.get("resume_training", False): model_epoch += resume_model.epoch + 1 m.epoch = model_epoch m.save() return JsonResponse( { "status": "success", "model_id": response_data["model_id"], } ) @api_view(["GET"]) @csrf_exempt def get_model_status(request, model_id): params = {"model_id": model_id} r = requests.get( settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_job_status", params=params ) response_data = r.json() if response_data["has_model"]: # Index has been successfully created & uploaded -> persist m = get_object_or_404(DNNModel, model_id=model_id) m.checkpoint_path = response_data["checkpoint_path"] m.save() return JsonResponse(response_data) @api_view(["POST"]) @csrf_exempt def update_model_v2(request): payload = json.loads(request.body) # user = payload["user"] old_model_name = payload["old_model_name"] new_model_name = payload["new_model_name"] models = get_list_or_404(DNNModel, name=old_model_name) for m in models: m.name = new_model_name m.save() return JsonResponse({"success": True}) @api_view(["POST"]) @csrf_exempt def delete_model_v2(request): payload = json.loads(request.body) model_name = payload["model_name"] # cluster_id = payload['cluster_id'] models = get_list_or_404(DNNModel, name=model_name) for m in models: # TODO(fpoms): delete model data stored on NFS? 
# shutil.rmtree(os.path.join(m.checkpoint_path, '..')) shutil.rmtree(m.output_directory, ignore_errors=True) m.delete() return JsonResponse({"success": True}) @api_view(["POST"]) @csrf_exempt def run_model_inference(request, dataset_name, dataset=None): payload = json.loads(request.body) model_id = payload["model_id"] cluster_id = payload["cluster_id"] bucket_name = payload["bucket"] index_id = payload["index_id"] dataset = get_object_or_404(Dataset, name=dataset_name) model_checkpoint_path = get_object_or_404( DNNModel, model_id=model_id ).checkpoint_path if model_checkpoint_path is None or len(model_checkpoint_path) == 0: return JsonResponse( { "status": "failure", "reason": f"Model {model_id} does not have a model checkpoint.", }, status=400, ) params = { "bucket": bucket_name, "model_id": model_id, "checkpoint_path": model_checkpoint_path, "cluster_id": cluster_id, "index_id": index_id, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_inference_job", json=params, ) response_data = r.json() return JsonResponse( { "status": "success", "job_id": response_data["job_id"], } ) @api_view(["GET"]) @csrf_exempt def get_model_inference_status(request, job_id): params = {"job_id": job_id} r = requests.get( settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_inference_job_status", params=params, ) response_data = r.json() if response_data["has_output"]: model_id = response_data["model_id"] # Index has been successfully created & uploaded -> persist m = get_object_or_404(DNNModel, model_id=model_id) m.output_directory = response_data["output_dir"] m.save() return JsonResponse(response_data) @api_view(["POST"]) @csrf_exempt def stop_model_inference(request, job_id): params = {"job_id": job_id} r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/stop_bgsplit_inference_job", json=params ) response_data = r.json() return JsonResponse(response_data, status=r.status_code) # # V2 ENDPOINTS # TODO(mihirg): Make these faster # Tag = namedtuple("Tag", "category value") # type: NamedTuple[str, str] Box = namedtuple( "Box", "category value x1 y1 x2 y2" ) # type: NamedTuple[str, str, float, float, float, float] PkType = int @dataclass class ResultSet: type: str ranking: List[PkType] distances: List[float] model: Optional[str] # TODO(fpoms): this needs to be wrapped in a lock so that # updates are atomic across concurrent requests current_result_sets = ExpiringDict( max_age_seconds=30 * 60, max_len=50, ) # type: Dict[str, ResultSet] def parse_tag_set_from_query_v2(s): if isinstance(s, list): parts = s elif isinstance(s, str) and s: parts = s.split(",") else: parts = [] ts = set() for part in parts: if not part: continue category, value_str = part.split(":") ts.add(Tag(category, value_str)) return ts def tag_sets_to_query(*tagsets): merged = set().union(*tagsets) if not merged: return Q() return Q( annotation__in=Annotation.objects.filter( functools.reduce( operator.or_, [Q(category__name=t.category, mode__name=t.value) for t in merged], ) ) ) def serialize_tag_set_for_client_v2(ts): return [{"category": t.category, "value": t.value} for t in sorted(list(ts))] def serialize_boxes_for_client_v2(bs): return [ { "category": b.category, "value": b.value, "x1": b.x1, "y1": b.y1, "x2": b.x2, "y2": b.y2, } for b in sorted(list(bs)) ] def get_tags_from_annotations_v2(annotations): tags_by_pk = defaultdict(list) annotations = annotations.filter(is_box=False) ann_dicts = annotations.values("dataset_item__pk", "category__name", "mode__name") for ann in ann_dicts: pk = ann["dataset_item__pk"] category 
= ann["category__name"] mode = ann["mode__name"] tags_by_pk[pk].append(Tag(category, mode)) return tags_by_pk def get_boxes_from_annotations_v2(annotations): boxes_by_pk = defaultdict(list) annotations = annotations.filter(is_box=True) ann_dicts = annotations.values( "dataset_item__pk", "category__name", "mode__name", "bbox_x1", "bbox_y1", "bbox_x2", "bbox_y2", ) for ann in ann_dicts: pk = ann["dataset_item__pk"] category = ann["category__name"] mode = ann["mode__name"] box = (ann["bbox_x1"], ann["bbox_y1"], ann["bbox_x2"], ann["bbox_y2"]) boxes_by_pk[pk].append(Box(category, mode, *box)) return boxes_by_pk def filtered_images_v2(request, dataset, exclude_pks=None) -> List[PkType]: filt_start = time.time() if request.method == "POST": payload = json.loads(request.body) include_tags = parse_tag_set_from_query_v2(payload.get("include")) exclude_tags = parse_tag_set_from_query_v2(payload.get("exclude")) pks = [i for i in payload.get("subset", []) if i] split = payload.get("split", "train") offset_to_return = int(payload.get("offset", 0)) num_to_return = int(payload.get("num", -1)) else: include_tags = parse_tag_set_from_query_v2(request.GET.get("include")) exclude_tags = parse_tag_set_from_query_v2(request.GET.get("exclude")) pks = [i for i in request.GET.get("subset", "").split(",") if i] split = request.GET.get("split", "train") offset_to_return = int(request.GET.get("offset", 0)) num_to_return = int(request.GET.get("num", -1)) end_to_return = None if num_to_return == -1 else offset_to_return + num_to_return dataset_items = None is_val = split == "val" db_start = time.time() # Get pks for dataset items of interest if pks and exclude_pks: # Get specific pks - excluded pks if requested exclude_pks = set(exclude_pks) pks = [pk for pk in pks if pk not in exclude_pks] elif not pks: # Otherwise get all dataset items - exclude pks dataset_items = DatasetItem.objects.filter(dataset=dataset, is_val=is_val) if exclude_pks: dataset_items = dataset_items.exclude(pk__in=exclude_pks) pks = dataset_items.values_list("pk", flat=True) db_end = time.time() result = None db_tag_start = time.time() if not include_tags and not exclude_tags: # If no tags specified, just return retrieved pks result = pks else: # Otherwise, filter using include and exclude tags if dataset_items is None: dataset_items = DatasetItem.objects.filter(pk__in=pks) if include_tags: dataset_items = dataset_items.filter(tag_sets_to_query(include_tags)) if exclude_tags: dataset_items = dataset_items.exclude(tag_sets_to_query(exclude_tags)) result = dataset_items.values_list("pk", flat=True) db_tag_end = time.time() result = list(result[offset_to_return:end_to_return]) filt_end = time.time() print( f"filtered_images_v2: tot: {filt_end-filt_start}, " f"db ({len(result)} items): {db_end-db_start}, db tag: {db_tag_end-db_tag_start}" ) return result def process_image_query_results_v2(request, dataset, query_response): filtered_pks = filtered_images_v2(request, dataset) # TODO(mihirg): Eliminate this database call by directly returning pks from backend dataset_items = DatasetItem.objects.filter(pk__in=filtered_pks) dataset_items_by_path = {di.path: di for di in dataset_items} distances = [] ordered_pks = [] for r in query_response["results"]: if r["label"] in dataset_items_by_path: ordered_pks.append(dataset_items_by_path[r["label"]].pk) distances.append(r["dist"]) return dict( pks=ordered_pks, distances=distances, ) def create_result_set_v2(results, type, model=None): pks = results["pks"] distances = results["distances"] result_set_id = 
str(uuid.uuid4()) current_result_sets[result_set_id] = ResultSet( type=type, ranking=pks, distances=distances, model=model ) return { "id": result_set_id, "num_results": len(pks), "type": type, } @api_view(["GET"]) @csrf_exempt def get_results_v2(request, dataset_name): dataset = get_object_or_404(Dataset, name=dataset_name) index_id = request.GET["index_id"] result_set_id = request.GET["result_set_id"] offset_to_return = int(request.GET.get("offset", 0)) num_to_return = int(request.GET.get("num", 500)) clustering_model = request.GET.get("clustering_model", None) result_set = current_result_sets[result_set_id] pks = result_set.ranking[offset_to_return : offset_to_return + num_to_return] distances = result_set.distances[ offset_to_return : offset_to_return + num_to_return ] dataset_items_by_pk = DatasetItem.objects.in_bulk(pks) dataset_items = [dataset_items_by_pk[pk] for pk in pks] # preserve order bucket_name = dataset.train_directory[len("gs://") :].split("/")[0] path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}" internal_identifiers = [di.identifier for di in dataset_items] params = { "index_id": index_id, "identifiers": internal_identifiers, } if clustering_model: params["model"] = clustering_model r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/perform_clustering", json=params, ) clustering_data = r.json() dataset_item_paths = [ (di.path if di.path.find("http") != -1 else path_template.format(di.path)) for di in dataset_items ] dataset_item_identifiers = [di.pk for di in dataset_items] return JsonResponse( { "paths": dataset_item_paths, "identifiers": dataset_item_identifiers, "distances": distances, "clustering": clustering_data["clustering"], } ) @api_view(["POST"]) @csrf_exempt def keep_alive_v2(request): requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/keep_alive", ) return JsonResponse({"status": "success"}) @api_view(["POST"]) @csrf_exempt def generate_embedding_v2(request): payload = json.loads(request.body) image_id = payload.get("image_id") if image_id: payload["identifier"] = DatasetItem.objects.get(pk=image_id).identifier r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/generate_embedding", json=payload, ) return JsonResponse(r.json()) @api_view(["POST"]) @csrf_exempt def generate_text_embedding_v2(request): payload = json.loads(request.body) r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/generate_text_embedding", json=payload, ) return JsonResponse(r.json()) @api_view(["POST"]) @csrf_exempt def query_knn_v2(request, dataset_name): payload = json.loads(request.body) index_id = payload["index_id"] embeddings = payload["embeddings"] use_full_image = bool(payload.get("use_full_image", True)) use_dot_product = bool(payload.get("use_dot_product", False)) model = payload.get("model", "imagenet") dataset = get_object_or_404(Dataset, name=dataset_name) query_knn_start = time.time() params = { "index_id": index_id, "embeddings": embeddings, "use_full_image": use_full_image, "use_dot_product": use_dot_product, "model": model, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/query_knn_v2", json=params, ) response_data = r.json() query_knn_end = time.time() logger.debug("query_knn_v2 time: {:f}".format(query_knn_end - query_knn_start)) results = process_image_query_results_v2( request, dataset, response_data, ) return JsonResponse(create_result_set_v2(results, "knn", model=model)) @api_view(["GET"]) @csrf_exempt def train_svm_v2(request, dataset_name): index_id = request.GET["index_id"] model = 
request.GET.get("model", "imagenet") pos_tags = parse_tag_set_from_query_v2(request.GET["pos_tags"]) neg_tags = parse_tag_set_from_query_v2(request.GET.get("neg_tags")) augment_negs = bool( distutils.util.strtobool(request.GET.get("augment_negs", "false")) ) dataset = get_object_or_404(Dataset, name=dataset_name) pos_dataset_items = DatasetItem.objects.filter( tag_sets_to_query(pos_tags), dataset=dataset, is_val=False, ) pos_dataset_item_pks = list(pos_dataset_items.values_list("pk", flat=True)) if neg_tags: neg_dataset_items = DatasetItem.objects.filter( tag_sets_to_query(neg_tags), dataset=dataset, is_val=False, ).difference(pos_dataset_items) neg_dataset_item_pks = list(neg_dataset_items.values_list("pk", flat=True)) else: neg_dataset_item_pks = [] # Augment with randomly sampled negatives if requested num_extra_negs = settings.SVM_NUM_NEGS_MULTIPLIER * len(pos_dataset_item_pks) - len( neg_dataset_item_pks ) if augment_negs and num_extra_negs > 0: # Uses "include" and "exclude" category sets from GET request all_eligible_pks = filtered_images_v2( request, dataset, exclude_pks=pos_dataset_item_pks + neg_dataset_item_pks ) sampled_pks = random.sample( all_eligible_pks, min(len(all_eligible_pks), num_extra_negs) ) neg_dataset_item_pks.extend(sampled_pks) pos_dataset_item_internal_identifiers = list( DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list( "identifier", flat=True ) ) neg_dataset_item_internal_identifiers = list( DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list( "identifier", flat=True ) ) params = { "index_id": index_id, "pos_identifiers": pos_dataset_item_internal_identifiers, "neg_identifiers": neg_dataset_item_internal_identifiers, "model": model, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/train_svm_v2", json=params, ) return JsonResponse(r.json()) # {"svm_vector": base64-encoded string} @api_view(["POST"]) @csrf_exempt def query_svm_v2(request, dataset_name): payload = json.loads(request.body) index_id = payload["index_id"] svm_vector = payload["svm_vector"] score_min = float(payload.get("score_min", 0.0)) score_max = float(payload.get("score_max", 1.0)) model = payload.get("model", "imagenet") dataset = get_object_or_404(Dataset, name=dataset_name) params = { "index_id": index_id, "svm_vector": svm_vector, "score_min": score_min, "score_max": score_max, "model": model, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/query_svm_v2", json=params, ) response_data = r.json() # TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid # running a separate query over the index every single time the user adjusts score # thresholds results = process_image_query_results_v2( request, dataset, response_data, ) return JsonResponse(create_result_set_v2(results, "svm")) @api_view(["POST"]) @csrf_exempt def query_ranking_v2(request, dataset_name): payload = json.loads(request.body) index_id = payload["index_id"] score_min = float(payload.get("score_min", 0.0)) score_max = float(payload.get("score_max", 1.0)) model = payload["model"] dataset = get_object_or_404(Dataset, name=dataset_name) params = { "index_id": index_id, "score_min": score_min, "score_max": score_max, "model": model, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/query_ranking_v2", json=params, ) response_data = r.json() # TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid # running a separate query over the index every single time the user adjusts score # thresholds results = 
process_image_query_results_v2( request, dataset, response_data, ) return JsonResponse(create_result_set_v2(results, "ranking", model=model)) @api_view(["POST"]) @csrf_exempt def query_images_v2(request, dataset_name): query_start = time.time() dataset = get_object_or_404(Dataset, name=dataset_name) payload = json.loads(request.body) order = payload.get("order", "id") filter_start = time.time() result_pks = filtered_images_v2(request, dataset) filter_end = time.time() if order == "random": random.shuffle(result_pks) elif order == "id": result_pks.sort() results = {"pks": result_pks, "distances": [-1 for _ in result_pks]} resp = JsonResponse(create_result_set_v2(results, "query")) query_end = time.time() print( f"query_images_v2: tot: {query_end-query_start}, " f"filter: {filter_end-filter_start}" ) return resp # # ACTIVE VALIDATION # VAL_NEGATIVE_TYPE = "model_val_negative" def get_val_examples_v2(dataset, model_id): # Get positive and negative categories model = get_object_or_404(DNNModel, model_id=model_id) pos_tags = parse_tag_set_from_query_v2(model.category_spec["pos_tags"]) neg_tags = parse_tag_set_from_query_v2(model.category_spec["neg_tags"]) augment_negs = model.category_spec.get("augment_negs", False) augment_negs_include = ( parse_tag_set_from_query_v2(model.category_spec.get("augment_negs_include", [])) if augment_negs else set() ) # Limit to validation set eligible_dataset_items = DatasetItem.objects.filter( dataset=dataset, is_val=True, ) # Get positives and negatives matching these categories categories = Category.objects.filter( tag_sets_to_query(pos_tags, neg_tags, augment_negs_include) ) annotations = Annotation.objects.filter( dataset_item__in=eligible_dataset_items, category__in=categories, ) tags_by_pk = get_tags_from_annotations_v2(annotations) pos_dataset_item_pks = [] neg_dataset_item_pks = [] for pk, tags in tags_by_pk.items(): if any(t in pos_tags for t in tags): pos_dataset_item_pks.append(pk) elif any(t in neg_tags or t in augment_negs_include for t in tags): neg_dataset_item_pks.append(pk) # Get extra negatives if augment_negs: annotations = Annotation.objects.filter( dataset_item__in=eligible_dataset_items, label_category=model_id, label_type=VAL_NEGATIVE_TYPE, ) neg_dataset_item_pks.extend(ann.dataset_item.pk for ann in annotations) return pos_dataset_item_pks, neg_dataset_item_pks @api_view(["POST"]) def query_metrics_v2(request, dataset_name): dataset = get_object_or_404(Dataset, name=dataset_name) payload = json.loads(request.body) model_id = payload["model"] index_id = payload["index_id"] internal_identifiers_to_weights = payload["weights"] # type: Dict[str, int] pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id) # Construct identifiers, labels, and weights dataset_items_by_pk = DatasetItem.objects.in_bulk( pos_dataset_item_pks + neg_dataset_item_pks ) identifiers = [] labels = [] weights = [] for pk, label in itertools.chain( ((pk, True) for pk in pos_dataset_item_pks), ((pk, False) for pk in neg_dataset_item_pks), ): di = dataset_items_by_pk[pk] identifier = di.identifier weight = internal_identifiers_to_weights.get(identifier) if weight is None: continue identifiers.append(identifier) labels.append(label) weights.append(weight) # TODO(mihirg): Parse false positives and false negatives params = { "index_id": index_id, "model": model_id, "identifiers": identifiers, "labels": labels, "weights": weights, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/query_metrics", json=params, ) response_data = r.json() return 
JsonResponse(response_data) @api_view(["POST"]) def query_active_validation_v2(request, dataset_name): dataset = get_object_or_404(Dataset, name=dataset_name) payload = json.loads(request.body) model_id = payload["model"] index_id = payload["index_id"] current_f1 = payload.get("current_f1") if current_f1 is None: current_f1 = 0.5 pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id) # Construct paths, identifiers, and labels dataset_items_by_pk = DatasetItem.objects.in_bulk( pos_dataset_item_pks + neg_dataset_item_pks ) identifiers = [] labels = [] for pk, label in itertools.chain( ((pk, True) for pk in pos_dataset_item_pks), ((pk, False) for pk in neg_dataset_item_pks), ): di = dataset_items_by_pk[pk] identifiers.append(di.identifier) labels.append(label) params = { "index_id": index_id, "model": model_id, "identifiers": identifiers, "labels": labels, "current_f1": current_f1, } r = requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/query_active_validation", json=params, ) response_data = r.json() if response_data["identifiers"]: pks_and_paths = list( DatasetItem.objects.filter( dataset=dataset, identifier__in=response_data["identifiers"], is_val=True, ).values_list("pk", "path") ) random.shuffle(pks_and_paths) pks, paths = zip(*pks_and_paths) else: pks, paths = [], [] bucket_name = dataset.val_directory[len("gs://") :].split("/")[0] path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}" paths = [path_template.format(p) for p in paths] return JsonResponse( { "paths": paths, "identifiers": pks, "weights": response_data["weights"], } ) @api_view(["POST"]) def add_val_annotations_v2(request): payload = json.loads(request.body) annotations = payload["annotations"] user_email = payload["user"] model = payload["model"] anns = [] cat_modes = defaultdict(int) dataset = None for ann_payload in annotations: image_pk = ann_payload["identifier"] is_other_negative = ann_payload.get("is_other_negative", False) mode_str = "NEGATIVE" if is_other_negative else ann_payload["mode"] category_name = ( "active:" + model if is_other_negative else ann_payload["category"] ) user, _ = User.objects.get_or_create(email=user_email) category, _ = Category.objects.get_or_create(name=category_name) mode, _ = Mode.objects.get_or_create(name=mode_str) di = DatasetItem.objects.get(pk=image_pk) dataset = di.dataset assert di.is_val ann = Annotation( dataset_item=di, user=user, category=category, mode=mode, misc_data={"created_by": "active_val"}, ) cat_modes[(category, mode)] += 1 anns.append(ann) Annotation.objects.bulk_create(anns) for (cat, mode), c in cat_modes.items(): category_count, _ = CategoryCount.objects.get_or_create( dataset=dataset, category=cat, mode=mode ) category_count.count += c category_count.save() return JsonResponse({"created": len(anns)}) # DATASET INFO @api_view(["GET"]) @csrf_exempt def get_datasets_v2(request): datasets = Dataset.objects.filter(hidden=False) dataset_names = list(datasets.values_list("name", flat=True)) return JsonResponse({"dataset_names": dataset_names}) @api_view(["GET"]) @csrf_exempt def get_dataset_info_v2(request, dataset_name): dataset = get_object_or_404(Dataset, name=dataset_name) num_train = dataset.datasetitem_set.filter(is_val=False).count() num_val = dataset.datasetitem_set.filter(is_val=True).count() return JsonResponse( { "index_id": dataset.index_id, "num_train": num_train, "num_val": num_val, } ) @api_view(["GET"]) @csrf_exempt def get_models_v2(request, dataset_name): dataset = get_object_or_404(Dataset, 
name=dataset_name) model_objs = DNNModel.objects.filter( dataset=dataset, checkpoint_path__isnull=False, ).order_by("-last_updated") model_names = set() latest = {} with_output = {} for model in model_objs: model_names.add(model.name) if model.name not in latest: latest[model.name] = model if model.output_directory and model.name not in with_output: with_output[model.name] = model models = [ { "name": model_name, "latest": model_info(latest[model_name]), "with_output": model_info(with_output.get(model_name)), } for model_name in model_names ] return JsonResponse({"models": models}) def model_info(model): if model is None: return None pos_tags = parse_tag_set_from_query_v2(model.category_spec.get("pos_tags", [])) neg_tags = parse_tag_set_from_query_v2(model.category_spec.get("neg_tags", [])) augment_negs_include = parse_tag_set_from_query_v2( model.category_spec.get("augment_negs_include", []) ) return { "model_id": model.model_id, "timestamp": model.last_updated, "has_checkpoint": model.checkpoint_path is not None, "has_output": model.output_directory is not None, "pos_tags": serialize_tag_set_for_client_v2(pos_tags), "neg_tags": serialize_tag_set_for_client_v2(neg_tags | augment_negs_include), "augment_negs": model.category_spec.get("augment_negs", False), "epoch": model.epoch, } @api_view(["POST"]) @csrf_exempt def create_dataset_v2(request): payload = json.loads(request.body) name = payload["dataset"] train_directory = payload["train_path"] val_directory = payload["val_path"] index_id = payload["index_id"] assert all(d.startswith("gs://") for d in (train_directory, val_directory)) # Download index on index server params = {"index_id": index_id} requests.post( settings.EMBEDDING_SERVER_ADDRESS + "/download_index", json=params, ) client = storage.Client() all_blobs = [] for d, is_val in ((train_directory, False), (val_directory, True)): split_dir = d[len("gs://") :].split("/") bucket_name = split_dir[0] bucket_path = "/".join(split_dir[1:]) all_blobs.extend( (blob, is_val) for blob in client.list_blobs(bucket_name, prefix=bucket_path) ) dataset = Dataset( name=name, train_directory=train_directory, val_directory=val_directory, index_id=index_id, ) dataset.save() # Create all the DatasetItems for this dataset items = [ DatasetItem( dataset=dataset, identifier=os.path.splitext(os.path.basename(blob.name))[0], path=blob.name, is_val=is_val, ) for blob, is_val in all_blobs if ( blob.name.endswith(".jpg") or blob.name.endswith(".jpeg") or blob.name.endswith(".png") ) ] DatasetItem.objects.bulk_create(items, batch_size=10000) return JsonResponse({"status": "success"}) @api_view(["POST"]) @csrf_exempt def get_annotations_v2(request): payload = json.loads(request.body) image_pks = [i for i in payload["identifiers"] if i] if not image_pks: return JsonResponse({}) annotations = Annotation.objects.filter( dataset_item__in=DatasetItem.objects.filter(pk__in=image_pks), ) tags_by_pk = get_tags_from_annotations_v2(annotations) boxes_by_pk = get_boxes_from_annotations_v2(annotations) annotations_by_pk = defaultdict(lambda: {"tags": [], "boxes": []}) for pk, tags in tags_by_pk.items(): annotations_by_pk[pk]["tags"] = serialize_tag_set_for_client_v2(tags) for pk, boxes in boxes_by_pk.items(): annotations_by_pk[pk]["boxes"] = serialize_boxes_for_client_v2(boxes) return JsonResponse(annotations_by_pk) @api_view(["POST"]) @csrf_exempt def add_annotations_v2(request): payload = json.loads(request.body) image_pks = payload["identifiers"] images = DatasetItem.objects.filter(pk__in=image_pks) num_created = 
bulk_add_single_tag_annotations_v2(payload, images) return JsonResponse({"created": num_created}) @api_view(["POST"]) @csrf_exempt def add_annotations_multi_v2(request): payload = json.loads(request.body) num_created = bulk_add_multi_annotations_v2(payload) return JsonResponse({"created": num_created}) @api_view(["POST"]) @csrf_exempt def add_annotations_by_internal_identifiers_v2(request, dataset_name): dataset = get_object_or_404(Dataset, name=dataset_name) payload = json.loads(request.body) image_identifiers = payload["identifiers"] images = DatasetItem.objects.filter( dataset=dataset, identifier__in=image_identifiers ) num_created = bulk_add_single_tag_annotations_v2(payload, images) return JsonResponse({"created": num_created}) @api_view(["POST"]) @csrf_exempt def add_annotations_to_result_set_v2(request): payload = json.loads(request.body) result_set_id = payload["result_set_id"] lower_bound = float(payload["from"]) upper_bound = float(payload["to"]) result_set = current_result_sets[result_set_id] result_ranking = result_set.ranking # e.g., lower_bound=0.0, upper_bound=0.5 -> second half of the result set start_index = math.ceil(len(result_ranking) * (1.0 - upper_bound)) end_index = math.floor(len(result_ranking) * (1.0 - lower_bound)) image_pks = result_ranking[start_index:end_index] images = DatasetItem.objects.filter(pk__in=image_pks) num_created = bulk_add_single_tag_annotations_v2(payload, images) return JsonResponse({"created": num_created}) def bulk_add_single_tag_annotations_v2(payload, images): '''Adds annotations for a single tag to many dataset items''' if not images: return 0 user_email = payload["user"] category_name = payload["category"] mode_name = payload["mode"] created_by = payload.get("created_by", "tag" if len(images) == 1 else "tag-bulk") dataset = None if len(images) > 0: dataset = images[0].dataset user, _ = User.objects.get_or_create(email=user_email) category, _ = Category.objects.get_or_create(name=category_name) mode, _ = Mode.objects.get_or_create(name=mode_name) Annotation.objects.filter( dataset_item__in=images, category=category, is_box=False).delete() # TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely # on this hacky "TOMBSTONE" string annotations = [ Annotation( dataset_item=di, user=user, category=category, mode=mode, is_box=False, misc_data={"created_by": created_by}, ) for di in images ] bulk_add_annotations_v2(dataset, annotations) return len(annotations) def bulk_add_multi_annotations_v2(payload : Dict): '''Adds multiple annotations for the same dataset and user to the database at once''' dataset_name = payload["dataset"] dataset = get_object_or_404(Dataset, name=dataset_name) user_email = payload["user"] user, _ = User.objects.get_or_create(email=user_email) created_by = payload.get("created_by", "tag" if len(payload["annotations"]) == 1 else "tag-bulk") # Get pks idents = [ann['identifier'] for ann in payload["annotations"] if 'identifier' in ann] di_pks = list(DatasetItem.objects.filter( dataset=dataset, identifier__in=idents ).values_list("pk", "identifier")) ident_to_pk = {ident: pk for pk, ident in di_pks} cats = {} modes = {} to_delete = defaultdict(set) annotations = [] for ann in payload["annotations"]: db_ann = Annotation() category_name = ann["category"] mode_name = ann["mode"] if category_name not in cats: cats[category_name] = Category.objects.get_or_create( name=category_name)[0] if mode_name not in modes: modes[mode_name] = Mode.objects.get_or_create( name=mode_name)[0] if "identifier" in ann: pk = 
ident_to_pk[ann["identifier"]] else: pk = ann["pk"] db_ann.dataset_item_id = pk db_ann.user = user db_ann.category = cats[category_name] db_ann.mode = modes[mode_name] db_ann.is_box = ann.get("is_box", False) if db_ann.is_box: db_ann.bbox_x1 = ann["x1"] db_ann.bbox_y1 = ann["y1"] db_ann.bbox_x2 = ann["x2"] db_ann.bbox_y2 = ann["y2"] else: to_delete[db_ann.category].add(pk) db_ann.misc_data={"created_by": created_by} annotations.append(db_ann) for cat, pks in to_delete.items(): # Delete per-frame annotations for the category if they exist since # we should only have on mode per image Annotation.objects.filter( category=cat, dataset_item_id__in=pks, is_box=False).delete() # TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely # on this hacky "TOMBSTONE" string bulk_add_annotations_v2(dataset, annotations) return len(annotations) def bulk_add_annotations_v2(dataset, annotations): '''Handles book keeping for adding many annotations at once''' Annotation.objects.bulk_create(annotations) counts = defaultdict(int) for ann in annotations: counts[(ann.category, ann.mode)] += 1 for (cat, mode), count in counts.items(): category_count, _ = CategoryCount.objects.get_or_create( dataset=dataset, category=cat, mode=mode ) category_count.count += count category_count.save() @api_view(["POST"]) @csrf_exempt def delete_category_v2(request): payload = json.loads(request.body) category = payload["category"] category = Category.objects.get(name=category) category.delete() return JsonResponse({"status": "success"}) @api_view(["POST"]) @csrf_exempt def update_category_v2(request): payload = json.loads(request.body) old_category_name = payload["oldCategory"] new_category_name = payload["newCategory"] category = Category.objects.get(name=old_category_name) category.name = new_category_name category.save() return JsonResponse({"status": "success"}) @api_view(["GET"]) @csrf_exempt def get_category_counts_v2(request, dataset_name): dataset = get_object_or_404(Dataset, name=dataset_name) counts = CategoryCount.objects.filter(dataset=dataset).values( "category__name", "mode__name", "count" ) n_labeled = defaultdict(dict) for c in counts: category = c["category__name"] mode = c["mode__name"] count = c["count"] n_labeled[category][mode] = count return JsonResponse(n_labeled)
31.072072
89
0.657872
108
0.002409
0
0
30,703
0.684769
0
0
7,257
0.161853
f7eab2118d85cfe10c666d128c82a3c415e87f34
2,632
py
Python
ccmlib/cluster_factory.py
justinchuch/ccm
808b6ca13526785b0fddfe1ead2383c060c4b8b6
[ "Apache-2.0" ]
626
2015-01-01T18:11:03.000Z
2017-12-19T00:06:49.000Z
ccmlib/cluster_factory.py
justinchuch/ccm
808b6ca13526785b0fddfe1ead2383c060c4b8b6
[ "Apache-2.0" ]
358
2015-01-21T17:06:45.000Z
2017-12-20T16:03:01.000Z
ccmlib/cluster_factory.py
justinchuch/ccm
808b6ca13526785b0fddfe1ead2383c060c4b8b6
[ "Apache-2.0" ]
172
2015-01-02T21:40:45.000Z
2017-12-19T20:17:49.000Z
from __future__ import absolute_import

import os

import yaml

from ccmlib import common, extension, repository
from ccmlib.cluster import Cluster
from ccmlib.dse_cluster import DseCluster
from ccmlib.node import Node
from distutils.version import LooseVersion  # pylint: disable=import-error, no-name-in-module


class ClusterFactory():

    @staticmethod
    def load(path, name):
        cluster_path = os.path.join(path, name)
        filename = os.path.join(cluster_path, 'cluster.conf')
        with open(filename, 'r') as f:
            data = yaml.safe_load(f)

        try:
            install_dir = None
            if 'install_dir' in data:
                install_dir = data['install_dir']
                repository.validate(install_dir)
            if install_dir is None and 'cassandra_dir' in data:
                install_dir = data['cassandra_dir']
                repository.validate(install_dir)
            cassandra_version = None
            if 'cassandra_version' in data:
                cassandra_version = LooseVersion(data['cassandra_version'])

            if common.isDse(install_dir):
                cluster = DseCluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
            else:
                cluster = Cluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
            node_list = data['nodes']
            seed_list = data['seeds']
            if 'partitioner' in data:
                cluster.partitioner = data['partitioner']
            if 'config_options' in data:
                cluster._config_options = data['config_options']
            if 'dse_config_options' in data:
                cluster._dse_config_options = data['dse_config_options']
            if 'misc_config_options' in data:
                cluster._misc_config_options = data['misc_config_options']
            if 'log_level' in data:
                cluster.__log_level = data['log_level']
            if 'use_vnodes' in data:
                cluster.use_vnodes = data['use_vnodes']
            if 'datadirs' in data:
                cluster.data_dir_count = int(data['datadirs'])
            extension.load_from_cluster_config(cluster, data)
        except KeyError as k:
            raise common.LoadError("Error Loading " + filename + ", missing property:" + k)

        for node_name in node_list:
            cluster.nodes[node_name] = Node.load(cluster_path, node_name, cluster)
        for seed in seed_list:
            cluster.seeds.append(seed)

        return cluster
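# Minimal usage sketch of the factory above; the path and cluster name are
# placeholders for a cluster previously created by ccm under ~/.ccm.
cluster = ClusterFactory.load(os.path.expanduser('~/.ccm'), 'test_cluster')
print(sorted(cluster.nodes.keys()))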
39.878788
150
0.628799
2,316
0.879939
0
0
2,287
0.868921
0
0
428
0.162614
f7eb16ad3bcd19920bd13a45530065dd321f93c0
9,872
py
Python
causalnex/structure/pytorch/dist_type/_base.py
Rishab26/causalnex
127d9324a3d68c1795299c7522f22cdea880f344
[ "Apache-2.0" ]
1,523
2020-01-28T12:37:48.000Z
2022-03-31T09:27:58.000Z
causalnex/structure/pytorch/dist_type/_base.py
Rishab26/causalnex
127d9324a3d68c1795299c7522f22cdea880f344
[ "Apache-2.0" ]
124
2020-01-28T15:12:07.000Z
2022-03-31T18:59:16.000Z
causalnex/structure/pytorch/dist_type/_base.py
Rishab26/causalnex
127d9324a3d68c1795299c7522f22cdea880f344
[ "Apache-2.0" ]
169
2020-01-28T15:13:53.000Z
2022-03-30T21:04:02.000Z
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo # (either separately or in combination, "QuantumBlack Trademarks") are # trademarks of QuantumBlack. The License does not grant you any right or # license to the QuantumBlack Trademarks. You may not use the QuantumBlack # Trademarks or any confusingly similar mark as a trademark for your product, # or use the QuantumBlack Trademarks in any other manner that might cause # confusion in the marketplace, including but not limited to in advertising, # on websites, or on software. # # See the License for the specific language governing permissions and # limitations under the License. """ ``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior. """ import itertools from abc import ABCMeta, abstractmethod from copy import deepcopy from typing import Dict, List, Tuple import numpy as np import torch from causalnex.structure.structuremodel import StructureModel class DistTypeBase(metaclass=ABCMeta): """Base class defining the distribution default behavior and interface""" def __init__(self, idx: int): """ Default constructor for the DistTypeBase class. Unless overridden, provides default behavior to all subclasses. Args: idx: Positional index in data passed to the NOTEARS algorithm which correspond to this datatype. """ self.idx = idx def get_columns( self, X: np.ndarray, ) -> np.ndarray: """ Gets the column(s) associated with the instantiated DistType. Args: X: Full dataset to be selected from. Returns: 1d or 2d np.ndarray of columns. """ return X[:, self.idx] # pylint: disable=no-self-use # pylint: disable=unused-argument def preprocess_X(self, X: np.ndarray, fit_transform: bool = True) -> np.ndarray: """ Overload this method to perform any required preprocessing of the data matrix. This can include data conversion, column expansion etc. Changes to the tabu parameters should also be done here. **WARN** This preprocessing CANNOT reorder the columns of X. Args: X: The original passed-in data. fit_transform: Whether the class first fits then transforms the data, or just transforms. Just transforming is used to preprocess new data after the initial NOTEARS fit. Returns: Preprocessed X """ return X # pylint: disable=no-self-use def preprocess_tabu_edges( self, tabu_edges: List[Tuple[int, int]] ) -> List[Tuple[int, int]]: """ Overload this method to perform any required preprocessing of the tabu_edges. Args: tabu_edges: The original tabu_edges. Returns: Preprocessed tabu_edges. """ return tabu_edges # pylint: disable=no-self-use def preprocess_tabu_nodes(self, tabu_nodes: List[int]) -> List[int]: """ Overload this method to perform any required preprocessing of the tabu_nodes. Args: tabu_nodes: The original tabu_nodes. 
Returns: Preprocessed tabu_nodes. """ return tabu_nodes # pylint: disable=no-self-use def update_idx_col(self, idx_col: Dict[int, str]) -> Dict[int, str]: """ Overload this method to update the idx_col dict with expanded colnames. Args: idx_col: The original index to column mapping. Returns: Updated index to column mapping. """ return idx_col def add_to_node(self, sm: StructureModel) -> StructureModel: """ Adds self to a node of a structure model corresponding to self.idx. Args: sm: The input StructureModel Returns: Updated StructureModel """ sm.nodes[self.idx]["dist_type"] = self return sm # pylint: disable=no-self-use def modify_h(self, square_weight_mat: torch.Tensor) -> torch.Tensor: """ Overload this method to apply updates to the W matrix in h(W). Typically used to prevent spurious cycles when using expended columns. Args: square_weight_mat: The weight matrix used in h(W). Returns: Updated weight matrix used in h(W). """ return square_weight_mat # pylint: disable=no-self-use def collapse_adj(self, adj: np.ndarray) -> np.ndarray: """ Overload this method to apply updates to collapse the W matrix of a multi-parameter distribution Likely has the same impact as modify_h. Args: adj: The adjacency matrix. Returns: Updated adjacency matrix. """ return adj @abstractmethod def loss(self, X: torch.Tensor, X_hat: torch.Tensor) -> torch.Tensor: """ Args: X: The original data passed into NOTEARS (i.e. the reconstruction target). X_hat: The reconstructed data. Returns: Scalar pytorch tensor of the reconstruction loss between X and X_hat. """ raise NotImplementedError("Must implement the loss() method") @abstractmethod def inverse_link_function(self, X_hat: torch.Tensor) -> torch.Tensor: """ Convert the transformed data from the latent space to the original dtype using the inverse link function. Args: X_hat: Reconstructed data in the latent space. Returns: Modified X_hat. MUST be same shape as passed in data. Projects the self.idx column from the latent space to the dist_type space. """ raise NotImplementedError("Must implement the inverse_link_function() method") class ExpandColumnsMixin: """ Mixin class providing convenience methods for column expansion. """ @staticmethod def _expand_columns(X: np.ndarray, new_columns: np.ndarray) -> np.ndarray: """ Expands the data matrix columns without reordering the indices. Args: X: Base dataset to expand. new_columns: The columns to expand the dataset by. Returns: Expanded dataset. """ return np.hstack([X, new_columns]) @staticmethod def update_tabu_edges( idx_group: List[int], tabu_edges: List[Tuple[int, int]], tabu_idx_group: bool, ) -> List[Tuple[int, int]]: """ Tabu edges are: 1. all user defined connections to original feature column 2. all inter-feature connections (optional) Args: idx_group: The group of indices which correspond to a single expanded column. tabu_edges: The list of tabu_edges to be updated. tabu_idx_group: Whether inter-group edges should also be considered tabu. I.e if a result of a column expansion, often want to prevent edges being learned between parameters. Returns: Updated tabu_edges """ if tabu_edges is None: tabu_edges = [] # copy to prevent mutations tabu_edges = deepcopy(tabu_edges) # handle 1. 
new_tabu_edges = [] # for each original tabu pair for (i, j) in tabu_edges: # idx_group[0] is the original column index if i == idx_group[0]: new_tabu_edges += [(idx, j) for idx in idx_group[1:]] elif j == idx_group[0]: new_tabu_edges += [(i, idx) for idx in idx_group[1:]] # all new edges added to tabu_edges tabu_edges += new_tabu_edges # handle 2. if tabu_idx_group: # add on all pairwise permutations of particular feature group # NOTE: permutations are needed for edge directionality tabu_edges += list(itertools.permutations(idx_group, 2)) return tabu_edges @staticmethod def update_tabu_nodes( idx_group: List[int], tabu_nodes: List[int] ) -> List[Tuple[int, int]]: """ Tabu nodes are: 1. all user defined connections to original feature column Args: idx_group: The group of indices which correspond to a single expanded column. tabu_nodes: The list of tabu_nodes to be updated. Returns: Updated tabu_nodes """ if tabu_nodes is None: return tabu_nodes # copy to prevent mutations tabu_nodes = deepcopy(tabu_nodes) new_tabu_nodes = [] for i in tabu_nodes: # NOTE: the first element in the idx_group is guaranteed as self.idx if i == idx_group[0]: new_tabu_nodes += idx_group[1:] # add on the new tabu nodes tabu_nodes += new_tabu_nodes return tabu_nodes
31.742765
105
0.630976
8,103
0.820806
0
0
4,021
0.407314
0
0
6,801
0.688918
f7ec17b78bb1ba2ad0135e9a1b1bf5b7c8916ff3
4,225
py
Python
src/cmdsh/utils.py
kotfu/cmdsh
c9083793de9117e4c5c4dfcccdeee1b83a0be7ab
[ "MIT" ]
null
null
null
src/cmdsh/utils.py
kotfu/cmdsh
c9083793de9117e4c5c4dfcccdeee1b83a0be7ab
[ "MIT" ]
null
null
null
src/cmdsh/utils.py
kotfu/cmdsh
c9083793de9117e4c5c4dfcccdeee1b83a0be7ab
[ "MIT" ]
null
null
null
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Jared Crapo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Utility functions (not classes)
"""
import inspect
import types
from typing import Callable


def validate_callable_param_count(func: Callable, count: int) -> None:
    """Ensure a function has the given number of parameters."""
    signature = inspect.signature(func)
    # validate that the callable has the right number of parameters
    nparam = len(signature.parameters)
    if nparam != count:
        raise TypeError('{} has {} positional arguments, expected {}'.format(
            func.__name__,
            nparam,
            count,
        ))


def validate_callable_argument(func, argnum, typ) -> None:
    """Validate that a certain argument of func is annotated for a specific type"""
    signature = inspect.signature(func)
    paramname = list(signature.parameters.keys())[argnum-1]
    param = signature.parameters[paramname]
    if param.annotation != typ:
        raise TypeError('argument {} of {} has incompatible type {}, expected {}'.format(
            argnum,
            func.__name__,
            param.annotation,
            typ.__name__,
        ))


def validate_callable_return(func, typ) -> None:
    """Validate that func is annotated to return a specific type"""
    signature = inspect.signature(func)
    if typ:
        typname = typ.__name__
    else:
        typname = 'None'
    if signature.return_annotation != typ:
        raise TypeError("{} must declare return a return type of '{}'".format(
            func.__name__,
            typname,
        ))


def rebind_method(method, obj) -> None:
    """Rebind method from one object to another

    Call it something like this:

        rebind_method(obj1, obj2.do_command)

    This rebinds the ``do_command`` method from obj2 to obj1. Meaning after this
    function call you can:

        obj1.do_command()

    This works only on instantiated objects, not on classes.
    """
    #
    # this is dark python magic
    #
    # if we were doing this in a hardcoded way, we might do:
    #
    #   obj.method_name = types.MethodType(self.method_name.__func__, obj)
    #
    # TODO add force keyword parameter which defaults to false. If false, raise an
    # exception if the method already exists on obj
    method_name = method.__name__
    setattr(obj, method_name, types.MethodType(method.__func__, obj))


def bind_function(func, obj) -> None:
    """Bind a function to an object

    You must define func with a ``self`` parameter, which is gonna look wierd:

        def myfunc(self, param):
            return param

        shell = cmdsh.Shell()
        utils.bind_function(myfunc, shell)

    You can use this function to bind a function to a class, so that all future
    objects of that class have the method:

        cmdsh.utils.bind_function(cmdsh.parsers.SimpleParser.parse, cmdsh.Shell)
    """
    #
    # this is dark python magic
    #
    # if we were doing this in a hardcoded way, we would:
    #
    #   obj.method_name = types.Methodtype(func, obj)
    #
    func_name = func.__name__
    setattr(obj, func_name, types.MethodType(func, obj))


# TODO write bind_attribute()
32.5
89
0.680947
0
0
0
0
0
0
0
0
2,790
0.660355
f7ecb294c442659591e90f954f3dc3437349ef17
4,992
py
Python
tensorflow/python/tpu/tpu_outside_compilation_test.py
Arushacked/tensorflow
9abd61ae0b2d239d3060cdd3d46b54a105159828
[ "Apache-2.0" ]
78
2020-08-04T12:36:25.000Z
2022-03-25T04:23:40.000Z
tensorflow/python/tpu/tpu_outside_compilation_test.py
Arushacked/tensorflow
9abd61ae0b2d239d3060cdd3d46b54a105159828
[ "Apache-2.0" ]
2
2021-11-10T20:08:14.000Z
2022-02-10T02:44:26.000Z
tensorflow/python/tpu/tpu_outside_compilation_test.py
Arushacked/tensorflow
9abd61ae0b2d239d3060cdd3d46b54a105159828
[ "Apache-2.0" ]
25
2020-08-31T12:21:19.000Z
2022-03-20T05:16:32.000Z
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU outside compilation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util

FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")


def get_tpu_cluster_resolver():
  resolver = tpu_cluster_resolver.TPUClusterResolver(
      tpu=FLAGS.tpu,
      zone=FLAGS.zone,
      project=FLAGS.project,
  )
  return resolver


def get_tpu_strategy():
  resolver = get_tpu_cluster_resolver()
  remote.connect_to_cluster(resolver)
  tpu_strategy_util.initialize_tpu_system(resolver)
  return tpu_lib.TPUStrategy(resolver)


class TpuOutsideCompilationTest(test.TestCase):

  def testResourceVariableAssignOnHost(self):
    strategy = get_tpu_strategy()
    with strategy.scope():
      v = variables.Variable(
          0.0, aggregation=variables.VariableAggregation.MEAN)
    v2 = variables.Variable(0.0, aggregation=variables.VariableAggregation.MEAN)

    def assign_fn():
      v2.assign_add(4.0)

    @def_function.function
    def train_step():

      def assign_add():
        v.assign_add(2.0)
        tpu.outside_compilation(assign_fn)
        v.assign_add(3.0)

      strategy.run(assign_add)
      return

    train_step()
    self.assertAllEqual(4.0 * strategy.num_replicas_in_sync, v2.numpy())
    self.assertAllEqual(5.0, v.numpy())

  def testHostInputOnly(self):
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        tpu.outside_compilation(outside_fn, x2)
        return x2 + 5.0

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))

  def testHostInputOutput(self):
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        output = tpu.outside_compilation(outside_fn, x2)
        return output

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))

  def testOutsideCompilationControlFlowIf(self):
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        if x < 50.0:
          return tpu.outside_compilation(outside_fn, x2)
        else:
          return x2

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))

  def testOutsideCompilationControlFlowWhile(self):
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        while x2 < 50.0:
          x2 = tpu.outside_compilation(outside_fn, x2)
        return x2 + 4.0

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))


if __name__ == "__main__":
  test.main()
29.192982
80
0.698317
2,957
0.592348
0
0
1,120
0.224359
0
0
906
0.18149
f7eccedc6580e295788f95c53fa5d25556b9e059
1,338
py
Python
Source/Oyooni/Text Recognition/server.py
Oyooni5245/Oyooni
a00b845ac97eaee74d40cab563b9532fdeca97c8
[ "MIT" ]
null
null
null
Source/Oyooni/Text Recognition/server.py
Oyooni5245/Oyooni
a00b845ac97eaee74d40cab563b9532fdeca97c8
[ "MIT" ]
null
null
null
Source/Oyooni/Text Recognition/server.py
Oyooni5245/Oyooni
a00b845ac97eaee74d40cab563b9532fdeca97c8
[ "MIT" ]
null
null
null
from flask import Flask, request
from flask_restful import Resource, Api
from test import get_models, getTextFromImage
from testDocument import getText
from time import time

app = Flask(__name__)
api = Api(app)

net, refine_net = get_models()


class TextRecognizerService(Resource):
    def post(self):
        try:
            json = request.get_json()
            image_path = json["ImagePath"]
            isDocument = bool(json["IsDocument"])

            if isDocument == False:
                start = time()
                brand_name, texts, language = getTextFromImage(
                    image_path, net, refine_net)
                end = time()

                return {
                    "brand_name": brand_name,
                    "texts": texts,
                    "language": language,
                    "inference_time": end - start
                }, 200
            else:
                text, language = getText(image_path, 'fullDocument.json')

                return {
                    "text": text,
                    "language": language
                }, 200

        except Exception as e:
            return {
                'message': e
            }, 501


api.add_resource(TextRecognizerService, "/recognize-text")

if __name__ == "__main__":
    port = 5006
    app.run(debug=True, port=port)
26.76
73
0.523916
951
0.710762
0
0
0
0
0
0
139
0.103886
f7ee1b4e15755381cc1c76d8d915f30011f727a3
17,132
py
Python
varats/varats/plots/blame_interaction_graph_plots.py
Kaufi-Jonas/VaRA-Tool-Suite
31563896ad7dd1c1a147202b0c5c9fffe772b803
[ "BSD-2-Clause" ]
null
null
null
varats/varats/plots/blame_interaction_graph_plots.py
Kaufi-Jonas/VaRA-Tool-Suite
31563896ad7dd1c1a147202b0c5c9fffe772b803
[ "BSD-2-Clause" ]
null
null
null
varats/varats/plots/blame_interaction_graph_plots.py
Kaufi-Jonas/VaRA-Tool-Suite
31563896ad7dd1c1a147202b0c5c9fffe772b803
[ "BSD-2-Clause" ]
null
null
null
"""Module for BlameInteractionGraph plots.""" import typing as tp from datetime import datetime from pathlib import Path import click import matplotlib.pyplot as plt import networkx as nx import pandas as pd import plotly.offline as offply from matplotlib import style from varats.data.reports.blame_interaction_graph import ( create_blame_interaction_graph, CIGNodeAttrs, CIGEdgeAttrs, AIGNodeAttrs, CAIGNodeAttrs, ) from varats.data.reports.blame_report import BlameReport from varats.mapping.commit_map import get_commit_map from varats.paper_mgmt.case_study import ( newest_processed_revision_for_case_study, ) from varats.plot.plot import Plot, PlotDataEmpty from varats.plot.plots import ( PlotGenerator, REQUIRE_CASE_STUDY, REQUIRE_REVISION, ) from varats.plots.chord_plot_utils import ( make_chord_plot, make_arc_plot, NodeTy, ChordPlotNodeInfo, ChordPlotEdgeInfo, ArcPlotEdgeInfo, ArcPlotNodeInfo, ) from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option from varats.utils.git_util import ( CommitRepoPair, create_commit_lookup_helper, UNCOMMITTED_COMMIT_HASH, FullCommitHash, ShortCommitHash, ) class CommitInteractionGraphPlot(Plot, plot_name='cig_plot'): """Creates a dot file for a commit interaction graph.""" def plot(self, view_mode: bool) -> None: # Nothing to do here. pass def save(self, plot_dir: Path, filetype: str = 'svg') -> None: project_name = self.plot_kwargs["project"] revision = self.plot_kwargs["revision"] cig = create_blame_interaction_graph(project_name, revision ).commit_interaction_graph() nx.set_node_attributes( cig, {node: cig.nodes[node]["commit_hash"] for node in cig.nodes}, "label" ) # pylint: disable=import-outside-toplevel from networkx.drawing.nx_agraph import write_dot write_dot(cig, plot_dir / self.plot_file_name("dot")) def calc_missing_revisions( self, boundary_gradient: float ) -> tp.Set[FullCommitHash]: raise NotImplementedError class CommitInteractionGraphPlotGenerator( PlotGenerator, generator_name="cig-plot", options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION] ): """Plot a commit interaction graph.""" def generate(self) -> tp.List[Plot]: return [ CommitInteractionGraphPlot(self.plot_config, **self.plot_kwargs) ] NodeInfoTy = tp.TypeVar("NodeInfoTy", ChordPlotNodeInfo, ArcPlotNodeInfo) EdgeInfoTy = tp.TypeVar("EdgeInfoTy", ChordPlotEdgeInfo, ArcPlotEdgeInfo) def _prepare_cig_plotly( project_name: str, revision: FullCommitHash, create_node_info: tp.Callable[[NodeTy, CommitRepoPair, nx.DiGraph], NodeInfoTy], create_edge_info: tp.Callable[[CommitRepoPair, CommitRepoPair, int], EdgeInfoTy] ) -> tp.Tuple[tp.List[tp.Tuple[NodeTy, NodeInfoTy]], tp.List[tp.Tuple[ NodeTy, NodeTy, EdgeInfoTy]]]: commit_lookup = create_commit_lookup_helper(project_name) cig = create_blame_interaction_graph(project_name, revision).commit_interaction_graph() def filter_nodes(node: CommitRepoPair) -> bool: if node.commit_hash == UNCOMMITTED_COMMIT_HASH: return False commit = commit_lookup(node) if not commit: return False # make filter configurable return datetime.utcfromtimestamp(commit.commit_time ) >= datetime(2015, 1, 1) nodes: tp.List[tp.Tuple[NodeTy, NodeInfoTy]] = [] node_meta: tp.Dict[NodeTy, CommitRepoPair] = {} for node in cig.nodes: node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node]) commit = node_attrs["commit"] if not filter_nodes(commit): continue node_meta[node] = commit nodes.append((node, create_node_info(node, commit, cig))) nodes = sorted( nodes, key=lambda x: int(commit_lookup(node_meta[x[0]]).commit_time) ) edges: tp.List[tp.Tuple[NodeTy, NodeTy, EdgeInfoTy]] = [] 
for source, sink in cig.edges: amount = tp.cast(CIGEdgeAttrs, cig[source][sink])["amount"] source_commit = tp.cast(CIGNodeAttrs, cig.nodes[source])["commit"] sink_commit = tp.cast(CIGNodeAttrs, cig.nodes[sink])["commit"] if not filter_nodes(source_commit) or not filter_nodes(sink_commit): continue edges.append(( source, sink, create_edge_info(source_commit, sink_commit, amount) )) return nodes, edges class CommitInteractionGraphChordPlot(Plot, plot_name='cig_chord_plot'): """Chord plot for a commit interaction graph.""" def plot(self, view_mode: bool) -> None: project_name: str = self.plot_kwargs["case_study"].project_name revision = get_commit_map(project_name).convert_to_full_or_warn( ShortCommitHash(self.plot_kwargs["revision"]) ) def create_node_data( node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph ) -> ChordPlotNodeInfo: del node del cig return {"info": commit.commit_hash.short_hash, "color": 1} def create_edge_data( source_commit: CommitRepoPair, sink_commit: CommitRepoPair, amount: int ) -> ChordPlotEdgeInfo: return { "size": amount, "color": 1, "info": f"{source_commit.commit_hash.short_hash} " f"--{{{amount}}}--> " f"{sink_commit.commit_hash.short_hash}" } nodes, edges = _prepare_cig_plotly( project_name, revision, create_node_data, create_edge_data ) figure = make_chord_plot(nodes, edges, "Commit Interaction Graph") if view_mode: figure.show() else: offply.plot(figure, filename=self.plot_file_name("html")) def calc_missing_revisions( self, boundary_gradient: float ) -> tp.Set[FullCommitHash]: raise NotImplementedError class CIGChordPlotGenerator( PlotGenerator, generator_name="cig-chord-plot", options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION] ): """Generates a chord plot for a commit interaction graph.""" def generate(self) -> tp.List[Plot]: return [ CommitInteractionGraphChordPlot( self.plot_config, **self.plot_kwargs ) ] class CommitInteractionGraphArcPlot(Plot, plot_name='cig_arc_plot'): """Arc plot for a commit interaction graph.""" def plot(self, view_mode: bool) -> None: project_name: str = self.plot_kwargs["case_study"].project_name revision = get_commit_map(project_name).convert_to_full_or_warn( ShortCommitHash(self.plot_kwargs["revision"]) ) def create_node_data( node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph ) -> ArcPlotNodeInfo: return { "info": commit.commit_hash.short_hash, "size": cig.degree(node), "fill_color": cig.out_degree(node), "line_color": cig.in_degree(node) } def create_edge_data( source_commit: CommitRepoPair, sink_commit: CommitRepoPair, amount: int ) -> ArcPlotEdgeInfo: return { "size": amount, "color": amount, "info": f"{source_commit.commit_hash.short_hash} " f"--{{{amount}}}--> " f"{sink_commit.commit_hash.short_hash}" } nodes, edges = _prepare_cig_plotly( project_name, revision, create_node_data, create_edge_data ) figure = make_arc_plot(nodes, edges, "Commit Interaction Graph") if view_mode: figure.show() else: offply.plot(figure, filename=self.plot_file_name("html")) def calc_missing_revisions( self, boundary_gradient: float ) -> tp.Set[FullCommitHash]: raise NotImplementedError class CIGArcPlotGenerator( PlotGenerator, generator_name="cig-arc-plot", options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION] ): """Generates an arc plot for a commit interaction graph.""" def generate(self) -> tp.List[Plot]: return [ CommitInteractionGraphArcPlot(self.plot_config, **self.plot_kwargs) ] OPTIONAL_SORT_METHOD: CLIOptionTy = make_cli_option( "--sort-by", type=click.Choice(["degree", "time"]), default="degree", required=False, help="Sort method for commit 
interaction graph nodes." ) class CommitInteractionGraphNodeDegreePlot(Plot, plot_name='cig_node_degrees'): """ Plot node degrees of a commit interaction graph. Additional arguments: - sort: criteria to sort the revisions [degree, time] """ def plot(self, view_mode: bool) -> None: sort = self.plot_kwargs["sort"] case_study = self.plot_kwargs["plot_case_study"] style.use(self.plot_config.style()) fig, axes = plt.subplots(1, 1, sharey="all") fig.subplots_adjust(hspace=0.5) fig.suptitle("Commit Interaction Graph - Node Degrees") axes.set_title(case_study.project_name) axes.set_ylabel("Degree") xlabel = "" if sort == "time": xlabel = "Time (old to new)" elif sort == "degree": xlabel = "Commits" axes.set_xlabel(xlabel) revision = newest_processed_revision_for_case_study( case_study, BlameReport ) if not revision: raise PlotDataEmpty() cig = create_blame_interaction_graph(case_study.project_name, revision ).commit_interaction_graph() commit_lookup = create_commit_lookup_helper(case_study.project_name) def filter_nodes(node: CommitRepoPair) -> bool: if node.commit_hash == UNCOMMITTED_COMMIT_HASH: return False return bool(commit_lookup(node)) def commit_time(node: CommitRepoPair) -> datetime: return datetime.utcfromtimestamp(commit_lookup(node).commit_time) nodes: tp.List[tp.Dict[str, tp.Any]] = [] for node in cig.nodes: node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node]) commit = node_attrs["commit"] if not filter_nodes(commit): continue nodes.append(({ "commit_hash": commit.commit_hash, "commit_time": commit_time(commit), "node_degree": cig.degree(node), "node_out_degree": cig.out_degree(node), "node_in_degree": cig.in_degree(node), })) data = pd.DataFrame(nodes) if sort == "time": data.sort_values(by="commit_time", inplace=True) node_degrees = data.loc[:, ["commit_hash", "node_degree"]] node_out_degrees = data.loc[:, ["commit_hash", "node_out_degree"]] node_in_degrees = data.loc[:, ["commit_hash", "node_in_degree"]] if sort == "degree": node_degrees.sort_values(by="node_degree", inplace=True) node_out_degrees.sort_values(by="node_out_degree", inplace=True) node_in_degrees.sort_values(by="node_in_degree", inplace=True) axes.plot(node_degrees["node_degree"].values, label="degree") axes.plot( node_out_degrees["node_out_degree"].values, label="out_degree" ) axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree") axes.legend() def calc_missing_revisions( self, boundary_gradient: float ) -> tp.Set[FullCommitHash]: raise NotImplementedError class CIGNodeDegreePlotGenerator( PlotGenerator, generator_name="cig-node-degrees", options=[REQUIRE_CASE_STUDY, OPTIONAL_SORT_METHOD] ): """Generates a plot of node degrees of a commit interaction graph.""" def generate(self) -> tp.List[Plot]: return [ CommitInteractionGraphNodeDegreePlot( self.plot_config, **self.plot_kwargs ) ] class AuthorInteractionGraphNodeDegreePlot(Plot, plot_name='aig_node_degrees'): """Plot node degrees of a author interaction graph.""" def plot(self, view_mode: bool) -> None: case_study = self.plot_kwargs["plot_case_study"] style.use(self.plot_config.style()) fig, axes = plt.subplots(1, 1, sharey="all") fig.subplots_adjust(hspace=0.5) fig.suptitle("Author Interaction Graph - Node Degrees") axes.set_title(case_study.project_name) axes.set_ylabel("Degree") axes.set_xlabel("Authors") project_name = case_study.project_name revision = newest_processed_revision_for_case_study( case_study, BlameReport ) if not revision: raise PlotDataEmpty() aig = create_blame_interaction_graph(project_name, revision ).author_interaction_graph() 
nodes: tp.List[tp.Dict[str, tp.Any]] = [] for node in aig.nodes: node_attrs = tp.cast(AIGNodeAttrs, aig.nodes[node]) author = node_attrs["author"] nodes.append(({ "author": author, "node_degree": aig.degree(node), "node_out_degree": aig.out_degree(node), "node_in_degree": aig.in_degree(node), })) data = pd.DataFrame(nodes) node_degrees = data.loc[:, ["author", "node_degree"]] node_out_degrees = data.loc[:, ["author", "node_out_degree"]] node_in_degrees = data.loc[:, ["author", "node_in_degree"]] node_degrees.sort_values(by="node_degree", inplace=True) node_out_degrees.sort_values(by="node_out_degree", inplace=True) node_in_degrees.sort_values(by="node_in_degree", inplace=True) axes.plot(node_degrees["node_degree"].values, label="degree") axes.plot( node_out_degrees["node_out_degree"].values, label="out_degree" ) axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree") axes.legend() def calc_missing_revisions( self, boundary_gradient: float ) -> tp.Set[FullCommitHash]: raise NotImplementedError class AIGNodeDegreePlotGenerator( PlotGenerator, generator_name="aig-node-degrees", options=[REQUIRE_CASE_STUDY] ): """Generates a plot of node degrees of a author interaction graph.""" def generate(self) -> tp.List[Plot]: return [ AuthorInteractionGraphNodeDegreePlot( self.plot_config, **self.plot_kwargs ) ] class CommitAuthorInteractionGraphNodeDegreePlot( Plot, plot_name='caig_node_degrees' ): """Plot node degrees of commits in a commit-author interaction graph.""" def plot(self, view_mode: bool) -> None: case_study = self.plot_kwargs["plot_case_study"] style.use(self.plot_config.style()) fig, axes = plt.subplots(1, 1, sharey="all") fig.subplots_adjust(hspace=0.5) fig.suptitle("Commit-Author Interaction Graph - # Interacting Authors") axes.set_title(case_study.project_name) axes.set_ylabel("Authors") axes.set_xlabel("Commits") project_name = case_study.project_name revision = newest_processed_revision_for_case_study( case_study, BlameReport ) if not revision: raise PlotDataEmpty() caig = create_blame_interaction_graph(project_name, revision ).commit_author_interaction_graph() nodes: tp.List[tp.Dict[str, tp.Any]] = [] for node in caig.nodes: node_attrs = tp.cast(CAIGNodeAttrs, caig.nodes[node]) commit = node_attrs["commit"] if commit: nodes.append(({ "commit": commit.commit_hash, "num_authors": caig.degree(node) })) data = pd.DataFrame(nodes) num_authors = data.loc[:, ["commit", "num_authors"]] num_authors.sort_values(by="num_authors", inplace=True) axes.plot(num_authors["num_authors"].values) def calc_missing_revisions( self, boundary_gradient: float ) -> tp.Set[FullCommitHash]: raise NotImplementedError class CAIGNodeDegreePlotGenerator( PlotGenerator, generator_name="caig-node-degrees", options=[ REQUIRE_CASE_STUDY, ] ): """Generates a plot of node degrees of a commit-author interaction graph.""" def generate(self) -> tp.List[Plot]: return [ CommitAuthorInteractionGraphNodeDegreePlot( self.plot_config, **self.plot_kwargs ) ]
33.330739
80
0.627714
13,489
0.787357
0
0
0
0
0
0
2,651
0.15474
f7ef21c429f9bf83356bf40d0aaa0462acb403b0
2,632
py
Python
Day 7/Day 7.py
Dullstar/Advent-Of-Code-2020
7d3a64906ced2ac98bcfe67a9f3294c8756dc493
[ "MIT" ]
null
null
null
Day 7/Day 7.py
Dullstar/Advent-Of-Code-2020
7d3a64906ced2ac98bcfe67a9f3294c8756dc493
[ "MIT" ]
null
null
null
Day 7/Day 7.py
Dullstar/Advent-Of-Code-2020
7d3a64906ced2ac98bcfe67a9f3294c8756dc493
[ "MIT" ]
null
null
null
import re


class Rule:
    def __init__(self, line):
        line = line.strip().split(" contain ")
        line[1] = line[1].strip(".").split(", ")
        self.contents = {}
        for item in line[1]:
            # Grab that number out in front
            regex = re.compile(r"[0-9]+")
            # If we didn't find one that means it's no bags inside
            if match := regex.match(item):
                quantity = int(item[match.span()[0]:match.span()[1]])
                # The +1 deals with the space
                bag_type = item[match.span()[1] + 1:]
                if quantity > 1:
                    # This gets rid of the s if it's plural
                    bag_type = bag_type[:-1]
                self.contents[bag_type] = quantity
        # The s makes things irritating so I want it gone
        self.bag_type = line[0][:-1]

    def contains_directly(self, bag_type: str):
        return bag_type in self.contents

    # Warning: recursive
    def contains(self, bag_type: str, rule_dict: dict):
        if self.contains_directly(bag_type):
            return True
        else:
            for bag in self.contents:
                if bag in rule_dict:
                    if rule_dict[bag].contains(bag_type, rule_dict):
                        return True
                else:
                    print("An unexpected bag was discovered!")
            return False

    def count_internal_bags(self, rule_dict: dict):
        internal_bags = 0
        for bag in self.contents:
            # count these bags...
            internal_bags += self.contents[bag]  # recall that this value represents the quantity
            # ...and count the bags inside of it
            internal_bags += rule_dict[bag].count_internal_bags(rule_dict) * self.contents[bag]
        return internal_bags


def parse_input(filename: str):
    with open(filename, "r") as file:
        rules = {}
        for line in file:
            rule = Rule(line)
            print(f"{rule.bag_type} contains {rule.contents}")
            rules[rule.bag_type] = rule
    return rules


def main():
    rule_dict = parse_input("input.txt")
    shiny_gold = 0
    for rule_entry in rule_dict.keys():
        rule = rule_dict[rule_entry]
        if rule.contains("shiny gold bag", rule_dict):
            print(f"Found {rule.contents} in {rule.bag_type}")
            shiny_gold += 1
    print("\n")
    print(f"Found {shiny_gold} bags containing at least one shiny gold bag.")
    print(f"A shiny gold bag contains {rule_dict['shiny gold bag'].count_internal_bags(rule_dict)} bags.")


if __name__ == "__main__":
    main()
33.74359
106
0.56383
1,808
0.68693
0
0
0
0
0
0
680
0.258359
f7efbdb4f4f2e1681183c05075e6b958502a3563
83,010
py
Python
sdk/python/pulumi_aws_native/apigateway/outputs.py
AaronFriel/pulumi-aws-native
5621690373ac44accdbd20b11bae3be1baf022d1
[ "Apache-2.0" ]
29
2021-09-30T19:32:07.000Z
2022-03-22T21:06:08.000Z
sdk/python/pulumi_aws_native/apigateway/outputs.py
AaronFriel/pulumi-aws-native
5621690373ac44accdbd20b11bae3be1baf022d1
[ "Apache-2.0" ]
232
2021-09-30T19:26:26.000Z
2022-03-31T23:22:06.000Z
sdk/python/pulumi_aws_native/apigateway/outputs.py
AaronFriel/pulumi-aws-native
5621690373ac44accdbd20b11bae3be1baf022d1
[ "Apache-2.0" ]
4
2021-11-10T19:42:01.000Z
2022-02-05T10:15:49.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._enums import * __all__ = [ 'ApiKeyStageKey', 'ApiKeyTag', 'ClientCertificateTag', 'DeploymentAccessLogSetting', 'DeploymentCanarySetting', 'DeploymentCanarySettings', 'DeploymentMethodSetting', 'DeploymentStageDescription', 'DeploymentTag', 'DocumentationPartLocation', 'DomainNameEndpointConfiguration', 'DomainNameMutualTlsAuthentication', 'DomainNameTag', 'MethodIntegration', 'MethodIntegrationResponse', 'MethodResponse', 'RestApiEndpointConfiguration', 'RestApiS3Location', 'RestApiTag', 'StageAccessLogSetting', 'StageCanarySetting', 'StageMethodSetting', 'StageTag', 'UsagePlanApiStage', 'UsagePlanQuotaSettings', 'UsagePlanTag', 'UsagePlanThrottleSettings', 'VpcLinkTag', ] @pulumi.output_type class ApiKeyStageKey(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "restApiId": suggest = "rest_api_id" elif key == "stageName": suggest = "stage_name" if suggest: pulumi.log.warn(f"Key '{key}' not found in ApiKeyStageKey. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ApiKeyStageKey.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ApiKeyStageKey.__key_warning(key) return super().get(key, default) def __init__(__self__, *, rest_api_id: Optional[str] = None, stage_name: Optional[str] = None): """ :param str rest_api_id: The ID of a RestApi resource that includes the stage with which you want to associate the API key. :param str stage_name: The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property. """ if rest_api_id is not None: pulumi.set(__self__, "rest_api_id", rest_api_id) if stage_name is not None: pulumi.set(__self__, "stage_name", stage_name) @property @pulumi.getter(name="restApiId") def rest_api_id(self) -> Optional[str]: """ The ID of a RestApi resource that includes the stage with which you want to associate the API key. """ return pulumi.get(self, "rest_api_id") @property @pulumi.getter(name="stageName") def stage_name(self) -> Optional[str]: """ The name of the stage with which to associate the API key. The stage must be included in the RestApi resource that you specified in the RestApiId property. """ return pulumi.get(self, "stage_name") @pulumi.output_type class ApiKeyTag(dict): def __init__(__self__, *, key: str, value: str): """ :param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. :param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: """ The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. 
You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: """ The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. """ return pulumi.get(self, "value") @pulumi.output_type class ClientCertificateTag(dict): def __init__(__self__, *, key: str, value: str): pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: return pulumi.get(self, "value") @pulumi.output_type class DeploymentAccessLogSetting(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "destinationArn": suggest = "destination_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in DeploymentAccessLogSetting. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: DeploymentAccessLogSetting.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: DeploymentAccessLogSetting.__key_warning(key) return super().get(key, default) def __init__(__self__, *, destination_arn: Optional[str] = None, format: Optional[str] = None): """ :param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. :param str format: A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId. """ if destination_arn is not None: pulumi.set(__self__, "destination_arn", destination_arn) if format is not None: pulumi.set(__self__, "format", format) @property @pulumi.getter(name="destinationArn") def destination_arn(self) -> Optional[str]: """ The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. """ return pulumi.get(self, "destination_arn") @property @pulumi.getter def format(self) -> Optional[str]: """ A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId. """ return pulumi.get(self, "format") @pulumi.output_type class DeploymentCanarySetting(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "percentTraffic": suggest = "percent_traffic" elif key == "stageVariableOverrides": suggest = "stage_variable_overrides" elif key == "useStageCache": suggest = "use_stage_cache" if suggest: pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySetting. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: DeploymentCanarySetting.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: DeploymentCanarySetting.__key_warning(key) return super().get(key, default) def __init__(__self__, *, percent_traffic: Optional[float] = None, stage_variable_overrides: Optional[Any] = None, use_stage_cache: Optional[bool] = None): """ :param float percent_traffic: The percent (0-100) of traffic diverted to a canary deployment. :param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. :param bool use_stage_cache: Whether the canary deployment uses the stage cache or not. """ if percent_traffic is not None: pulumi.set(__self__, "percent_traffic", percent_traffic) if stage_variable_overrides is not None: pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides) if use_stage_cache is not None: pulumi.set(__self__, "use_stage_cache", use_stage_cache) @property @pulumi.getter(name="percentTraffic") def percent_traffic(self) -> Optional[float]: """ The percent (0-100) of traffic diverted to a canary deployment. """ return pulumi.get(self, "percent_traffic") @property @pulumi.getter(name="stageVariableOverrides") def stage_variable_overrides(self) -> Optional[Any]: """ Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. """ return pulumi.get(self, "stage_variable_overrides") @property @pulumi.getter(name="useStageCache") def use_stage_cache(self) -> Optional[bool]: """ Whether the canary deployment uses the stage cache or not. """ return pulumi.get(self, "use_stage_cache") @pulumi.output_type class DeploymentCanarySettings(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "percentTraffic": suggest = "percent_traffic" elif key == "stageVariableOverrides": suggest = "stage_variable_overrides" elif key == "useStageCache": suggest = "use_stage_cache" if suggest: pulumi.log.warn(f"Key '{key}' not found in DeploymentCanarySettings. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: DeploymentCanarySettings.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: DeploymentCanarySettings.__key_warning(key) return super().get(key, default) def __init__(__self__, *, percent_traffic: Optional[float] = None, stage_variable_overrides: Optional[Any] = None, use_stage_cache: Optional[bool] = None): """ :param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment. :param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed. :param bool use_stage_cache: Whether the canary deployment uses the stage cache. 
""" if percent_traffic is not None: pulumi.set(__self__, "percent_traffic", percent_traffic) if stage_variable_overrides is not None: pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides) if use_stage_cache is not None: pulumi.set(__self__, "use_stage_cache", use_stage_cache) @property @pulumi.getter(name="percentTraffic") def percent_traffic(self) -> Optional[float]: """ The percentage (0-100) of traffic diverted to a canary deployment. """ return pulumi.get(self, "percent_traffic") @property @pulumi.getter(name="stageVariableOverrides") def stage_variable_overrides(self) -> Optional[Any]: """ Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. Duplicates are not allowed. """ return pulumi.get(self, "stage_variable_overrides") @property @pulumi.getter(name="useStageCache") def use_stage_cache(self) -> Optional[bool]: """ Whether the canary deployment uses the stage cache. """ return pulumi.get(self, "use_stage_cache") @pulumi.output_type class DeploymentMethodSetting(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "cacheDataEncrypted": suggest = "cache_data_encrypted" elif key == "cacheTtlInSeconds": suggest = "cache_ttl_in_seconds" elif key == "cachingEnabled": suggest = "caching_enabled" elif key == "dataTraceEnabled": suggest = "data_trace_enabled" elif key == "httpMethod": suggest = "http_method" elif key == "loggingLevel": suggest = "logging_level" elif key == "metricsEnabled": suggest = "metrics_enabled" elif key == "resourcePath": suggest = "resource_path" elif key == "throttlingBurstLimit": suggest = "throttling_burst_limit" elif key == "throttlingRateLimit": suggest = "throttling_rate_limit" if suggest: pulumi.log.warn(f"Key '{key}' not found in DeploymentMethodSetting. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: DeploymentMethodSetting.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: DeploymentMethodSetting.__key_warning(key) return super().get(key, default) def __init__(__self__, *, cache_data_encrypted: Optional[bool] = None, cache_ttl_in_seconds: Optional[int] = None, caching_enabled: Optional[bool] = None, data_trace_enabled: Optional[bool] = None, http_method: Optional[str] = None, logging_level: Optional[str] = None, metrics_enabled: Optional[bool] = None, resource_path: Optional[str] = None, throttling_burst_limit: Optional[int] = None, throttling_rate_limit: Optional[float] = None): """ :param bool cache_data_encrypted: Indicates whether the cached responses are encrypted :param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. :param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses. :param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. :param str http_method: The HTTP method. :param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. :param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage. 
:param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. :param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. :param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. """ if cache_data_encrypted is not None: pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted) if cache_ttl_in_seconds is not None: pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds) if caching_enabled is not None: pulumi.set(__self__, "caching_enabled", caching_enabled) if data_trace_enabled is not None: pulumi.set(__self__, "data_trace_enabled", data_trace_enabled) if http_method is not None: pulumi.set(__self__, "http_method", http_method) if logging_level is not None: pulumi.set(__self__, "logging_level", logging_level) if metrics_enabled is not None: pulumi.set(__self__, "metrics_enabled", metrics_enabled) if resource_path is not None: pulumi.set(__self__, "resource_path", resource_path) if throttling_burst_limit is not None: pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit) if throttling_rate_limit is not None: pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit) @property @pulumi.getter(name="cacheDataEncrypted") def cache_data_encrypted(self) -> Optional[bool]: """ Indicates whether the cached responses are encrypted """ return pulumi.get(self, "cache_data_encrypted") @property @pulumi.getter(name="cacheTtlInSeconds") def cache_ttl_in_seconds(self) -> Optional[int]: """ The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. """ return pulumi.get(self, "cache_ttl_in_seconds") @property @pulumi.getter(name="cachingEnabled") def caching_enabled(self) -> Optional[bool]: """ Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses. """ return pulumi.get(self, "caching_enabled") @property @pulumi.getter(name="dataTraceEnabled") def data_trace_enabled(self) -> Optional[bool]: """ Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. """ return pulumi.get(self, "data_trace_enabled") @property @pulumi.getter(name="httpMethod") def http_method(self) -> Optional[str]: """ The HTTP method. """ return pulumi.get(self, "http_method") @property @pulumi.getter(name="loggingLevel") def logging_level(self) -> Optional[str]: """ The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. """ return pulumi.get(self, "logging_level") @property @pulumi.getter(name="metricsEnabled") def metrics_enabled(self) -> Optional[bool]: """ Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage. """ return pulumi.get(self, "metrics_enabled") @property @pulumi.getter(name="resourcePath") def resource_path(self) -> Optional[str]: """ The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. 
""" return pulumi.get(self, "resource_path") @property @pulumi.getter(name="throttlingBurstLimit") def throttling_burst_limit(self) -> Optional[int]: """ The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. """ return pulumi.get(self, "throttling_burst_limit") @property @pulumi.getter(name="throttlingRateLimit") def throttling_rate_limit(self) -> Optional[float]: """ The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. """ return pulumi.get(self, "throttling_rate_limit") @pulumi.output_type class DeploymentStageDescription(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "accessLogSetting": suggest = "access_log_setting" elif key == "cacheClusterEnabled": suggest = "cache_cluster_enabled" elif key == "cacheClusterSize": suggest = "cache_cluster_size" elif key == "cacheDataEncrypted": suggest = "cache_data_encrypted" elif key == "cacheTtlInSeconds": suggest = "cache_ttl_in_seconds" elif key == "cachingEnabled": suggest = "caching_enabled" elif key == "canarySetting": suggest = "canary_setting" elif key == "clientCertificateId": suggest = "client_certificate_id" elif key == "dataTraceEnabled": suggest = "data_trace_enabled" elif key == "documentationVersion": suggest = "documentation_version" elif key == "loggingLevel": suggest = "logging_level" elif key == "methodSettings": suggest = "method_settings" elif key == "metricsEnabled": suggest = "metrics_enabled" elif key == "throttlingBurstLimit": suggest = "throttling_burst_limit" elif key == "throttlingRateLimit": suggest = "throttling_rate_limit" elif key == "tracingEnabled": suggest = "tracing_enabled" if suggest: pulumi.log.warn(f"Key '{key}' not found in DeploymentStageDescription. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: DeploymentStageDescription.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: DeploymentStageDescription.__key_warning(key) return super().get(key, default) def __init__(__self__, *, access_log_setting: Optional['outputs.DeploymentAccessLogSetting'] = None, cache_cluster_enabled: Optional[bool] = None, cache_cluster_size: Optional[str] = None, cache_data_encrypted: Optional[bool] = None, cache_ttl_in_seconds: Optional[int] = None, caching_enabled: Optional[bool] = None, canary_setting: Optional['outputs.DeploymentCanarySetting'] = None, client_certificate_id: Optional[str] = None, data_trace_enabled: Optional[bool] = None, description: Optional[str] = None, documentation_version: Optional[str] = None, logging_level: Optional[str] = None, method_settings: Optional[Sequence['outputs.DeploymentMethodSetting']] = None, metrics_enabled: Optional[bool] = None, tags: Optional[Sequence['outputs.DeploymentTag']] = None, throttling_burst_limit: Optional[int] = None, throttling_rate_limit: Optional[float] = None, tracing_enabled: Optional[bool] = None, variables: Optional[Any] = None): """ :param 'DeploymentAccessLogSetting' access_log_setting: Specifies settings for logging access in this stage. :param bool cache_cluster_enabled: Indicates whether cache clustering is enabled for the stage. :param str cache_cluster_size: The size of the stage's cache cluster. :param bool cache_data_encrypted: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. 
:param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. :param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses. :param 'DeploymentCanarySetting' canary_setting: Specifies settings for the canary deployment in this stage. :param str client_certificate_id: The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage. :param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. :param str description: A description of the purpose of the stage. :param str documentation_version: The version identifier of the API documentation snapshot. :param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. :param Sequence['DeploymentMethodSetting'] method_settings: Configures settings for all of the stage's methods. :param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage. :param Sequence['DeploymentTag'] tags: An array of arbitrary tags (key-value pairs) to associate with the stage. :param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. :param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. :param bool tracing_enabled: Specifies whether active tracing with X-ray is enabled for this stage. :param Any variables: A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+. 
""" if access_log_setting is not None: pulumi.set(__self__, "access_log_setting", access_log_setting) if cache_cluster_enabled is not None: pulumi.set(__self__, "cache_cluster_enabled", cache_cluster_enabled) if cache_cluster_size is not None: pulumi.set(__self__, "cache_cluster_size", cache_cluster_size) if cache_data_encrypted is not None: pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted) if cache_ttl_in_seconds is not None: pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds) if caching_enabled is not None: pulumi.set(__self__, "caching_enabled", caching_enabled) if canary_setting is not None: pulumi.set(__self__, "canary_setting", canary_setting) if client_certificate_id is not None: pulumi.set(__self__, "client_certificate_id", client_certificate_id) if data_trace_enabled is not None: pulumi.set(__self__, "data_trace_enabled", data_trace_enabled) if description is not None: pulumi.set(__self__, "description", description) if documentation_version is not None: pulumi.set(__self__, "documentation_version", documentation_version) if logging_level is not None: pulumi.set(__self__, "logging_level", logging_level) if method_settings is not None: pulumi.set(__self__, "method_settings", method_settings) if metrics_enabled is not None: pulumi.set(__self__, "metrics_enabled", metrics_enabled) if tags is not None: pulumi.set(__self__, "tags", tags) if throttling_burst_limit is not None: pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit) if throttling_rate_limit is not None: pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit) if tracing_enabled is not None: pulumi.set(__self__, "tracing_enabled", tracing_enabled) if variables is not None: pulumi.set(__self__, "variables", variables) @property @pulumi.getter(name="accessLogSetting") def access_log_setting(self) -> Optional['outputs.DeploymentAccessLogSetting']: """ Specifies settings for logging access in this stage. """ return pulumi.get(self, "access_log_setting") @property @pulumi.getter(name="cacheClusterEnabled") def cache_cluster_enabled(self) -> Optional[bool]: """ Indicates whether cache clustering is enabled for the stage. """ return pulumi.get(self, "cache_cluster_enabled") @property @pulumi.getter(name="cacheClusterSize") def cache_cluster_size(self) -> Optional[str]: """ The size of the stage's cache cluster. """ return pulumi.get(self, "cache_cluster_size") @property @pulumi.getter(name="cacheDataEncrypted") def cache_data_encrypted(self) -> Optional[bool]: """ The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. """ return pulumi.get(self, "cache_data_encrypted") @property @pulumi.getter(name="cacheTtlInSeconds") def cache_ttl_in_seconds(self) -> Optional[int]: """ The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. """ return pulumi.get(self, "cache_ttl_in_seconds") @property @pulumi.getter(name="cachingEnabled") def caching_enabled(self) -> Optional[bool]: """ Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses. """ return pulumi.get(self, "caching_enabled") @property @pulumi.getter(name="canarySetting") def canary_setting(self) -> Optional['outputs.DeploymentCanarySetting']: """ Specifies settings for the canary deployment in this stage. 
""" return pulumi.get(self, "canary_setting") @property @pulumi.getter(name="clientCertificateId") def client_certificate_id(self) -> Optional[str]: """ The identifier of the client certificate that API Gateway uses to call your integration endpoints in the stage. """ return pulumi.get(self, "client_certificate_id") @property @pulumi.getter(name="dataTraceEnabled") def data_trace_enabled(self) -> Optional[bool]: """ Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. """ return pulumi.get(self, "data_trace_enabled") @property @pulumi.getter def description(self) -> Optional[str]: """ A description of the purpose of the stage. """ return pulumi.get(self, "description") @property @pulumi.getter(name="documentationVersion") def documentation_version(self) -> Optional[str]: """ The version identifier of the API documentation snapshot. """ return pulumi.get(self, "documentation_version") @property @pulumi.getter(name="loggingLevel") def logging_level(self) -> Optional[str]: """ The logging level for this method. For valid values, see the loggingLevel property of the Stage resource in the Amazon API Gateway API Reference. """ return pulumi.get(self, "logging_level") @property @pulumi.getter(name="methodSettings") def method_settings(self) -> Optional[Sequence['outputs.DeploymentMethodSetting']]: """ Configures settings for all of the stage's methods. """ return pulumi.get(self, "method_settings") @property @pulumi.getter(name="metricsEnabled") def metrics_enabled(self) -> Optional[bool]: """ Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage. """ return pulumi.get(self, "metrics_enabled") @property @pulumi.getter def tags(self) -> Optional[Sequence['outputs.DeploymentTag']]: """ An array of arbitrary tags (key-value pairs) to associate with the stage. """ return pulumi.get(self, "tags") @property @pulumi.getter(name="throttlingBurstLimit") def throttling_burst_limit(self) -> Optional[int]: """ The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. """ return pulumi.get(self, "throttling_burst_limit") @property @pulumi.getter(name="throttlingRateLimit") def throttling_rate_limit(self) -> Optional[float]: """ The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. """ return pulumi.get(self, "throttling_rate_limit") @property @pulumi.getter(name="tracingEnabled") def tracing_enabled(self) -> Optional[bool]: """ Specifies whether active tracing with X-ray is enabled for this stage. """ return pulumi.get(self, "tracing_enabled") @property @pulumi.getter def variables(self) -> Optional[Any]: """ A map that defines the stage variables. Variable names must consist of alphanumeric characters, and the values must match the following regular expression: [A-Za-z0-9-._~:/?#&=,]+. 
""" return pulumi.get(self, "variables") @pulumi.output_type class DeploymentTag(dict): def __init__(__self__, *, key: str, value: str): """ :param str key: The key name of the tag :param str value: The value for the tag """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: """ The key name of the tag """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: """ The value for the tag """ return pulumi.get(self, "value") @pulumi.output_type class DocumentationPartLocation(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "statusCode": suggest = "status_code" if suggest: pulumi.log.warn(f"Key '{key}' not found in DocumentationPartLocation. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: DocumentationPartLocation.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: DocumentationPartLocation.__key_warning(key) return super().get(key, default) def __init__(__self__, *, method: Optional[str] = None, name: Optional[str] = None, path: Optional[str] = None, status_code: Optional[str] = None, type: Optional[str] = None): if method is not None: pulumi.set(__self__, "method", method) if name is not None: pulumi.set(__self__, "name", name) if path is not None: pulumi.set(__self__, "path", path) if status_code is not None: pulumi.set(__self__, "status_code", status_code) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def method(self) -> Optional[str]: return pulumi.get(self, "method") @property @pulumi.getter def name(self) -> Optional[str]: return pulumi.get(self, "name") @property @pulumi.getter def path(self) -> Optional[str]: return pulumi.get(self, "path") @property @pulumi.getter(name="statusCode") def status_code(self) -> Optional[str]: return pulumi.get(self, "status_code") @property @pulumi.getter def type(self) -> Optional[str]: return pulumi.get(self, "type") @pulumi.output_type class DomainNameEndpointConfiguration(dict): def __init__(__self__, *, types: Optional[Sequence[str]] = None): if types is not None: pulumi.set(__self__, "types", types) @property @pulumi.getter def types(self) -> Optional[Sequence[str]]: return pulumi.get(self, "types") @pulumi.output_type class DomainNameMutualTlsAuthentication(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "truststoreUri": suggest = "truststore_uri" elif key == "truststoreVersion": suggest = "truststore_version" if suggest: pulumi.log.warn(f"Key '{key}' not found in DomainNameMutualTlsAuthentication. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: DomainNameMutualTlsAuthentication.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: DomainNameMutualTlsAuthentication.__key_warning(key) return super().get(key, default) def __init__(__self__, *, truststore_uri: Optional[str] = None, truststore_version: Optional[str] = None): if truststore_uri is not None: pulumi.set(__self__, "truststore_uri", truststore_uri) if truststore_version is not None: pulumi.set(__self__, "truststore_version", truststore_version) @property @pulumi.getter(name="truststoreUri") def truststore_uri(self) -> Optional[str]: return pulumi.get(self, "truststore_uri") @property @pulumi.getter(name="truststoreVersion") def truststore_version(self) -> Optional[str]: return pulumi.get(self, "truststore_version") @pulumi.output_type class DomainNameTag(dict): def __init__(__self__, *, key: Optional[str] = None, value: Optional[str] = None): if key is not None: pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> Optional[str]: return pulumi.get(self, "value") @pulumi.output_type class MethodIntegration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "cacheKeyParameters": suggest = "cache_key_parameters" elif key == "cacheNamespace": suggest = "cache_namespace" elif key == "connectionId": suggest = "connection_id" elif key == "connectionType": suggest = "connection_type" elif key == "contentHandling": suggest = "content_handling" elif key == "integrationHttpMethod": suggest = "integration_http_method" elif key == "integrationResponses": suggest = "integration_responses" elif key == "passthroughBehavior": suggest = "passthrough_behavior" elif key == "requestParameters": suggest = "request_parameters" elif key == "requestTemplates": suggest = "request_templates" elif key == "timeoutInMillis": suggest = "timeout_in_millis" if suggest: pulumi.log.warn(f"Key '{key}' not found in MethodIntegration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: MethodIntegration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: MethodIntegration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, type: 'MethodIntegrationType', cache_key_parameters: Optional[Sequence[str]] = None, cache_namespace: Optional[str] = None, connection_id: Optional[str] = None, connection_type: Optional['MethodIntegrationConnectionType'] = None, content_handling: Optional['MethodIntegrationContentHandling'] = None, credentials: Optional[str] = None, integration_http_method: Optional[str] = None, integration_responses: Optional[Sequence['outputs.MethodIntegrationResponse']] = None, passthrough_behavior: Optional['MethodIntegrationPassthroughBehavior'] = None, request_parameters: Optional[Any] = None, request_templates: Optional[Any] = None, timeout_in_millis: Optional[int] = None, uri: Optional[str] = None): """ :param 'MethodIntegrationType' type: The type of backend that your method is running. :param Sequence[str] cache_key_parameters: A list of request parameters whose values API Gateway caches. :param str cache_namespace: An API-specific tag group of related cached parameters. 
:param str connection_id: The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined. :param 'MethodIntegrationConnectionType' connection_type: The type of the network connection to the integration endpoint. :param 'MethodIntegrationContentHandling' content_handling: Specifies how to handle request payload content type conversions. :param str credentials: The credentials that are required for the integration. :param str integration_http_method: The integration's HTTP method type. :param Sequence['MethodIntegrationResponse'] integration_responses: The response that API Gateway provides after a method's backend completes processing a request. :param 'MethodIntegrationPassthroughBehavior' passthrough_behavior: Indicates when API Gateway passes requests to the targeted backend. :param Any request_parameters: The request parameters that API Gateway sends with the backend request. :param Any request_templates: A map of Apache Velocity templates that are applied on the request payload. :param int timeout_in_millis: Custom timeout between 50 and 29,000 milliseconds. :param str uri: The Uniform Resource Identifier (URI) for the integration. """ pulumi.set(__self__, "type", type) if cache_key_parameters is not None: pulumi.set(__self__, "cache_key_parameters", cache_key_parameters) if cache_namespace is not None: pulumi.set(__self__, "cache_namespace", cache_namespace) if connection_id is not None: pulumi.set(__self__, "connection_id", connection_id) if connection_type is not None: pulumi.set(__self__, "connection_type", connection_type) if content_handling is not None: pulumi.set(__self__, "content_handling", content_handling) if credentials is not None: pulumi.set(__self__, "credentials", credentials) if integration_http_method is not None: pulumi.set(__self__, "integration_http_method", integration_http_method) if integration_responses is not None: pulumi.set(__self__, "integration_responses", integration_responses) if passthrough_behavior is not None: pulumi.set(__self__, "passthrough_behavior", passthrough_behavior) if request_parameters is not None: pulumi.set(__self__, "request_parameters", request_parameters) if request_templates is not None: pulumi.set(__self__, "request_templates", request_templates) if timeout_in_millis is not None: pulumi.set(__self__, "timeout_in_millis", timeout_in_millis) if uri is not None: pulumi.set(__self__, "uri", uri) @property @pulumi.getter def type(self) -> 'MethodIntegrationType': """ The type of backend that your method is running. """ return pulumi.get(self, "type") @property @pulumi.getter(name="cacheKeyParameters") def cache_key_parameters(self) -> Optional[Sequence[str]]: """ A list of request parameters whose values API Gateway caches. """ return pulumi.get(self, "cache_key_parameters") @property @pulumi.getter(name="cacheNamespace") def cache_namespace(self) -> Optional[str]: """ An API-specific tag group of related cached parameters. """ return pulumi.get(self, "cache_namespace") @property @pulumi.getter(name="connectionId") def connection_id(self) -> Optional[str]: """ The ID of the VpcLink used for the integration when connectionType=VPC_LINK, otherwise undefined. """ return pulumi.get(self, "connection_id") @property @pulumi.getter(name="connectionType") def connection_type(self) -> Optional['MethodIntegrationConnectionType']: """ The type of the network connection to the integration endpoint. 
""" return pulumi.get(self, "connection_type") @property @pulumi.getter(name="contentHandling") def content_handling(self) -> Optional['MethodIntegrationContentHandling']: """ Specifies how to handle request payload content type conversions. """ return pulumi.get(self, "content_handling") @property @pulumi.getter def credentials(self) -> Optional[str]: """ The credentials that are required for the integration. """ return pulumi.get(self, "credentials") @property @pulumi.getter(name="integrationHttpMethod") def integration_http_method(self) -> Optional[str]: """ The integration's HTTP method type. """ return pulumi.get(self, "integration_http_method") @property @pulumi.getter(name="integrationResponses") def integration_responses(self) -> Optional[Sequence['outputs.MethodIntegrationResponse']]: """ The response that API Gateway provides after a method's backend completes processing a request. """ return pulumi.get(self, "integration_responses") @property @pulumi.getter(name="passthroughBehavior") def passthrough_behavior(self) -> Optional['MethodIntegrationPassthroughBehavior']: """ Indicates when API Gateway passes requests to the targeted backend. """ return pulumi.get(self, "passthrough_behavior") @property @pulumi.getter(name="requestParameters") def request_parameters(self) -> Optional[Any]: """ The request parameters that API Gateway sends with the backend request. """ return pulumi.get(self, "request_parameters") @property @pulumi.getter(name="requestTemplates") def request_templates(self) -> Optional[Any]: """ A map of Apache Velocity templates that are applied on the request payload. """ return pulumi.get(self, "request_templates") @property @pulumi.getter(name="timeoutInMillis") def timeout_in_millis(self) -> Optional[int]: """ Custom timeout between 50 and 29,000 milliseconds. """ return pulumi.get(self, "timeout_in_millis") @property @pulumi.getter def uri(self) -> Optional[str]: """ The Uniform Resource Identifier (URI) for the integration. """ return pulumi.get(self, "uri") @pulumi.output_type class MethodIntegrationResponse(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "statusCode": suggest = "status_code" elif key == "contentHandling": suggest = "content_handling" elif key == "responseParameters": suggest = "response_parameters" elif key == "responseTemplates": suggest = "response_templates" elif key == "selectionPattern": suggest = "selection_pattern" if suggest: pulumi.log.warn(f"Key '{key}' not found in MethodIntegrationResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: MethodIntegrationResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: MethodIntegrationResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, status_code: str, content_handling: Optional['MethodIntegrationResponseContentHandling'] = None, response_parameters: Optional[Any] = None, response_templates: Optional[Any] = None, selection_pattern: Optional[str] = None): """ :param str status_code: The status code that API Gateway uses to map the integration response to a MethodResponse status code. :param 'MethodIntegrationResponseContentHandling' content_handling: Specifies how to handle request payload content type conversions. :param Any response_parameters: The response parameters from the backend response that API Gateway sends to the method response. 
:param Any response_templates: The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value. :param str selection_pattern: A regular expression that specifies which error strings or status codes from the backend map to the integration response. """ pulumi.set(__self__, "status_code", status_code) if content_handling is not None: pulumi.set(__self__, "content_handling", content_handling) if response_parameters is not None: pulumi.set(__self__, "response_parameters", response_parameters) if response_templates is not None: pulumi.set(__self__, "response_templates", response_templates) if selection_pattern is not None: pulumi.set(__self__, "selection_pattern", selection_pattern) @property @pulumi.getter(name="statusCode") def status_code(self) -> str: """ The status code that API Gateway uses to map the integration response to a MethodResponse status code. """ return pulumi.get(self, "status_code") @property @pulumi.getter(name="contentHandling") def content_handling(self) -> Optional['MethodIntegrationResponseContentHandling']: """ Specifies how to handle request payload content type conversions. """ return pulumi.get(self, "content_handling") @property @pulumi.getter(name="responseParameters") def response_parameters(self) -> Optional[Any]: """ The response parameters from the backend response that API Gateway sends to the method response. """ return pulumi.get(self, "response_parameters") @property @pulumi.getter(name="responseTemplates") def response_templates(self) -> Optional[Any]: """ The templates that are used to transform the integration response body. Specify templates as key-value pairs (string-to-string mappings), with a content type as the key and a template as the value. """ return pulumi.get(self, "response_templates") @property @pulumi.getter(name="selectionPattern") def selection_pattern(self) -> Optional[str]: """ A regular expression that specifies which error strings or status codes from the backend map to the integration response. """ return pulumi.get(self, "selection_pattern") @pulumi.output_type class MethodResponse(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "statusCode": suggest = "status_code" elif key == "responseModels": suggest = "response_models" elif key == "responseParameters": suggest = "response_parameters" if suggest: pulumi.log.warn(f"Key '{key}' not found in MethodResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: MethodResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: MethodResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, status_code: str, response_models: Optional[Any] = None, response_parameters: Optional[Any] = None): """ :param str status_code: The method response's status code, which you map to an IntegrationResponse. :param Any response_models: The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value. :param Any response_parameters: Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value. 
""" pulumi.set(__self__, "status_code", status_code) if response_models is not None: pulumi.set(__self__, "response_models", response_models) if response_parameters is not None: pulumi.set(__self__, "response_parameters", response_parameters) @property @pulumi.getter(name="statusCode") def status_code(self) -> str: """ The method response's status code, which you map to an IntegrationResponse. """ return pulumi.get(self, "status_code") @property @pulumi.getter(name="responseModels") def response_models(self) -> Optional[Any]: """ The resources used for the response's content type. Specify response models as key-value pairs (string-to-string maps), with a content type as the key and a Model resource name as the value. """ return pulumi.get(self, "response_models") @property @pulumi.getter(name="responseParameters") def response_parameters(self) -> Optional[Any]: """ Response parameters that API Gateway sends to the client that called a method. Specify response parameters as key-value pairs (string-to-Boolean maps), with a destination as the key and a Boolean as the value. """ return pulumi.get(self, "response_parameters") @pulumi.output_type class RestApiEndpointConfiguration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "vpcEndpointIds": suggest = "vpc_endpoint_ids" if suggest: pulumi.log.warn(f"Key '{key}' not found in RestApiEndpointConfiguration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: RestApiEndpointConfiguration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: RestApiEndpointConfiguration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, types: Optional[Sequence[str]] = None, vpc_endpoint_ids: Optional[Sequence[str]] = None): if types is not None: pulumi.set(__self__, "types", types) if vpc_endpoint_ids is not None: pulumi.set(__self__, "vpc_endpoint_ids", vpc_endpoint_ids) @property @pulumi.getter def types(self) -> Optional[Sequence[str]]: return pulumi.get(self, "types") @property @pulumi.getter(name="vpcEndpointIds") def vpc_endpoint_ids(self) -> Optional[Sequence[str]]: return pulumi.get(self, "vpc_endpoint_ids") @pulumi.output_type class RestApiS3Location(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "eTag": suggest = "e_tag" if suggest: pulumi.log.warn(f"Key '{key}' not found in RestApiS3Location. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: RestApiS3Location.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: RestApiS3Location.__key_warning(key) return super().get(key, default) def __init__(__self__, *, bucket: Optional[str] = None, e_tag: Optional[str] = None, key: Optional[str] = None, version: Optional[str] = None): if bucket is not None: pulumi.set(__self__, "bucket", bucket) if e_tag is not None: pulumi.set(__self__, "e_tag", e_tag) if key is not None: pulumi.set(__self__, "key", key) if version is not None: pulumi.set(__self__, "version", version) @property @pulumi.getter def bucket(self) -> Optional[str]: return pulumi.get(self, "bucket") @property @pulumi.getter(name="eTag") def e_tag(self) -> Optional[str]: return pulumi.get(self, "e_tag") @property @pulumi.getter def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter def version(self) -> Optional[str]: return pulumi.get(self, "version") @pulumi.output_type class RestApiTag(dict): def __init__(__self__, *, key: str, value: str): pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: return pulumi.get(self, "value") @pulumi.output_type class StageAccessLogSetting(dict): """ Specifies settings for logging access in this stage. """ @staticmethod def __key_warning(key: str): suggest = None if key == "destinationArn": suggest = "destination_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in StageAccessLogSetting. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: StageAccessLogSetting.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: StageAccessLogSetting.__key_warning(key) return super().get(key, default) def __init__(__self__, *, destination_arn: Optional[str] = None, format: Optional[str] = None): """ Specifies settings for logging access in this stage. :param str destination_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging. :param str format: A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging. """ if destination_arn is not None: pulumi.set(__self__, "destination_arn", destination_arn) if format is not None: pulumi.set(__self__, "format", format) @property @pulumi.getter(name="destinationArn") def destination_arn(self) -> Optional[str]: """ The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. This parameter is required to enable access logging. 
""" return pulumi.get(self, "destination_arn") @property @pulumi.getter def format(self) -> Optional[str]: """ A single line format of the access logs of data, as specified by selected $context variables (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html#context-variable-reference). The format must include at least $context.requestId. This parameter is required to enable access logging. """ return pulumi.get(self, "format") @pulumi.output_type class StageCanarySetting(dict): """ Specifies settings for the canary deployment in this stage. """ @staticmethod def __key_warning(key: str): suggest = None if key == "deploymentId": suggest = "deployment_id" elif key == "percentTraffic": suggest = "percent_traffic" elif key == "stageVariableOverrides": suggest = "stage_variable_overrides" elif key == "useStageCache": suggest = "use_stage_cache" if suggest: pulumi.log.warn(f"Key '{key}' not found in StageCanarySetting. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: StageCanarySetting.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: StageCanarySetting.__key_warning(key) return super().get(key, default) def __init__(__self__, *, deployment_id: Optional[str] = None, percent_traffic: Optional[float] = None, stage_variable_overrides: Optional[Any] = None, use_stage_cache: Optional[bool] = None): """ Specifies settings for the canary deployment in this stage. :param str deployment_id: The identifier of the deployment that the stage points to. :param float percent_traffic: The percentage (0-100) of traffic diverted to a canary deployment. :param Any stage_variable_overrides: Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. :param bool use_stage_cache: Whether the canary deployment uses the stage cache or not. """ if deployment_id is not None: pulumi.set(__self__, "deployment_id", deployment_id) if percent_traffic is not None: pulumi.set(__self__, "percent_traffic", percent_traffic) if stage_variable_overrides is not None: pulumi.set(__self__, "stage_variable_overrides", stage_variable_overrides) if use_stage_cache is not None: pulumi.set(__self__, "use_stage_cache", use_stage_cache) @property @pulumi.getter(name="deploymentId") def deployment_id(self) -> Optional[str]: """ The identifier of the deployment that the stage points to. """ return pulumi.get(self, "deployment_id") @property @pulumi.getter(name="percentTraffic") def percent_traffic(self) -> Optional[float]: """ The percentage (0-100) of traffic diverted to a canary deployment. """ return pulumi.get(self, "percent_traffic") @property @pulumi.getter(name="stageVariableOverrides") def stage_variable_overrides(self) -> Optional[Any]: """ Stage variables overridden for a canary release deployment, including new stage variables introduced in the canary. These stage variables are represented as a string-to-string map between stage variable names and their values. """ return pulumi.get(self, "stage_variable_overrides") @property @pulumi.getter(name="useStageCache") def use_stage_cache(self) -> Optional[bool]: """ Whether the canary deployment uses the stage cache or not. """ return pulumi.get(self, "use_stage_cache") @pulumi.output_type class StageMethodSetting(dict): """ Configures settings for all methods in a stage. 
""" @staticmethod def __key_warning(key: str): suggest = None if key == "cacheDataEncrypted": suggest = "cache_data_encrypted" elif key == "cacheTtlInSeconds": suggest = "cache_ttl_in_seconds" elif key == "cachingEnabled": suggest = "caching_enabled" elif key == "dataTraceEnabled": suggest = "data_trace_enabled" elif key == "httpMethod": suggest = "http_method" elif key == "loggingLevel": suggest = "logging_level" elif key == "metricsEnabled": suggest = "metrics_enabled" elif key == "resourcePath": suggest = "resource_path" elif key == "throttlingBurstLimit": suggest = "throttling_burst_limit" elif key == "throttlingRateLimit": suggest = "throttling_rate_limit" if suggest: pulumi.log.warn(f"Key '{key}' not found in StageMethodSetting. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: StageMethodSetting.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: StageMethodSetting.__key_warning(key) return super().get(key, default) def __init__(__self__, *, cache_data_encrypted: Optional[bool] = None, cache_ttl_in_seconds: Optional[int] = None, caching_enabled: Optional[bool] = None, data_trace_enabled: Optional[bool] = None, http_method: Optional[str] = None, logging_level: Optional[str] = None, metrics_enabled: Optional[bool] = None, resource_path: Optional[str] = None, throttling_burst_limit: Optional[int] = None, throttling_rate_limit: Optional[float] = None): """ Configures settings for all methods in a stage. :param bool cache_data_encrypted: Indicates whether the cached responses are encrypted. :param int cache_ttl_in_seconds: The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. :param bool caching_enabled: Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses. :param bool data_trace_enabled: Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. :param str http_method: The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods. :param str logging_level: The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference. :param bool metrics_enabled: Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage. :param str resource_path: The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods. :param int throttling_burst_limit: The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. :param float throttling_rate_limit: The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. 
""" if cache_data_encrypted is not None: pulumi.set(__self__, "cache_data_encrypted", cache_data_encrypted) if cache_ttl_in_seconds is not None: pulumi.set(__self__, "cache_ttl_in_seconds", cache_ttl_in_seconds) if caching_enabled is not None: pulumi.set(__self__, "caching_enabled", caching_enabled) if data_trace_enabled is not None: pulumi.set(__self__, "data_trace_enabled", data_trace_enabled) if http_method is not None: pulumi.set(__self__, "http_method", http_method) if logging_level is not None: pulumi.set(__self__, "logging_level", logging_level) if metrics_enabled is not None: pulumi.set(__self__, "metrics_enabled", metrics_enabled) if resource_path is not None: pulumi.set(__self__, "resource_path", resource_path) if throttling_burst_limit is not None: pulumi.set(__self__, "throttling_burst_limit", throttling_burst_limit) if throttling_rate_limit is not None: pulumi.set(__self__, "throttling_rate_limit", throttling_rate_limit) @property @pulumi.getter(name="cacheDataEncrypted") def cache_data_encrypted(self) -> Optional[bool]: """ Indicates whether the cached responses are encrypted. """ return pulumi.get(self, "cache_data_encrypted") @property @pulumi.getter(name="cacheTtlInSeconds") def cache_ttl_in_seconds(self) -> Optional[int]: """ The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses. """ return pulumi.get(self, "cache_ttl_in_seconds") @property @pulumi.getter(name="cachingEnabled") def caching_enabled(self) -> Optional[bool]: """ Indicates whether responses are cached and returned for requests. You must enable a cache cluster on the stage to cache responses. """ return pulumi.get(self, "caching_enabled") @property @pulumi.getter(name="dataTraceEnabled") def data_trace_enabled(self) -> Optional[bool]: """ Indicates whether data trace logging is enabled for methods in the stage. API Gateway pushes these logs to Amazon CloudWatch Logs. """ return pulumi.get(self, "data_trace_enabled") @property @pulumi.getter(name="httpMethod") def http_method(self) -> Optional[str]: """ The HTTP method. You can use an asterisk (*) as a wildcard to apply method settings to multiple methods. """ return pulumi.get(self, "http_method") @property @pulumi.getter(name="loggingLevel") def logging_level(self) -> Optional[str]: """ The logging level for this method. For valid values, see the loggingLevel property of the Stage (https://docs.aws.amazon.com/apigateway/api-reference/resource/stage/#loggingLevel) resource in the Amazon API Gateway API Reference. """ return pulumi.get(self, "logging_level") @property @pulumi.getter(name="metricsEnabled") def metrics_enabled(self) -> Optional[bool]: """ Indicates whether Amazon CloudWatch metrics are enabled for methods in the stage. """ return pulumi.get(self, "metrics_enabled") @property @pulumi.getter(name="resourcePath") def resource_path(self) -> Optional[str]: """ The resource path for this method. Forward slashes (/) are encoded as ~1 and the initial slash must include a forward slash. For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. To specify the root path, use only a slash (/). You can use an asterisk (*) as a wildcard to apply method settings to multiple methods. """ return pulumi.get(self, "resource_path") @property @pulumi.getter(name="throttlingBurstLimit") def throttling_burst_limit(self) -> Optional[int]: """ The number of burst requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. 
""" return pulumi.get(self, "throttling_burst_limit") @property @pulumi.getter(name="throttlingRateLimit") def throttling_rate_limit(self) -> Optional[float]: """ The number of steady-state requests per second that API Gateway permits across all APIs, stages, and methods in your AWS account. """ return pulumi.get(self, "throttling_rate_limit") @pulumi.output_type class StageTag(dict): """ Identify and categorize resources. """ def __init__(__self__, *, key: str, value: str): """ Identify and categorize resources. :param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. :param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: """ The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: """ The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. """ return pulumi.get(self, "value") @pulumi.output_type class UsagePlanApiStage(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "apiId": suggest = "api_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in UsagePlanApiStage. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: UsagePlanApiStage.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: UsagePlanApiStage.__key_warning(key) return super().get(key, default) def __init__(__self__, *, api_id: Optional[str] = None, stage: Optional[str] = None, throttle: Optional[Any] = None): """ :param str api_id: The ID of an API that is in the specified Stage property that you want to associate with the usage plan. :param str stage: The name of the stage to associate with the usage plan. :param Any throttle: Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed. """ if api_id is not None: pulumi.set(__self__, "api_id", api_id) if stage is not None: pulumi.set(__self__, "stage", stage) if throttle is not None: pulumi.set(__self__, "throttle", throttle) @property @pulumi.getter(name="apiId") def api_id(self) -> Optional[str]: """ The ID of an API that is in the specified Stage property that you want to associate with the usage plan. """ return pulumi.get(self, "api_id") @property @pulumi.getter def stage(self) -> Optional[str]: """ The name of the stage to associate with the usage plan. """ return pulumi.get(self, "stage") @property @pulumi.getter def throttle(self) -> Optional[Any]: """ Map containing method-level throttling information for an API stage in a usage plan. The key for the map is the path and method for which to configure custom throttling, for example, '/pets/GET'. Duplicates are not allowed. 
""" return pulumi.get(self, "throttle") @pulumi.output_type class UsagePlanQuotaSettings(dict): def __init__(__self__, *, limit: Optional[int] = None, offset: Optional[int] = None, period: Optional[str] = None): """ :param int limit: The maximum number of requests that users can make within the specified time period. :param int offset: For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period. :param str period: The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference. """ if limit is not None: pulumi.set(__self__, "limit", limit) if offset is not None: pulumi.set(__self__, "offset", offset) if period is not None: pulumi.set(__self__, "period", period) @property @pulumi.getter def limit(self) -> Optional[int]: """ The maximum number of requests that users can make within the specified time period. """ return pulumi.get(self, "limit") @property @pulumi.getter def offset(self) -> Optional[int]: """ For the initial time period, the number of requests to subtract from the specified limit. When you first implement a usage plan, the plan might start in the middle of the week or month. With this property, you can decrease the limit for this initial time period. """ return pulumi.get(self, "offset") @property @pulumi.getter def period(self) -> Optional[str]: """ The time period for which the maximum limit of requests applies, such as DAY or WEEK. For valid values, see the period property for the UsagePlan resource in the Amazon API Gateway REST API Reference. """ return pulumi.get(self, "period") @pulumi.output_type class UsagePlanTag(dict): def __init__(__self__, *, key: str, value: str): """ :param str key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. :param str value: The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: """ The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: """ The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. """ return pulumi.get(self, "value") @pulumi.output_type class UsagePlanThrottleSettings(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "burstLimit": suggest = "burst_limit" elif key == "rateLimit": suggest = "rate_limit" if suggest: pulumi.log.warn(f"Key '{key}' not found in UsagePlanThrottleSettings. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: UsagePlanThrottleSettings.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: UsagePlanThrottleSettings.__key_warning(key) return super().get(key, default) def __init__(__self__, *, burst_limit: Optional[int] = None, rate_limit: Optional[float] = None): """ :param int burst_limit: The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity. :param float rate_limit: The API request steady-state rate limit (average requests per second over an extended period of time). """ if burst_limit is not None: pulumi.set(__self__, "burst_limit", burst_limit) if rate_limit is not None: pulumi.set(__self__, "rate_limit", rate_limit) @property @pulumi.getter(name="burstLimit") def burst_limit(self) -> Optional[int]: """ The maximum API request rate limit over a time ranging from one to a few seconds. The maximum API request rate limit depends on whether the underlying token bucket is at its full capacity. """ return pulumi.get(self, "burst_limit") @property @pulumi.getter(name="rateLimit") def rate_limit(self) -> Optional[float]: """ The API request steady-state rate limit (average requests per second over an extended period of time). """ return pulumi.get(self, "rate_limit") @pulumi.output_type class VpcLinkTag(dict): def __init__(__self__, *, key: str, value: str): pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: return pulumi.get(self, "value")
42.85493
389
0.655427
81,224
0.978485
0
0
81,784
0.985231
0
0
39,551
0.476461
f7f110d1e3f278e009edf38f3492952620bab08d
619
py
Python
bin/training_data/redmagic_ds_training_data.py
mclaughlin6464/pearce
746f2bf4bf45e904d66996e003043661a01423ba
[ "MIT" ]
null
null
null
bin/training_data/redmagic_ds_training_data.py
mclaughlin6464/pearce
746f2bf4bf45e904d66996e003043661a01423ba
[ "MIT" ]
16
2016-11-04T22:24:32.000Z
2018-05-01T22:53:39.000Z
bin/training_data/redmagic_ds_training_data.py
mclaughlin6464/pearce
746f2bf4bf45e904d66996e003043661a01423ba
[ "MIT" ]
3
2016-10-04T08:07:52.000Z
2019-05-03T23:50:01.000Z
#!/.conda/envs/hodemulator/bin/python from pearce.emulator import make_training_data from pearce.emulator import DEFAULT_PARAMS as ordered_params ordered_params['f_c'] = (0.05, .5) ordered_params['logMmin'] = (11.5, 13.0)#(13.0, 14.5) ordered_params['sigma_logM'] = (0.05, 1.0) ordered_params['logM1'] = (12.0, 15.0) ordered_params['alpha'] = (0.8, 1.5) ordered_params.update({'mean_occupation_centrals_assembias_param1':( -1.0, 1.0), 'mean_occupation_satellites_assembias_param1':( -1.0, 1.0)}) make_training_data('/u/ki/swmclau2/Git/pearce/bin/training_data/ds_redmagic.cfg',ordered_params)
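# --- Illustrative sketch (editor's addition; not part of the script above) ---
# The script above only declares a bound for each HOD parameter and hands the dict to
# pearce's make_training_data together with a config file; how pearce actually samples
# that space is not shown here. The snippet below is a generic illustration of the usual
# way an emulator training design is drawn over such bounds (a Latin hypercube), using
# scipy instead of pearce, and the number of points is made up for the example.

import numpy as np
from scipy.stats import qmc

# Parameter bounds copied from the script above.
bounds = {'f_c': (0.05, 0.5),
          'logMmin': (11.5, 13.0),
          'sigma_logM': (0.05, 1.0),
          'logM1': (12.0, 15.0),
          'alpha': (0.8, 1.5),
          'mean_occupation_centrals_assembias_param1': (-1.0, 1.0),
          'mean_occupation_satellites_assembias_param1': (-1.0, 1.0)}

lower, upper = np.array(list(bounds.values())).T
sampler = qmc.LatinHypercube(d=len(bounds), seed=0)
n_points = 500  # illustrative; in pearce the design size comes from the cfg file
design = qmc.scale(sampler.random(n_points), lower, upper)  # shape (n_points, n_params)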
38.6875
96
0.726979
0
0
0
0
0
0
0
0
239
0.386107
f7f1a1740efc36292fbb917d24b84a88544cbd25
40,478
py
Python
src/legohdl/workspace.py
c-rus/legoHDL
d7d77c05514c8d6dc1070c4efe589f392307daac
[ "MIT" ]
6
2021-12-16T05:40:37.000Z
2022-02-07T15:04:39.000Z
src/legohdl/workspace.py
c-rus/legoHDL
d7d77c05514c8d6dc1070c4efe589f392307daac
[ "MIT" ]
61
2021-09-28T03:05:13.000Z
2022-01-16T00:03:14.000Z
src/legohdl/workspace.py
c-rus/legoHDL
d7d77c05514c8d6dc1070c4efe589f392307daac
[ "MIT" ]
1
2021-12-16T07:03:18.000Z
2021-12-16T07:03:18.000Z
# ------------------------------------------------------------------------------ # Project: legohdl # Script: workspace.py # Author: Chase Ruskin # Description: # The Workspace class. A Workspace object has a path and a list of available # vendors. This is what the user keeps their work's scope within for a given # "organization". # ------------------------------------------------------------------------------ import os, shutil, glob import logging as log from datetime import datetime from .vendor import Vendor from .apparatus import Apparatus as apt from .cfg import Cfg, Section, Key from .map import Map from .git import Git from .block import Block class Workspace: #store all workspaces in dictionary Jar = Map() #active-workspace is a workspace object _ActiveWorkspace = None DIR = apt.fs(apt.HIDDEN+"workspaces/") LOG_FILE = "refresh.log" MIN_RATE = -1 MAX_RATE = 1440 def __init__(self, name, path, vendors=[], ask=True): ''' Create a workspace instance. Parameters: name (str): the identity for the workspace path (str): the local path where blocks will be looked for vendors ([str]): the list of vendors that are tied to this workspace ask (bool): will ask user if wishing to enter workspace path Returns: None ''' self._name = name #do not create workspace if the name is already taken if(self.getName().lower() in self.Jar.keys()): log.error("Skipping workspace "+self.getName()+" due to duplicate naming conflict.") return #set the path self._path = '' self.setPath(path) #do not create workspace if the path is empty if(self.getPath() == ''): if(ask == False): log.error("Skipping workspace "+self.getName()+" due to empty local path.") return else: #keep asking to set path until one is decided/input try: path = input("Enter path for workspace "+self.getName()+": ") except KeyboardInterrupt: apt.CFG.remove('workspace.'+self.getName()) Workspace.save(inc_active=False) print() exit(log.info("Workspace not created.")) while(self.setPath(path) == False): try: path = input("Enter path for workspace "+self.getName()+": ") except KeyboardInterrupt: apt.CFG.remove('workspace.'+self.getName()) Workspace.save(inc_active=False) print() exit(log.info("Workspace not created.")) self._ws_dir = apt.fs(self.DIR+self.getName()+"/") #ensure all workspace hidden directories exist if(os.path.isdir(self.getDir()) == False): log.info("Setting up workspace "+self.getName()+"...") os.makedirs(self.getDir(), exist_ok=True) #create workspace's cache where installed blocks will be stored os.makedirs(self.getDir()+"cache", exist_ok=True) #create the refresh log if DNE if(os.path.isfile(self.getDir()+self.LOG_FILE) == False): open(self.getDir()+self.LOG_FILE, 'w').close() self._vendors = [] #find all vendor objects by name and store in list for vndr in vendors: if(vndr.lower() in Vendor.Jar.keys()): self._vendors += [Vendor.Jar[vndr]] else: log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".") pass #add to class Jar self.Jar[self.getName()] = self pass def setPath(self, p): ''' Set the workspace's local path to a new value. Will ask user if okay to create the path if DNE. 
        Parameters:
            p (str): the path string
        Returns:
            (bool): true if successfully changed the path attribute
        '''
        #cannot set an empty path
        if(p == '' or p == None):
            log.info("Local path for workspace "+self.getName()+" cannot be empty.")
            return False

        p = apt.fs(p)
        #create the workspace's local path if it does not exist
        if(os.path.exists(p) == False):
            #prompt user
            carry_on = apt.confirmation("Workspace "+self.getName()+"'s local path does not exist. Create "+p+"?")
            if(carry_on):
                os.makedirs(p, exist_ok=True)
                self._path = p
                return True
            else:
                log.info("Did not set "+p+" as local path.")
                return False
        else:
            self._path = p
            return True


    def setName(self, n):
        '''
        Change the workspace's name if the name is not already taken.

        Parameters:
            n (str): new name for workspace
        Returns:
            (bool): true if name successfully altered and updated in Jar
        '''
        if(n == '' or n == None):
            log.error("Workspace name cannot be empty.")
            return False

        if(n.lower() in self.Jar.keys()):
            log.error("Cannot rename workspace to "+n+" due to name conflict.")
            return False
        else:
            #remove old name from Jar
            if(self.getName().lower() in self.Jar.keys()):
                del self.Jar[self.getName()]
            #rename hidden directory if exists
            new_dir = apt.fs(self.DIR+n+"/")
            if(hasattr(self, "_ws_dir")):
                os.rename(self.getDir(), new_dir)
            #set the hidden workspace directory
            self._ws_dir = new_dir
            #change to new name
            self._name = n
            #update the Jar
            self.Jar[self.getName()] = self
        return True


    def remove(self):
        '''
        Removes the workspace object from the Jar and its hidden directory.

        Parameters:
            None
        Returns:
            None
        '''
        log.info("Removing workspace "+self.getName()+"...")
        #delete the hidden workspace directory
        shutil.rmtree(self.getDir(), onerror=apt.rmReadOnly)
        #remove from class Jar
        del self.Jar[self.getName()]
        #remove from cfg file
        apt.CFG.remove('workspace.'+self.getName())
        apt.CFG.write()
        pass


    def linkVendor(self, vndr):
        '''
        Attempts to add a vendor to the workspace's vendor list.

        Parameters:
            vndr (str): name of the vendor to add
        Returns:
            (bool): true if the vendor list was modified (successful add)
        '''
        if(vndr.lower() in Vendor.Jar.keys()):
            vndr_obj = Vendor.Jar[vndr]
            if(vndr_obj in self.getVendors()):
                log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
                return False
            else:
                log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
                self._vendors += [vndr_obj]
                return True
        else:
            log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
            return False


    def setVendors(self, vndrs):
        '''
        Overrides entire _vendors attr by setting it equal to 'vndrs'.

        Parameters:
            vndrs ([str]): list of vendors
        Returns:
            (bool): success if all vendors listed were added
        '''
        #reset vendors list
        self._vendors = []
        success = True
        #iterate through every given vendor
        for vndr in vndrs:
            #verify the vendor exists
            if(vndr.lower() in Vendor.Jar.keys()):
                vndr_obj = Vendor.Jar[vndr]
                #check if the vendor has already been linked
                if(vndr_obj in self.getVendors()):
                    log.info("Vendor "+vndr_obj.getName()+" is already linked to this workspace.")
                #link the vendor to this workspace
                else:
                    log.info("Linking vendor "+vndr_obj.getName()+" to the workspace...")
                    self._vendors += [vndr_obj]
            else:
                log.warning("Could not link unknown vendor "+vndr+" to "+self.getName()+".")
                #mark the overall result as failed (was previously misspelled 'sucess')
                success = False

        return success


    def unlinkVendor(self, vndr):
        '''
        Attempts to remove a vendor from the workspace's vendor list.
Parameters: vndr (str): name of the vendor to remove Returns: (bool): true if the vendor list was modified (successful remove) ''' if(vndr.lower() in Vendor.Jar.keys()): vndr_obj = Vendor.Jar[vndr] if(vndr_obj not in self.getVendors()): log.info("Vendor "+vndr_obj.getName()+" is already unlinked from the workspace.") return False else: log.info("Unlinking vendor "+vndr_obj.getName()+" from the workspace...") self._vendors.remove(vndr_obj) return True else: log.warning("Could not unlink unknown vendor "+vndr+" from "+self.getName()+".") return False def loadBlocks(self, id_dsgns=False): ''' Loads all blocks found at all levels: dnld (workspace path), instl (workspace cache), avail (workspace vendors). When id_dsgns is True, this method uses the 'multi-develop' setting to determine which level has precedence in loadHDL(). 'multi-develop' set to False will only loadHDL() from cache. 'multi-develop' set to True will first try to loadHDL() from dnld, and if DNE, then try to loadHDL() from block's cache. Either way, if inside a current block, that block's HDL will be loaded over its cache. Dynamically creates _visible_blocks ([Block]) attribute to be reused. Parameters: id_dsgns (bool): identify design units (loadHDL) from blocks Returns: _visible_blocks ([Block]): list of all block objects in cache or path ''' if(hasattr(self, "_visible_blocks")): return self._visible_blocks self._visible_blocks = [] #read the setting for multi-develop mult_dev = apt.getMultiDevelop() #1. Search for downloaded blocks #glob on the local workspace path #print("Local Blocks on:",self.getPath()) marker_files = glob.glob(self.getPath()+"**/*/"+apt.MARKER, recursive=True) #iterate through all found downloads for mf in marker_files: b = Block(mf, self, Block.Level.DNLD) #if the user is within a current block, load the HDL from its DNLD level (not INSTL) if(mult_dev == True or Block.getCurrent(bypass=True) == b): self._visible_blocks += [b] if(id_dsgns): b.loadHDL() pass #2. Search for installed blocks #glob on the workspace cache path #print("Cache Blocks on:",self.getCachePath()) marker_files = glob.glob(self.getCachePath()+"**/*/"+apt.MARKER, recursive=True) #iterate through all found installations for mf in marker_files: #the block must also have a valid git repository at its root root,_ = os.path.split(mf) #note: only the head installation has the git repository if(Git.isValidRepo(root, remote=False)): b = Block(mf, self, Block.Level.INSTL) #get the spot for this block's download dnld_b = Block.Inventory[b.M()][b.L()][b.N()][Block.Level.DNLD.value] #add this block if a download DNE or the dnld does not match current when #not in multi-develop mode if(dnld_b == None or (mult_dev == False and Block.getCurrent(bypass=True) != dnld_b)): self._visible_blocks += [b] if(id_dsgns): b.loadHDL() pass #3. Search for available blocks #glob on each vendor path marker_files = [] #find all marker files in each of the workspace's vendors for vndr in self.getVendors(): marker_files += glob.glob(vndr.getVendorDir()+"**/*/"+apt.MARKER, recursive=True) #iterate through all found availables for mf in marker_files: b = Block(mf, self, Block.Level.AVAIL) #do not add this block to list of visible blocks because it has no #units associated with it, only metadata pass #4. 
ID all specific version blocks if identifying designs (except current block) spec_vers_blocks = [] for vis_block in self._visible_blocks: if(vis_block == Block.getCurrent(bypass=True)): continue for spec_block in vis_block.getInstalls().values(): spec_vers_blocks += [spec_block] if(id_dsgns): spec_block.loadHDL() pass pass self._visible_blocks += spec_vers_blocks return self._visible_blocks def shortcut(self, title, req_entity=False, visibility=True, ref_current=True): ''' Returns the Block from a shortened title. If title is empty and 'ref_current' is set, then tries to refer to the current block. Sometimes an entity is required for certain commands; so it can be assumed entity (instead of block name) if only thing given. Parameters: title (str): partial or full M.L.N with optional E attached req_entity (bool): determine if only thing given then it is an entity visibility (bool): determine if to only look for visible blocks ref_current (bool): determine if to try to assign empty title to current block Returns: (Block): the identified block from the shortened title ''' if(title == None): title = '' #split into pieces pieces = title.split('.') sects = ['']*3 diff = 3 - len(pieces) for i in range(len(pieces)-1, -1, -1): sects[diff+i] = pieces[i] #check final piece if it has an entity attached entity = '' if(sects[2].count(apt.ENTITY_DELIM)): i = sects[2].find(apt.ENTITY_DELIM) entity = sects[2][i+1:] sects[2] = sects[2][:i] #assume only name given is actually the entity elif(req_entity): entity = sects[2] sects[2] = '' # [!] load all necessary blocks before searching blocks = self.loadBlocks() #use all blocks when visibility is off :todo: is this design intent? if(visibility == False): blocks = Block.getAllBlocks() #track list of possible blocks as moving up the chain possible_blocks = [] #search for an entity if(len(entity)): #collect list of all entities reg = Map() reg[entity] = [] #iterate through every block and create a mapping for their entity names for bk in blocks: #get the entity names from this block es = bk.loadHDL(returnnames=True) #print(es) #create mappings of entity names to their block owners for e in es: if(e.lower() not in reg.keys()): reg[e] = [] reg[e] += [bk] #see how many blocks were fit to entity name's mapping num_blocks = len(reg[entity]) #algorithm only detected one possible solution if(num_blocks == 1): #make sure rest of sections are correct before returning result potential = reg[entity][0] title = potential.getTitle(index=2, dist=2) #verify each part of block identifier matches what was requested for i in range(len(sects)): #print(sects[i]) if(len(sects[i]) and sects[i].lower() != title[i].lower()): return None pass return potential #algorithm detected multiple possible solutions (cannot infer) elif(num_blocks > 1): possible_blocks = reg[entity] #only was given an entity name, algorithm cannot solve requested entity if(len(sects[2]) == 0): log.info("Ambiguous unit; conflicts with") #display the units/titles that conflict with input for bk in reg[entity]: print('\t '+bk.getFull()+":"+entity) print() exit() #no blocks matched the entity name being passed else: return None pass #search through all block names for start in range(len(sects)-1, -1, -1): term = sects[start] #exit loop if next term is empty if(len(term) == 0): break reg = Map() reg[term] = [] for bk in blocks: t = bk.getTitle(index=start, dist=0)[0] #store the block under the given section name if(t.lower() not in reg.keys()): reg[t] = [] reg[t] += [bk] #count how many blocks occupy this same name 
num_blocks = len(reg[term]) #algorithm only detected one possible solution if(num_blocks == 1): #make sure rest of sections are correct before returning result potential = reg[term][0] title = potential.getTitle(index=2, dist=2) #verify each part of block identifier matches what was requested for i in range(len(sects)): #print(sects[i]) if(len(sects[i]) and sects[i].lower() != title[i].lower()): return None pass return potential #algorithm detected multiple solutions (cannot infer on this step) elif(num_blocks > 1): #compare with blocks for a match and dwindle down choices next_blocks = [] for bk in reg[term]: if(bk in possible_blocks or (start == len(sects)-1 and entity == '')): next_blocks += [bk] #dwindled down to a single block if(len(next_blocks) == 1): #print("FOUND:",next_blocks[0].getTitle(index=2, dist=2)) return next_blocks[0] #carry on to using next title section if(len(sects[start-1])): #continue to using next term possible_blocks = next_blocks continue else: #ran out of guesses...report the conflicting titles/units if(req_entity): log.info("Ambiguous unit; conflicts with") else: log.info("Ambiguous title; conflicts with") for bk in reg[term]: if(req_entity): print('\t '+bk.getFull()+":"+entity) else: print('\t '+bk.getFull()) exit(print()) pass #using the current block if title is empty string if(ref_current and (title == '' or title == None)): return Block.getCurrent() #return None if all attempts have failed and not returned anything yet return None def decodeUnits(self): ''' Decodes every available unit to get the complete graphing data structure. Parameters: None Returns: None ''' blocks = self.loadBlocks() #print(blocks) log.info("Collecting all unit data...") for b in blocks: us = b.loadHDL() for u in us.values(): u.getLanguageFile().decode(u, recursive=False) log.info("done.") pass def listBlocks(self, title, alpha=False, instl=False, dnld=False, avail=False): ''' Print a formatted table of the available blocks. Parameters: title (str): block title to be broken into parts for searching alpha (bool): determine if to alphabetize the block list order (L.N.V) instl (bool): determine if to capture only blocks that are installed dnld (bool): determine if to capture only blocks that are downloaded avail (bool): determine if to capture blocks available from vendor Returns: None ''' #[!] 
load the necessary blocks self.loadBlocks() #collect if multi-develop is on mult_dev = apt.getMultiDevelop() #split the title into parts M,L,N,_ = Block.snapTitle(title, inc_ent=False) #get all blocks from the catalog #store each block's text line in a map to sort keys for alpha flag catalog = Map() #iterate through every vendor for vndr_k,vndrs in Block.Inventory.items(): if(vndr_k.startswith(M.lower()) == False): continue #iterate through every library for lib_k,libs in vndrs.items(): if(lib_k.startswith(L.lower()) == False): continue #iterate through every block for blk_k,lvls in libs.items(): if(blk_k.startswith(N.lower()) == False): continue downloaded = installed = available = ' ' disp_d = disp_i = disp_a = False #if none were set on command-line default to display everything if((dnld or instl or avail) == False): dnld = instl = avail = True #with each lower level, overwrite the block object to print if(lvls[Block.Level.AVAIL.value] != None): bk = lvls[Block.Level.AVAIL.value] available = 'A' disp_a = True if(lvls[Block.Level.INSTL.value] != None): bk = lvls[Block.Level.INSTL.value] installed = 'I' disp_i = True if(lvls[Block.Level.DNLD.value] != None): if(dnld): bk = lvls[Block.Level.DNLD.value] downloaded = 'D' # if(mult_dev): # downloaded = 'D' # installed = installed.lower() disp_d = True #one condition pair must be true to display the block if((disp_a and avail) or (disp_i and instl) or (disp_d and dnld)): pass else: continue #character to separate different status bits spacer = ' ' #format the status column's data sts = downloaded + spacer + installed + spacer + available #leave version empty if its been unreleased v = '' if(bk.getVersion() == '0.0.0') else bk.getVersion() #check if can be updated #prioritize installation level for checking updates instllr = bk.getLvlBlock(Block.Level.INSTL) cmp_v = instllr.getVersion() if(instllr != None and mult_dev == False) else bk.getVersion() #a '^' is an update symbol indicating the latest referenced version (dnld or instl) is not the actually the latest version found if(Block.cmpVer(bk.getHighestAvailVersion(), cmp_v) != cmp_v): sts = sts+' ^' v = cmp_v #format the data to print to the console and store in catalog (L.N.V str format) catalog[bk.L()+'.'+bk.N()+'.'+bk.M()] = '{:<16}'.format(bk.L())+' '+'{:<20}'.format(bk.N())+' '+'{:<8}'.format(sts)+' '+'{:<10}'.format(v)+' '+'{:<16}'.format(bk.M()) pass pass keys = list(catalog.keys()) #check if to sort by alphabet if(alpha): keys.sort() #print(keys) print('{:<16}'.format("Library"),'{:<20}'.format("Block"),'{:<8}'.format("Status"+("*"*int(mult_dev))),'{:<10}'.format("Version"),'{:<16}'.format("Vendor")) print("-"*16+" "+"-"*20+" "+"-"*8+" "+"-"*10+" "+"-"*16) #iterate through catalog and print each textline for k in keys: print(catalog[k]) pass def listUnits(self, title, alpha=False, usable=False, ignore_tb=False): ''' Print a formatted table of all the design units. Parameters: title (str): block title to be broken into parts for searching alpha (bool): determine if to alphabetize the block list order (E.V.L.N) usable (bool): determine if to display units that can be used ignore_tb (bool): determine if to ignore testbench files Returns: None ''' #[!] 
load blocks into inventory visible = self.loadBlocks() #:todo: add flag to print 'variations' of an entity/unit (what specific version names exist) #todo: print status of the unit and which status is usable (D or I) M,L,N,V,E = Block.snapTitle(title, inc_ent=True) #print(M,L,N,V,E) #store each entity's print line in map (key = <unit>:<block-id>) to ensure uniqueness catalog = Map() for bk in Block.getAllBlocks(): #for lvl in Block.Inventory[bk.M()][bk.L()][bk.N()]: block_title = bk.getFull(inc_ver=False) if(bk.M().lower().startswith(M.lower()) == False): continue if(bk.L().lower().startswith(L.lower()) == False): continue if(bk.N().lower().startswith(N.lower()) == False): continue #collect all units if(apt.getMultiDevelop() == False): if(bk.getLvlBlock(Block.Level.INSTL) != None): bk = bk.getLvlBlock(Block.Level.INSTL) #skip this block if only displaying usable units and multi-develop off elif(usable): continue units = bk.loadHDL(returnnames=False).values() for u in units: if(len(E) and u.E().lower().startswith(E.lower()) == False): continue if(ignore_tb and u.isTb()): continue #format if unit is visible/usable vis = '-' if(bk in visible): vis = 'yes' #format design unit name according to its natural language dsgn = u.getDesign().name.lower() if(u.getLang() == u.Language.VERILOG and dsgn == 'entity'): dsgn = 'module' catalog[u.E()+':'+block_title] = '{:<22}'.format(u.E())+' '+'{:<7}'.format(vis)+' '+'{:<10}'.format(dsgn)+' '+'{:<38}'.format(block_title) pass pass keys = list(catalog.keys()) #check if to sort by alphabet if(alpha): keys.sort() #print to console print('{:<22}'.format("Unit"),'{:<7}'.format("Usable"),'{:<10}'.format("Type"),'{:<38}'.format("Block")) print("-"*22+" "+"-"*7+" "+"-"*10+" "+"-"*38) for k in keys: print(catalog[k]) pass pass @classmethod def tidy(cls): ''' Removes any stale hidden workspace directories that aren't mapped to a workspace found in the class Jar container. Parameters: None Returns: None ''' #list all hidden workspace directories hidden_dirs = os.listdir(cls.DIR) for hd in hidden_dirs: if(hd.lower() not in cls.Jar.keys()): log.info("Removing stale workspace data for "+hd+"...") if(os.path.isdir(cls.DIR+hd)): shutil.rmtree(cls.DIR+hd, onerror=apt.rmReadOnly) #remove all files from workspace directory else: os.remove(cls.DIR+hd) pass def autoRefresh(self, rate): ''' Automatically refreshes all vendors for the given workspace. Reads its log file to determine if past next interval for refresh. Parameters: rate (int): how often to ask a refresh within a 24-hour period Returns: None ''' def timeToFloat(prt): ''' Converts a time object into a float type. 
Parameters: prt (datetime): iso format of current time Returns: (float): 0.00 (inclusive) - 24.00 (exclusive) ''' time_stamp = str(prt).split(' ')[1] time_sects = time_stamp.split(':') hrs = int(time_sects[0]) #convert to 'hours'.'minutes' time_fmt = (float(hrs)+(float(float(time_sects[1])/60))) return time_fmt refresh = False last_punch = None stage = 1 cur_time = datetime.now() #do not perform refresh if the rate is 0 if(rate == 0): return #always refresh if the rate is set below 0 (-1) elif(rate <= self.MIN_RATE): refresh = True #divide the 24 hour period into even checkpoints max_hours = float(24) spacing = float(max_hours / rate) intervals = [] for i in range(rate): intervals += [spacing*i] #ensure log file exists if(os.path.exists(self.getDir()+self.LOG_FILE) == False): open(self.getDir()+self.LOG_FILE, 'w').close() #read log file #read when the last refresh time occurred with open(self.getDir()+self.LOG_FILE, 'r') as log_file: #read the latest date data = log_file.readlines() #no refreshes have occurred so automatically need a refresh if(len(data) == 0): last_punch = cur_time refresh = True else: last_punch = datetime.fromisoformat(data[0]) #determine if its time to refresh #get latest time that was punched last_time_fmt = timeToFloat(last_punch) #determine the next checkpoint available for today next_checkpoint = max_hours for i in range(len(intervals)): if(last_time_fmt < intervals[i]): next_checkpoint = intervals[i] stage = i + 1 break #print('next checkpoint',next_checkpoint) cur_time_fmt = timeToFloat(cur_time) #check if the time has occurred on a previous day, (automatically update because its a new day) next_day = cur_time.year > last_punch.year or cur_time.month > last_punch.month or cur_time.day > last_punch.day #print(next_day) #print("currently",cur_time_fmt) #determine if the current time has passed the next checkpoint or if its a new day if(next_day or cur_time_fmt >= next_checkpoint): last_punch = cur_time refresh = True log_file.close() #determine if its time to refresh if(refresh): #display what interval is being refreshed on the day infoo = "("+str(stage)+"/"+str(rate)+")" if(rate > 0) else '' log.info("Automatically refreshing workspace "+self.getName()+" vendors... "+infoo) #refresh all vendors attached to this workspace for vndr in self.getVendors(): vndr.refresh() pass #write updated time value to log file with open(self.getDir()+self.LOG_FILE, 'w') as lf: lf.write(str(cur_time)) pass @classmethod def load(cls): '''Load all workspaces from settings.''' wspcs = apt.CFG.get('workspace', dtype=Section) for ws in wspcs.keys(): #skip over immediate keys if(isinstance(wspcs[ws], Section) == False): continue path = '' vendors = '()' #verify that a path key and vendors key exists under each workspace apt.CFG.set('workspace.'+ws+'.path', path, override=False) apt.CFG.set('workspace.'+ws+'.vendors', vendors, override=False) #retrieve path and vendors keys if('path' in wspcs[ws].keys()): path = wspcs[ws]['path']._val if('vendors' in wspcs[ws].keys()): vendors = Cfg.castList(wspcs[ws]['vendors']._val) #create Workspace objects Workspace(wspcs[ws]._name, path, vendors) pass #save if made any changes if(apt.CFG._modified): apt.CFG.write() pass @classmethod def save(cls, inc_active=True): ''' Serializes the Workspace objects and saves them to the settings dictionary. 
Parameters: inc_active (bool): determine if to save the active workspace to settings Returns: None ''' serialized = {} #serialize the Workspace objects into dictionary format for settings for ws in cls.Jar.values(): #do not save any workspace that has no path if(ws.getPath() == ''): continue serialized[ws.getName()] = {} serialized[ws.getName()]['path'] = ws.getPath() serialized[ws.getName()]['vendors'] = Cfg.castStr(ws.getVendors(returnnames=True, lowercase=False), tab_cnt=2, drop_list=False) #update settings dictionary apt.CFG.set('workspace', Section(serialized), override=True) #update active workspace if(inc_active): if(cls.getActive() != None): apt.CFG.set('general.active-workspace', cls.getActive().getName()) else: apt.CFG.set('general.active-workspace', '') apt.save() pass @classmethod def inWorkspace(cls): ''' Determine if an active workspace is selected. Parameters: None Returns: (bool): true if ActiveWorkspace is not None ''' return cls._ActiveWorkspace != None @classmethod def setActiveWorkspace(cls, ws): ''' Set the active workspace after initializing all workspaces into Jar. If the input name is invalid, it will set the first workspace in the Jar as active if one is not already assigned. Parameters: ws (str): workspace name Returns: (bool): true if active-workspace was set ''' #properly set the active workspace from one found in Jar if(ws != None and ws.lower() in cls.Jar.keys()): re_assign = (cls._ActiveWorkspace != None) #set the active workspace obj from found workspace cls._ActiveWorkspace = cls.Jar[ws] #only give prompt if reassigning the active-workspace if(re_assign): log.info("Assigning workspace "+cls._ActiveWorkspace.getName()+" as active workspace...") return True #try to randomly assign active workspace if not already assigned. elif(len(cls.Jar.keys()) and cls._ActiveWorkspace == None): random_ws = list(cls.Jar.keys())[0] cls._ActiveWorkspace = cls.Jar[random_ws] msgi = "No active workspace set." if(ws != ''): msgi = "Workspace "+ws+" does not exist." log.info(msgi+" Auto-assigning active workspace to "+cls._ActiveWorkspace.getName()+"...") return True #still was not able to set the active workspace with the given argument elif(cls._ActiveWorkspace != None): log.info("Workspace "+ws+" does not exist. Keeping "+cls._ActiveWorkspace.getName()+" as active.") else: log.error("No workspace set as active.") return False def isLinked(self): '''Returns if any vendors are tied to this workspace (bool).''' return len(self.getVendors()) def getPath(self): '''Returns the local path where downloaded blocks are located (str).''' return self._path def getDir(self): '''Returns the base hidden directory where the workspace data is kept (str).''' return self._ws_dir def getCachePath(self): '''Returns the hidden directory where workspace installations are kept. (str).''' return self.getDir()+"cache/" def getName(self): '''Returns the workspace's identifier (str).''' return self._name def isActive(self): '''Returns is this workspace is the active workspace (bool).''' return self == self.getActive() def getVendors(self, returnnames=False, lowercase=True): ''' Return the vendor objects associated with the given workspace. 
Parameters: returnnames (bool): true will return vendor names lowercase (bool): true will return lower-case names if returnnames is enabled Returns: ([Vendor]) or ([str]): list of available vendors ''' if(returnnames): vndr_names = [] for vndr in self._vendors: name = vndr.getName() if(lowercase): name = name.lower() vndr_names += [name] return vndr_names else: return self._vendors @classmethod def printList(cls): ''' Prints formatted list for workspaces with vendor availability and which is active. Parameters: None Returns: None ''' print('{:<16}'.format("Workspace"),'{:<6}'.format("Active"),'{:<40}'.format("Path"),'{:<14}'.format("Vendors")) print("-"*16+" "+"-"*6+" "+"-"*40+" "+"-"*14+" ") for ws in cls.Jar.values(): vndrs = apt.listToStr(ws.getVendors(returnnames=True)) act = 'yes' if(ws == cls.getActive()) else '-' print('{:<16}'.format(ws.getName()),'{:<6}'.format(act),'{:<40}'.format(ws.getPath()),'{:<14}'.format(vndrs)) pass pass @classmethod def printAll(cls): for key,ws in cls.Jar.items(): print('key:',key) print(ws) @classmethod def getActive(cls): '''Returns the active workspace and will exit on error (Workspace).''' if(cls._ActiveWorkspace == None): exit(log.error("Not in a workspace!")) return cls._ActiveWorkspace # uncomment to use for debugging # def __str__(self): # return f''' # ID: {hex(id(self))} # Name: {self.getName()} # Path: {self.getPath()} # Active: {self.isActive()} # Hidden directory: {self.getDir()} # Linked to: {self.isLinked()} # Vendors: {self.getVendors(returnnames=True)} # ''' pass
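The autoRefresh logic in the workspace.py content above divides a 24-hour day into evenly spaced checkpoints and refreshes once per checkpoint crossed (or on a new calendar day). Below is a minimal, self-contained sketch of just that checkpoint arithmetic, with the legohdl-specific pieces (log file, Vendor objects, hidden directories) stubbed out; the helper names here are hypothetical and only illustrate the interval math, not the real API.

from datetime import datetime

def time_to_float(moment):
    # convert a datetime into hours-as-float, e.g. 13:30 -> 13.5
    return moment.hour + moment.minute / 60.0

def needs_refresh(last_punch, now, rate):
    # rate == 0 means never refresh; a negative rate means always refresh
    if rate == 0:
        return False
    if rate < 0:
        return True
    # divide the 24-hour day into `rate` evenly spaced checkpoints
    spacing = 24.0 / rate
    intervals = [spacing * i for i in range(rate)]
    # find the first checkpoint after the last recorded refresh
    next_checkpoint = 24.0
    for point in intervals:
        if time_to_float(last_punch) < point:
            next_checkpoint = point
            break
    # a new calendar day always triggers a refresh
    new_day = now.date() > last_punch.date()
    return new_day or time_to_float(now) >= next_checkpoint

# example: with rate=4 the checkpoints are 0.0, 6.0, 12.0 and 18.0 hours
print(needs_refresh(datetime(2021, 1, 1, 5, 0), datetime(2021, 1, 1, 7, 0), 4))  # True, crossed 6.0
print(needs_refresh(datetime(2021, 1, 1, 7, 0), datetime(2021, 1, 1, 8, 0), 4))  # False, still before 12.0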
38.079022
186
0.531424
39,809
0.983473
0
0
6,101
0.150724
0
0
16,914
0.417857
f7f1c343e2c46298649ddf9fe556e96b2bec9514
3,871
py
Python
ev_de.py
avinashmnit30/Electric-Vehicle-Optimal-Charging
7f09bdbb9904285ddbbfeaa28cf402f7ef6f4cb4
[ "BSD-3-Clause" ]
7
2018-03-09T11:19:39.000Z
2022-01-19T13:45:20.000Z
ev_de.py
avinashmnit30/Electric-Vehicle-Optimal-Charging
7f09bdbb9904285ddbbfeaa28cf402f7ef6f4cb4
[ "BSD-3-Clause" ]
null
null
null
ev_de.py
avinashmnit30/Electric-Vehicle-Optimal-Charging
7f09bdbb9904285ddbbfeaa28cf402f7ef6f4cb4
[ "BSD-3-Clause" ]
1
2022-03-03T12:08:52.000Z
2022-03-03T12:08:52.000Z
# -*- coding: utf-8 -*- """ Created on Wed Dec 16 18:01:24 2015 @author: Avinash """ import numpy as np from numpy import * import numpy from math import * import ev_charge_schedule_modification1 as ev #import ev_charge_schedule.static as func1 #import ev_charge_schedule.dynamic as func2 import time #from numba import double from numba.decorators import autojit func1=ev.static func=autojit(func1) mode=1 runs=1 maxiter=2000 F=0.5 # Mutation Factor between 0 to 2 CR=0.2 # Probability 1. Put 0.9 if parameters are dependent while 0.2 if parameters are independent(seperable) N=40 D=100*24 # Number of particles ev.global_var(var_set=0,N_veh=int(D/float(24))) # boundary constraints ub=numpy.random.random(size=(1,D))[0] lb=numpy.random.random(size=(1,D))[0] i=0 while i<D: ub[i]=8.8 lb[i]=2.2 i+=1 fitness_val=numpy.zeros(shape=(runs,maxiter)) best_pos=numpy.zeros(shape=(runs,D)) for run_no in range(runs): # target vector initializtion x=numpy.random.uniform(size=(N,D)) i=0 while i<N: j=0 while j<D: x[i][j]=lb[j]+x[i][j]*(ub[j]-lb[j]) j+=1 i+=1 v=np.zeros_like(x) # donar vectors u=np.zeros_like(x) # trail vector g=numpy.zeros(shape=(1,D))[0] # best vector found so far # target vector initial fitness evaluation x_fit=numpy.random.uniform(size=(1,N))[0] i=0 while i<N: x_fit[i]=func(x[i],mode=mode) i+=1 u_fit=np.zeros_like(x_fit) j=0 i=1 while i<N: if x_fit[j]>x_fit[i]: j=i i+=1 g_fit=x_fit[j] g=x[j].copy() time1=time.time() it=0 while it<maxiter: # Mutation stage for i in range(N): r1=i while r1==i: r1=np.random.randint(low=0,high=N) r2=i while r2==i or r2==r1: r2=np.random.randint(low=0,high=N) r3=i while r3==i or r3==r1 or r3==r2: r3=np.random.randint(low=0,high=N) v[i]=x[r1]+(x[r2]-x[r3])*F for j in range(D): # if v[i][j]>ub[j]: # v[i][j]=v[i][j]-(1+numpy.random.rand())*(v[i][j]-ub[j]) # if v[i][j]<lb[j]: # v[i][j]=v[i][j]-(1+numpy.random.rand())*(v[i][j]-lb[j]) # if v[i][j]>ub[j]: # v[i][j]=ub[j] # if v[i][j]<lb[j]: # v[i][j]=lb[j] if v[i][j]>ub[j]: #v[i][j]=v[i][j]-1.1*(v[i][j]-ub[j]) v[i][j]=lb[j]+numpy.random.random()*(ub[j]-lb[j]) if v[i][j]<lb[j]: v[i][j]=lb[j]+numpy.random.random()*(ub[j]-lb[j]) #v[i][j]=v[i][j]-1.1*(v[i][j]-lb[j]) # Recombination stage for i in range(N): for j in range(D): if np.random.random()<=CR or j==numpy.random.randint(0,D): u[i][j]=v[i][j] else: u[i][j]=x[i][j] # Selection stage for i in range(N): u_fit[i]=func(u[i],mode=mode) if u_fit[i]<x_fit[i]: x[i]=u[i].copy() x_fit[i]=u_fit[i] if u_fit[i]<g_fit: g=u[i].copy() g_fit=u_fit[i] fitness_val[run_no][it]=g_fit print it,g_fit it+=1 best_pos[run_no]=g.copy() time2=time.time() print time2-time1 run_no+=1 numpy.savetxt("DE_fitness_d1_m2"+str(mode)+str(D)+".csv",fitness_val,delimiter=",") numpy.savetxt("DE_bestpos_d1_m2"+str(mode)+str(D)+".csv",best_pos,delimiter=",")
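The script above wires DE/rand/1 mutation, binomial crossover, and greedy selection to an EV charging-schedule objective. The sketch below restates that same loop in a standalone form on a toy sphere objective; the objective, population size, bounds, and iteration counts are placeholders for illustration only and are not the values used by the charging model.

import numpy as np

def sphere(x):
    # toy objective standing in for the EV charging cost function
    return float(np.sum(x * x))

def differential_evolution(func, dim, n=20, max_iter=200, F=0.5, CR=0.2, lb=-5.0, ub=5.0, seed=0):
    rng = np.random.default_rng(seed)
    pop = rng.uniform(lb, ub, size=(n, dim))           # target vectors
    fit = np.array([func(ind) for ind in pop])
    for _ in range(max_iter):
        for i in range(n):
            # DE/rand/1 mutation: three distinct donors, none equal to i
            r1, r2, r3 = rng.choice([k for k in range(n) if k != i], size=3, replace=False)
            donor = pop[r1] + F * (pop[r2] - pop[r3])
            donor = np.clip(donor, lb, ub)             # simple bound handling
            # binomial crossover with one guaranteed donor component
            mask = rng.random(dim) <= CR
            mask[rng.integers(dim)] = True
            trial = np.where(mask, donor, pop[i])
            # greedy selection: keep the trial only if it improves fitness
            trial_fit = func(trial)
            if trial_fit < fit[i]:
                pop[i], fit[i] = trial, trial_fit
    best = int(np.argmin(fit))
    return pop[best], fit[best]

best_x, best_f = differential_evolution(sphere, dim=10)
print(best_f)  # should approach 0 for the sphere objective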
29.105263
112
0.482046
0
0
0
0
0
0
0
0
1,034
0.267114
f7f1da41a1909260bbd83fee7efec53538a5f960
775
py
Python
var/spack/repos/builtin/packages/memaxes/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
9
2018-04-18T07:51:40.000Z
2021-09-10T03:56:57.000Z
var/spack/repos/builtin/packages/memaxes/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
907
2018-04-18T11:17:57.000Z
2022-03-31T13:20:25.000Z
var/spack/repos/builtin/packages/memaxes/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
29
2018-11-05T16:14:23.000Z
2022-02-03T16:07:09.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Memaxes(Package):
    """MemAxes is a visualizer for sampled memory trace data."""

    homepage = "https://github.com/llnl/MemAxes"

    version('0.5', sha256='9858f0f675b50e347d0b88545558e5d6b4333347c762b15d399b8d8004d7b68b',
            url='https://github.com/llnl/MemAxes/archive/v0.5.tar.gz')

    depends_on('cmake@2.8.9:', type='build')
    depends_on("qt@5:")

    def install(self, spec, prefix):
        with working_dir('spack-build', create=True):
            cmake('..', *std_cmake_args)
            make()
            make("install")
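The recipe above drives the CMake configure/build/install steps by hand. As an illustration of the alternative base-class pattern, the sketch below expresses the same metadata against Spack's CMakePackage class, which supplies those stages itself; this is a hedged sketch assuming the same Spack API vintage as the recipe above (from spack import *), not a drop-in replacement, and only the version, checksum, and dependency lines are taken from the original.

from spack import *


class Memaxes(CMakePackage):
    """MemAxes is a visualizer for sampled memory trace data."""

    homepage = "https://github.com/llnl/MemAxes"
    url = "https://github.com/llnl/MemAxes/archive/v0.5.tar.gz"

    version('0.5', sha256='9858f0f675b50e347d0b88545558e5d6b4333347c762b15d399b8d8004d7b68b')

    depends_on('cmake@2.8.9:', type='build')
    depends_on('qt@5:')

    # CMakePackage runs cmake/make/make install automatically; extra -D
    # flags would be returned from cmake_args() if any were needed.
    def cmake_args(self):
        return []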
31
93
0.672258
554
0.714839
0
0
0
0
0
0
464
0.59871
f7f61f99b14ff05744c7eb403d860339bcd27eae
3,970
py
Python
auth/decorators.py
dongboyan77/quay
8018e5bd80f17e6d855b58b7d5f2792d92675905
[ "Apache-2.0" ]
null
null
null
auth/decorators.py
dongboyan77/quay
8018e5bd80f17e6d855b58b7d5f2792d92675905
[ "Apache-2.0" ]
null
null
null
auth/decorators.py
dongboyan77/quay
8018e5bd80f17e6d855b58b7d5f2792d92675905
[ "Apache-2.0" ]
null
null
null
import logging from functools import wraps from flask import request, session from prometheus_client import Counter from auth.basic import validate_basic_auth from auth.oauth import validate_bearer_auth from auth.cookie import validate_session_cookie from auth.signedgrant import validate_signed_grant from util.http import abort logger = logging.getLogger(__name__) authentication_count = Counter( "quay_authentication_attempts_total", "number of authentication attempts accross the registry and API", labelnames=["auth_kind", "success"], ) def _auth_decorator(pass_result=False, handlers=None): """ Builds an auth decorator that runs the given handlers and, if any return successfully, sets up the auth context. The wrapped function will be invoked *regardless of success or failure of the auth handler(s)* """ def processor(func): @wraps(func) def wrapper(*args, **kwargs): auth_header = request.headers.get("authorization", "") result = None for handler in handlers: result = handler(auth_header) # If the handler was missing the necessary information, skip it and try the next one. if result.missing: continue # Check for a valid result. if result.auth_valid: logger.debug("Found valid auth result: %s", result.tuple()) # Set the various pieces of the auth context. result.apply_to_context() # Log the metric. authentication_count.labels(result.kind, True).inc() break # Otherwise, report the error. if result.error_message is not None: # Log the failure. authentication_count.labels(result.kind, False).inc() break if pass_result: kwargs["auth_result"] = result return func(*args, **kwargs) return wrapper return processor process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie]) process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth]) process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie]) process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True) process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth]) def require_session_login(func): """ Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If a valid session cookie does exist, the authenticated user and identity are also set. """ @wraps(func) def wrapper(*args, **kwargs): result = validate_session_cookie() if result.has_nonrobot_user: result.apply_to_context() authentication_count.labels(result.kind, True).inc() return func(*args, **kwargs) elif not result.missing: authentication_count.labels(result.kind, False).inc() abort(401, message="Method requires login and no valid login could be loaded.") return wrapper def extract_namespace_repo_from_session(func): """ Extracts the namespace and repository name from the current session (which must exist) and passes them into the decorated function as the first and second arguments. If the session doesn't exist or does not contain these arugments, a 400 error is raised. """ @wraps(func) def wrapper(*args, **kwargs): if "namespace" not in session or "repository" not in session: logger.error("Unable to load namespace or repository from session: %s", session) abort(400, message="Missing namespace in request") return func(session["namespace"], session["repository"], *args, **kwargs) return wrapper
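The _auth_decorator builder above walks a list of handlers and stops at the first one whose result is not "missing", then attaches that result to the request context. The self-contained sketch below reproduces that chain-of-handlers pattern without Flask, Prometheus, or Quay's validators; the handler and result names are hypothetical and exist only to show the control flow.

from functools import wraps
from collections import namedtuple

# minimal stand-in for the validator result objects used above
AuthResult = namedtuple("AuthResult", ["missing", "valid", "kind"])

def header_handler(header):
    # pretends to validate an "authorization" header value
    if not header:
        return AuthResult(missing=True, valid=False, kind="header")
    return AuthResult(missing=False, valid=header == "secret-token", kind="header")

def cookie_handler(header):
    # pretends to validate a session cookie; always "missing" in this sketch
    return AuthResult(missing=True, valid=False, kind="cookie")

def auth_decorator(handlers):
    def processor(func):
        @wraps(func)
        def wrapper(header, *args, **kwargs):
            result = None
            for handler in handlers:
                result = handler(header)
                if result.missing:
                    continue        # this handler had nothing to say, try the next
                break               # first non-missing result wins
            kwargs["auth_result"] = result
            return func(*args, **kwargs)
        return wrapper
    return processor

@auth_decorator(handlers=[cookie_handler, header_handler])
def view(auth_result=None):
    return "authorized" if auth_result and auth_result.valid else "anonymous"

print(view("secret-token"))  # authorized
print(view(""))              # anonymous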
35.446429
101
0.668766
0
0
0
0
1,978
0.498237
0
0
1,295
0.326196
f7f6435a685ce7599500c328cd1e055481aa5830
5,353
py
Python
ddpm_proteins/utils.py
lucidrains/ddpm-proteins
88bfacbd3cbdc4e38585fab420106f56e890c5f7
[ "MIT" ]
61
2021-06-14T16:41:54.000Z
2022-03-23T14:09:46.000Z
ddpm_proteins/utils.py
lucidrains/ddpm-proteins
88bfacbd3cbdc4e38585fab420106f56e890c5f7
[ "MIT" ]
null
null
null
ddpm_proteins/utils.py
lucidrains/ddpm-proteins
88bfacbd3cbdc4e38585fab420106f56e890c5f7
[ "MIT" ]
5
2021-06-15T11:51:47.000Z
2022-03-18T08:01:48.000Z
import os from PIL import Image import seaborn as sn import matplotlib.pyplot as plt import torch import torch.nn.functional as F from sidechainnet.utils.sequence import ProteinVocabulary from einops import rearrange # general functions def exists(val): return val is not None def default(val, d): return val if exists(val) else d def broadcat(tensors, dim = -1): num_tensors = len(tensors) shape_lens = set(list(map(lambda t: len(t.shape), tensors))) assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions' shape_len = list(shape_lens)[0] dim = (dim + shape_len) if dim < 0 else dim dims = list(zip(*map(lambda t: list(t.shape), tensors))) expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim] assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation' max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims)) expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims)) expanded_dims.insert(dim, (dim, dims[dim])) expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims))) tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes))) return torch.cat(tensors, dim = dim) # singleton msa transformer msa_instances = None def get_msa_transformer(): global msa_instances if not exists(msa_instances): msa_model, alphabet = torch.hub.load("facebookresearch/esm", "esm_msa1_t12_100M_UR50S") batch_converter = alphabet.get_batch_converter() return msa_model, batch_converter return msa_instances # MSA embedding related functions VOCAB = ProteinVocabulary() def ids_to_aa_str(x): assert isinstance(x, list), 'input must be a list' id2aa = VOCAB._int2char is_char = lambda c: isinstance(c, str) and len(c) == 1 out = [] for el in x: if isinstance(el, list): out.append(ids_to_aa_str(el)) elif isinstance(el, int): out.append(id2aa[el]) else: raise TypeError('type must be either list or character') if all(map(is_char, out)): return ''.join(out) return out def aa_str_to_embed_input(x): assert isinstance(x, list), 'input must be a list' out = [] for el in x: if isinstance(el, list): out.append(aa_str_to_embed_input(el)) elif isinstance(el, str): out.append((None, el)) else: raise TypeError('type must be either list or string') return out def apc(x): a1 = x.sum(-1, keepdims=True) a2 = x.sum(-2, keepdims=True) a12 = x.sum((-1, -2), keepdims=True) avg = a1 * a2 avg.div_(a12) normalized = x - avg return normalized def symmetrize(x): return x + x.transpose(-1, -2) def pad_image_to(tensor, size, value = 0.): remainder = size - tensor.shape[-1] tensor = F.pad(tensor, (0, remainder, 0, remainder), value = value) return tensor # getting a single MSA attention embedding, with caching CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins')) FETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE')) os.makedirs(CACHE_PATH, exist_ok = True) @torch.no_grad() def get_msa_attention_embedding( model, batch_converter, aa_str, id, fetch_msas_fn = lambda t: [], cache = True ): device = next(model.parameters()).device cache_full_path = os.path.join(CACHE_PATH, f'{id}.pt') if cache and FETCH_FROM_CACHE and os.path.exists(cache_full_path): try: loaded = torch.load(cache_full_path).to(device) except: loaded = None if exists(loaded): return loaded msas = default(fetch_msas_fn(aa_str), []) seq_with_msas = [aa_str, *msas] embed_inputs = aa_str_to_embed_input(seq_with_msas) _, _, msa_batch_tokens = batch_converter(embed_inputs) results = 
model(msa_batch_tokens.to(device), need_head_weights = True) attentions = results['row_attentions'] attentions = attentions[..., 1:, 1:] attentions = rearrange(attentions, 'b l h m n -> b (l h) m n') attentions = apc(symmetrize(attentions)) if cache: print(f'caching to {cache_full_path}') torch.save(attentions, cache_full_path) return attentions def get_msa_attention_embeddings( model, batch_converter, seqs, ids, fetch_msas_fn = lambda t: [], cache = True ): n = seqs.shape[1] seqs = rearrange(seqs, 'b n -> b () n') aa_strs = ids_to_aa_str(seqs.cpu().tolist()) embeds_list = [get_msa_attention_embedding(model, batch_converter, aa, seq_id, cache = cache) for aa, seq_id in zip(aa_strs, ids)] embeds_list = [pad_image_to(embed, n) for embed in embeds_list] embeds = torch.cat(embeds_list, dim = 0) return embeds # training utils def cycle(loader, thres = 256): while True: for data in loader: if data.seqs.shape[1] <= thres: yield data def save_heatmap(tensor, filepath, dpi = 200, return_image = False): heatmap = sn.heatmap(tensor.cpu().numpy()) figure = heatmap.get_figure() figure.savefig(filepath, dpi = dpi) plt.clf() if not return_image: return return Image.open(filepath)
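The symmetrize-then-APC step above is what turns raw MSA row attentions into contact-style maps. Below is a standalone numerical sketch of just that post-processing, with no ESM model involved; the two helpers are re-stated (using the core torch keepdim spelling) so the snippet runs on its own, and the random tensor merely stands in for real row attentions.

import torch

def symmetrize(x):
    # make the map symmetric across the last two dimensions
    return x + x.transpose(-1, -2)

def apc(x):
    # average product correction, as applied to the row attentions above
    a1 = x.sum(-1, keepdim=True)
    a2 = x.sum(-2, keepdim=True)
    a12 = x.sum((-1, -2), keepdim=True)
    avg = a1 * a2
    avg.div_(a12)
    return x - avg

# fake "row attentions": batch of 1, 4 heads, sequence length 16
attn = torch.rand(1, 4, 16, 16)
maps = apc(symmetrize(attn))
print(maps.shape)  # torch.Size([1, 4, 16, 16])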
29.092391
134
0.655707
0
0
146
0.027274
1,105
0.206426
0
0
572
0.106856
f7f93aac7b9d793ef23c38a97b1f3ca8216eaa8d
24,348
py
Python
samples/python/efficientdet/create_onnx.py
L-Net-1992/TensorRT
34b664d404001bd724cb56b52a6e0e05e1fd97f2
[ "Apache-2.0" ]
null
null
null
samples/python/efficientdet/create_onnx.py
L-Net-1992/TensorRT
34b664d404001bd724cb56b52a6e0e05e1fd97f2
[ "Apache-2.0" ]
null
null
null
samples/python/efficientdet/create_onnx.py
L-Net-1992/TensorRT
34b664d404001bd724cb56b52a6e0e05e1fd97f2
[ "Apache-2.0" ]
null
null
null
# # SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys import argparse import logging import tensorflow as tf import onnx_graphsurgeon as gs import numpy as np import onnx from onnx import shape_inference from tf2onnx import tfonnx, optimizer, tf_loader import onnx_utils logging.basicConfig(level=logging.INFO) logging.getLogger("EfficientDetGraphSurgeon").setLevel(logging.INFO) log = logging.getLogger("EfficientDetGraphSurgeon") class EfficientDetGraphSurgeon: def __init__(self, saved_model_path): """ Constructor of the EfficientDet Graph Surgeon object, to do the conversion of an EfficientDet TF saved model to an ONNX-TensorRT parsable model. :param saved_model_path: The path pointing to the TensorFlow saved model to load. """ saved_model_path = os.path.realpath(saved_model_path) assert os.path.exists(saved_model_path) # Use tf2onnx to convert saved model to an initial ONNX graph. graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, "serve", ["serving_default"]) log.info("Loaded saved model from {}".format(saved_model_path)) with tf.Graph().as_default() as tf_graph: tf.import_graph_def(graph_def, name="") with tf_loader.tf_session(graph=tf_graph): onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11) onnx_model = optimizer.optimize_graph(onnx_graph).make_model("Converted from {}".format(saved_model_path)) self.graph = gs.import_onnx(onnx_model) assert self.graph log.info("TF2ONNX graph created successfully") # Fold constants via ONNX-GS that TF2ONNX may have missed self.graph.fold_constants() # Try to auto-detect by finding if nodes match a specific name pattern expected for either of the APIs. self.api = None if len([node for node in self.graph.nodes if "class_net/" in node.name]) > 0: self.api = "AutoML" elif len([node for node in self.graph.nodes if "/WeightSharedConvolutionalClassHead/" in node.name]) > 0: self.api = "TFOD" assert self.api log.info("Graph was detected as {}".format(self.api)) def sanitize(self): """ Sanitize the graph by cleaning any unconnected nodes, do a topological resort, and fold constant inputs values. When possible, run shape inference on the ONNX graph to determine tensor shapes. """ for i in range(3): count_before = len(self.graph.nodes) self.graph.cleanup().toposort() try: for node in self.graph.nodes: for o in node.outputs: o.shape = None model = gs.export_onnx(self.graph) model = shape_inference.infer_shapes(model) self.graph = gs.import_onnx(model) except Exception as e: log.info("Shape inference could not be performed at this time:\n{}".format(e)) try: self.graph.fold_constants(fold_shapes=True) except TypeError as e: log.error("This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your " "onnx_graphsurgeon module. 
Error:\n{}".format(e)) raise count_after = len(self.graph.nodes) if count_before == count_after: # No new folding occurred in this iteration, so we can stop for now. break def save(self, output_path): """ Save the ONNX model to the given location. :param output_path: Path pointing to the location where to write out the updated ONNX model. """ self.graph.cleanup().toposort() model = gs.export_onnx(self.graph) output_path = os.path.realpath(output_path) os.makedirs(os.path.dirname(output_path), exist_ok=True) onnx.save(model, output_path) log.info("Saved ONNX model to {}".format(output_path)) def update_preprocessor(self, input_format, input_size, preprocessor="imagenet"): """ Remove all the pre-processing nodes in the ONNX graph and leave only the image normalization essentials. :param input_format: The input data format, either "NCHW" or "NHWC". :param input_size: The input size as a comma-separated string in H,W format, e.g. "512,512". :param preprocessor: The preprocessor to use, either "imagenet" for imagenet mean and stdev normalization, or "scale_range" for uniform [-1,+1] range normalization. """ # Update the input and output tensors shape input_size = input_size.split(",") assert len(input_size) == 2 for i in range(len(input_size)): input_size[i] = int(input_size[i]) assert input_size[i] >= 1 assert input_format in ["NCHW", "NHWC"] if input_format == "NCHW": self.graph.inputs[0].shape = ['N', 3, input_size[0], input_size[1]] if input_format == "NHWC": self.graph.inputs[0].shape = ['N', input_size[0], input_size[1], 3] self.graph.inputs[0].dtype = np.float32 self.graph.inputs[0].name = "input" log.info("ONNX graph input shape: {} [{} format]".format(self.graph.inputs[0].shape, input_format)) self.sanitize() # Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]: node.inputs.clear() # Convert to NCHW format if needed input_tensor = self.graph.inputs[0] if input_format == "NHWC": input_tensor = self.graph.transpose("preprocessor/transpose", input_tensor, [0, 3, 1, 2]) assert preprocessor in ["imagenet", "scale_range"] preprocessed_tensor = None if preprocessor == "imagenet": # RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting scale_val = 1 / np.asarray([255], dtype=np.float32) mean_val = -1 * np.expand_dims(np.asarray([0.485, 0.456, 0.406], dtype=np.float32), axis=(0, 2, 3)) stddev_val = 1 / np.expand_dims(np.asarray([0.229, 0.224, 0.225], dtype=np.float32), axis=(0, 2, 3)) # y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val * stddev_val) mean_out = self.graph.elt_const("Add", "preprocessor/mean", scale_out, mean_val * stddev_val) preprocessed_tensor = mean_out[0] if preprocessor == "scale_range": # RGB Normalizers. 
The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting scale_val = 2 / np.asarray([255], dtype=np.float32) offset_val = np.expand_dims(np.asarray([-1, -1, -1], dtype=np.float32), axis=(0, 2, 3)) # y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val) range_out = self.graph.elt_const("Add", "preprocessor/range", scale_out, offset_val) preprocessed_tensor = range_out[0] # Find the first stem conv node of the graph, and connect the normalizer directly to it stem_name = None if self.api == "AutoML": stem_name = "/stem/" if self.api == "TFOD": stem_name = "/stem_conv2d/" stem = [node for node in self.graph.nodes if node.op == "Conv" and stem_name in node.name][0] log.info("Found {} node '{}' as stem entry".format(stem.op, stem.name)) stem.inputs[0] = preprocessed_tensor self.sanitize() def update_shapes(self): # Reshape nodes have the batch dimension as a fixed value of 1, they should use the batch size instead # Output-Head reshapes use [1, -1, C], corrected reshape value should be [-1, V, C] for node in [node for node in self.graph.nodes if node.op == "Reshape"]: shape_in = node.inputs[0].shape if shape_in is None or len(shape_in) not in [4,5]: # TFOD graphs have 5-dim inputs on this Reshape continue if type(node.inputs[1]) != gs.Constant: continue shape_out = node.inputs[1].values if len(shape_out) != 3 or shape_out[0] != 1 or shape_out[1] != -1: continue volume = shape_in[1] * shape_in[2] * shape_in[3] / shape_out[2] if len(shape_in) == 5: volume *= shape_in[4] shape_corrected = np.asarray([-1, volume, shape_out[2]], dtype=np.int64) node.inputs[1] = gs.Constant("{}_shape".format(node.name), values=shape_corrected) log.info("Updating Output-Head Reshape node {} to {}".format(node.name, node.inputs[1].values)) # Other Reshapes only need to change the first dim to -1, as long as there are no -1's already for node in [node for node in self.graph.nodes if node.op == "Reshape"]: if type(node.inputs[1]) != gs.Constant or node.inputs[1].values[0] != 1 or -1 in node.inputs[1].values: continue node.inputs[1].values[0] = -1 log.info("Updating Reshape node {} to {}".format(node.name, node.inputs[1].values)) # Resize nodes try to calculate the output shape dynamically, it's more optimal to pre-compute the shape if self.api == "AutoML": # Resize on a BiFPN will always be 2x, but grab it from the graph just in case for node in [node for node in self.graph.nodes if node.op == "Resize"]: if len(node.inputs) < 4 or node.inputs[0].shape is None: continue scale_h, scale_w = None, None if type(node.inputs[3]) == gs.Constant: # The sizes input is already folded if len(node.inputs[3].values) != 4: continue scale_h = node.inputs[3].values[2] / node.inputs[0].shape[2] scale_w = node.inputs[3].values[3] / node.inputs[0].shape[3] if type(node.inputs[3]) == gs.Variable: # The sizes input comes from Shape+Slice+Concat concat = node.i(3) if concat.op != "Concat": continue if type(concat.inputs[1]) != gs.Constant or len(concat.inputs[1].values) != 2: continue scale_h = concat.inputs[1].values[0] / node.inputs[0].shape[2] scale_w = concat.inputs[1].values[1] / node.inputs[0].shape[3] scales = np.asarray([1, 1, scale_h, scale_w], dtype=np.float32) del node.inputs[3] node.inputs[2] = gs.Constant(name="{}_scales".format(node.name), values=scales) log.info("Updating Resize node {} to {}".format(node.name, scales)) self.sanitize() def update_network(self): """ Updates the graph to 
replace certain nodes in the main EfficientDet network: - the global average pooling nodes are optimized when running for TFOD models. """ if self.api == "TFOD": for reduce in [node for node in self.graph.nodes if node.op == "ReduceMean"]: # TFOD models have their ReduceMean nodes applied with some redundant transposes that can be # optimized away for better performance # Make sure the correct subgraph is being replaced, basically search for this: # X > Transpose (0,2,3,1) > ReduceMean (1,2) > Reshape (?,1,1,?) > Reshape (?,?,1,1) > Conv > Y # And change to this: # X > ReduceMean (2,3) > Conv > Y transpose = reduce.i() if transpose.op != "Transpose" or transpose.attrs['perm'] != [0, 2, 3, 1]: continue if len(reduce.attrs['axes']) != 2 or reduce.attrs['axes'] != [1, 2]: continue reshape1 = reduce.o() if reshape1.op != "Reshape" or len(reshape1.inputs[1].values) != 4: continue if reshape1.inputs[1].values[1] != 1 or reshape1.inputs[1].values[2] != 1: continue reshape2 = reshape1.o() if reshape2.op != "Reshape" or len(reshape2.inputs[1].values) != 4: continue if reshape2.inputs[1].values[2] != 1 or reshape2.inputs[1].values[3] != 1: continue conv = reshape2.o() if conv.op != "Conv": continue # If all the checks above pass, then this node sequence can be optimized by just the ReduceMean itself # operating on a different set of axes input_tensor = transpose.inputs[0] # Input tensor to the Transpose reduce.inputs[0] = input_tensor # Forward the Transpose input to the ReduceMean node output_tensor = reduce.outputs[0] # Output tensor of the ReduceMean conv.inputs[0] = output_tensor # Forward the ReduceMean output to the Conv node reduce.attrs['axes'] = [2, 3] # Update the axes that ReduceMean operates on reduce.attrs['keepdims'] = 1 # Keep the reduced dimensions log.info("Optimized subgraph around ReduceMean node '{}'".format(reduce.name)) def update_nms(self, threshold=None, detections=None): """ Updates the graph to replace the NMS op by BatchedNMS_TRT TensorRT plugin node. :param threshold: Override the score threshold attribute. If set to None, use the value in the graph. :param detections: Override the max detections attribute. If set to None, use the value in the graph. """ def find_head_concat(name_scope): # This will find the concatenation node at the end of either Class Net or Box Net. These concatenation nodes # bring together prediction data for each of 5 scales. # The concatenated Class Net node will have shape [batch_size, num_anchors, num_classes], # and the concatenated Box Net node has the shape [batch_size, num_anchors, 4]. # These concatenation nodes can be be found by searching for all Concat's and checking if the node two # steps above in the graph has a name that begins with either "box_net/..." or "class_net/...". for node in [node for node in self.graph.nodes if node.op == "Transpose" and name_scope in node.name]: concat = self.graph.find_descendant_by_op(node, "Concat") assert concat and len(concat.inputs) == 5 log.info("Found {} node '{}' as the tip of {}".format(concat.op, concat.name, name_scope)) return concat def extract_anchors_tensor(split): # This will find the anchors that have been hardcoded somewhere within the ONNX graph. # The function will return a gs.Constant that can be directly used as an input to the NMS plugin. # The anchor tensor shape will be [1, num_anchors, 4]. Note that '1' is kept as first dim, regardless of # batch size, as it's not necessary to replicate the anchors for all images in the batch. 
# The anchors are available (one per coordinate) hardcoded as constants within certain box decoder nodes. # Each of these four constants have shape [1, num_anchors], so some numpy operations are used to expand the # dims and concatenate them as needed. # These constants can be found by starting from the Box Net's split operation , and for each coordinate, # walking down in the graph until either an Add or Mul node is found. The second input on this nodes will # be the anchor data required. def get_anchor_np(output_idx, op): node = self.graph.find_descendant_by_op(split.o(0, output_idx), op) assert node val = np.squeeze(node.inputs[1].values) return np.expand_dims(val.flatten(), axis=(0, 2)) anchors_y = get_anchor_np(0, "Add") anchors_x = get_anchor_np(1, "Add") anchors_h = get_anchor_np(2, "Mul") anchors_w = get_anchor_np(3, "Mul") anchors = np.concatenate([anchors_y, anchors_x, anchors_h, anchors_w], axis=2) return gs.Constant(name="nms/anchors:0", values=anchors) self.sanitize() head_names = [] if self.api == "AutoML": head_names = ["class_net/", "box_net/"] if self.api == "TFOD": head_names = ["/WeightSharedConvolutionalClassHead/", "/WeightSharedConvolutionalBoxHead/"] # There are five nodes at the bottom of the graph that provide important connection points: # 1. Find the concat node at the end of the class net (multi-scale class predictor) class_net = find_head_concat(head_names[0]) class_net_tensor = class_net.outputs[0] # 2. Find the concat node at the end of the box net (multi-scale localization predictor) box_net = find_head_concat(head_names[1]) box_net_tensor = box_net.outputs[0] # 3. Find the split node that separates the box net coordinates and feeds them into the box decoder. box_net_split = self.graph.find_descendant_by_op(box_net, "Split") assert box_net_split and len(box_net_split.outputs) == 4 # 4. Find the concat node at the end of the box decoder. box_decoder = self.graph.find_descendant_by_op(box_net_split, "Concat") assert box_decoder and len(box_decoder.inputs) == 4 box_decoder_tensor = box_decoder.outputs[0] # 5. Find the NMS node. nms_node = self.graph.find_node_by_op("NonMaxSuppression") # Extract NMS Configuration num_detections = int(nms_node.inputs[2].values) if detections is None else detections iou_threshold = float(nms_node.inputs[3].values) score_threshold = float(nms_node.inputs[4].values) if threshold is None else threshold num_classes = class_net.i().inputs[1].values[-1] normalized = True if self.api == "TFOD" else False # NMS Inputs and Attributes # NMS expects these shapes for its input tensors: # box_net: [batch_size, number_boxes, 4] # class_net: [batch_size, number_boxes, number_classes] # anchors: [1, number_boxes, 4] (if used) nms_op = None nms_attrs = None nms_inputs = None # EfficientNMS TensorRT Plugin # Fusing the decoder will always be faster, so this is the default NMS method supported. In this case, # three inputs are given to the NMS TensorRT node: # - The box predictions (from the Box Net node found above) # - The class predictions (from the Class Net node found above) # - The default anchor coordinates (from the extracted anchor constants) # As the original tensors from EfficientDet will be used, the NMS code type is set to 1 (Center+Size), # because this is the internal box coding format used by the network. 
anchors_tensor = extract_anchors_tensor(box_net_split) nms_inputs = [box_net_tensor, class_net_tensor, anchors_tensor] nms_op = "EfficientNMS_TRT" nms_attrs = { 'plugin_version': "1", 'background_class': -1, 'max_output_boxes': num_detections, 'score_threshold': max(0.01, score_threshold), # Keep threshold to at least 0.01 for better efficiency 'iou_threshold': iou_threshold, 'score_activation': True, 'box_coding': 1, } nms_output_classes_dtype = np.int32 # NMS Outputs nms_output_num_detections = gs.Variable(name="num_detections", dtype=np.int32, shape=['N', 1]) nms_output_boxes = gs.Variable(name="detection_boxes", dtype=np.float32, shape=['N', num_detections, 4]) nms_output_scores = gs.Variable(name="detection_scores", dtype=np.float32, shape=['N', num_detections]) nms_output_classes = gs.Variable(name="detection_classes", dtype=nms_output_classes_dtype, shape=['N', num_detections]) nms_outputs = [nms_output_num_detections, nms_output_boxes, nms_output_scores, nms_output_classes] # Create the NMS Plugin node with the selected inputs. The outputs of the node will also become the final # outputs of the graph. self.graph.plugin( op=nms_op, name="nms/non_maximum_suppression", inputs=nms_inputs, outputs=nms_outputs, attrs=nms_attrs) log.info("Created NMS plugin '{}' with attributes: {}".format(nms_op, nms_attrs)) self.graph.outputs = nms_outputs self.sanitize() def main(args): effdet_gs = EfficientDetGraphSurgeon(args.saved_model) if args.tf2onnx: effdet_gs.save(args.tf2onnx) effdet_gs.update_preprocessor(args.input_format, args.input_size, args.preprocessor) effdet_gs.update_shapes() effdet_gs.update_network() effdet_gs.update_nms(args.nms_threshold, args.nms_detections) effdet_gs.save(args.onnx) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-m", "--saved_model", required=True, help="The TensorFlow saved model directory to load") parser.add_argument("-o", "--onnx", required=True, help="The output ONNX model file to write") parser.add_argument("-f", "--input_format", default="NHWC", choices=["NHWC", "NCHW"], help="Set the input data format of the graph, either NCHW or NHWC, default: NHWC") parser.add_argument("-i", "--input_size", default="512,512", help="Set the input shape of the graph, as a comma-separated dimensions in H,W format, " "default: 512,512") parser.add_argument("-p", "--preprocessor", default="imagenet", choices=["imagenet", "scale_range"], help="Set the preprocessor to apply on the graph, either 'imagenet' for standard mean " "subtraction and stdev normalization, or 'scale_range' for uniform [-1,+1] " "normalization as is used in the AdvProp models, default: imagenet") parser.add_argument("-t", "--nms_threshold", type=float, help="Override the NMS score threshold, default: use the original value in the model") parser.add_argument("-d", "--nms_detections", type=int, help="Override the NMS max detections, default: use the original value in the model") parser.add_argument("--tf2onnx", help="The path where to save the intermediate ONNX graph generated by tf2onnx, useful" "for graph debugging purposes, default: not saved") args = parser.parse_args() main(args)
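The sanitize() method above is the core ONNX-GraphSurgeon hygiene loop: cleanup, toposort, shape inference, constant folding, repeated until the node count stops shrinking. The sketch below is a trimmed version of that loop for an arbitrary ONNX model, using only graph-surgeon calls that already appear in the script; it omits the output-shape clearing and the old-version TypeError handling that the full method performs, and the file paths are hypothetical.

import onnx
from onnx import shape_inference
import onnx_graphsurgeon as gs

def sanitize(model_path, output_path, max_passes=3):
    graph = gs.import_onnx(onnx.load(model_path))
    for _ in range(max_passes):
        count_before = len(graph.nodes)
        # remove dangling nodes and restore a topological order
        graph.cleanup().toposort()
        # re-run ONNX shape inference so later passes see tensor shapes
        try:
            graph = gs.import_onnx(shape_inference.infer_shapes(gs.export_onnx(graph)))
        except Exception:
            pass  # shape inference is best-effort, as in the script above
        graph.fold_constants(fold_shapes=True)
        if len(graph.nodes) == count_before:
            break  # nothing new was folded on this pass
    onnx.save(gs.export_onnx(graph), output_path)

# hypothetical invocation, purely for illustration
# sanitize("model.onnx", "model_clean.onnx")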
53.986696
122
0.619065
21,081
0.865821
0
0
0
0
0
0
10,028
0.411861
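The comments in the file above describe reshaping four per-coordinate anchor constants of shape [1, num_anchors] into the [1, num_anchors, 4] tensor the NMS plugin expects. A minimal numpy sketch of just that reshaping, with made-up anchor values (only the shapes matter here):

import numpy as np

num_anchors = 5
# Hypothetical per-coordinate constants, each shaped [1, num_anchors], standing in for the
# values read off the Add/Mul nodes in the box decoder.
anchors_y, anchors_x, anchors_h, anchors_w = (np.random.rand(1, num_anchors) for _ in range(4))

def expand(a):
    # Mirrors get_anchor_np above: squeeze, flatten, then restore batch and coordinate dims.
    return np.expand_dims(np.squeeze(a).flatten(), axis=(0, 2))  # -> [1, num_anchors, 1]

anchors = np.concatenate([expand(anchors_y), expand(anchors_x),
                          expand(anchors_h), expand(anchors_w)], axis=2)
print(anchors.shape)  # (1, 5, 4)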
f7f9d815fd74248ee87d991bd107aab15b47f8cc
618
py
Python
easy/867-transpose-matrix.py
wanglongjiang/leetcode
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
[ "MIT" ]
2
2021-03-14T11:38:26.000Z
2021-03-14T11:38:30.000Z
easy/867-transpose-matrix.py
wanglongjiang/leetcode
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
[ "MIT" ]
null
null
null
easy/867-transpose-matrix.py
wanglongjiang/leetcode
c61d2e719e81575cfb5bde9d64e15cee7cf01ef3
[ "MIT" ]
1
2022-01-17T19:33:23.000Z
2022-01-17T19:33:23.000Z
''' Transpose Matrix: given a 2D integer array matrix, return the transpose of matrix. The transpose of a matrix flips it over its main diagonal, swapping its row and column indices. ''' from typing import List ''' Approach: a simple problem. The original matrix is m*n; create a new n*m matrix and copy the old matrix's data into it with rows and columns swapped. ''' class Solution: def transpose(self, matrix: List[List[int]]) -> List[List[int]]: m = len(matrix) n = len(matrix[0]) newMatrix = [[]] * n for i in range(n): newMatrix[i] = [0] * m for j in range(m): newMatrix[i][j] = matrix[j][i] return newMatrix s = Solution() print(s.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) print(s.transpose([[1, 2, 3], [4, 5, 6]]))
20.6
68
0.548544
329
0.403186
0
0
0
0
0
0
343
0.420343
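For comparison with the Solution above, the same transpose can be written with the built-in zip idiom; this is only an alternative sketch, not part of the original file:

def transpose(matrix):
    # zip(*matrix) groups the i-th element of every row, i.e. yields the columns of the input.
    return [list(col) for col in zip(*matrix)]

print(transpose([[1, 2, 3], [4, 5, 6]]))  # [[1, 4], [2, 5], [3, 6]]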
f7fa229686aa6986aa8b8f8a1dc2ccded74af095
5,940
py
Python
adam_visual_perception/head_gaze_estimator.py
isi-vista/adam-visual-perception
8ad6ed883b184b5407a1bf793617b226c78b3a13
[ "MIT" ]
1
2020-07-21T10:52:26.000Z
2020-07-21T10:52:26.000Z
adam_visual_perception/head_gaze_estimator.py
isi-vista/adam-visual-perception
8ad6ed883b184b5407a1bf793617b226c78b3a13
[ "MIT" ]
null
null
null
adam_visual_perception/head_gaze_estimator.py
isi-vista/adam-visual-perception
8ad6ed883b184b5407a1bf793617b226c78b3a13
[ "MIT" ]
2
2020-07-21T15:30:42.000Z
2021-01-20T21:54:09.000Z
from adam_visual_perception import LandmarkDetector from adam_visual_perception.utility import * import numpy as np import math import cv2 import os import sys class HeadGazeEstimator: """ A class for estimating gaze ray from facial landmarks """ def __init__(self, write_video=False): # 3D model points. self.model_points = np.array( [ (0.0, 0.0, 0.0), # Nose tip (0.0, -330.0, -65.0), # Chin (-225.0, 170.0, -135.0), # Left eye left corner (225.0, 170.0, -135.0), # Right eye right corne (-150.0, -150.0, -125.0), # Left Mouth corner (150.0, -150.0, -125.0), # Right mouth corner ] ) self.dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion """ Parameters ---------- write_video : bool, optional Write the resulting OpenCV video """ self.write_video = write_video self.landmark_detector = LandmarkDetector(write_video=False) def get_gaze_rays(self, filename, bbox_history=None, show=True): """ Get the gaze rays for the given video file """ # Get the landmarks for the entire video landmark_map = self.landmark_detector.detect(filename, show=False) # Capture the video cap = cv2.VideoCapture(filename) frame_no = 0 gaze_angles = {} # Loop over the frames from the video stream while True: success, frame = cap.read() if not success: if frame_no == 0: print("Failed to read video") sys.exit(1) else: break if frame_no == 0: # Camera internals size = frame.shape focal_length = size[1] center = (size[1] / 2, size[0] / 2) camera_matrix = np.array( [ [focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1], ], dtype="double", ) if self.write_video: # Initialize our video writer fourcc = cv2.VideoWriter_fourcc(*"mp4v") par_path = os.path.abspath(os.path.join(filename, os.pardir)) dir_path = par_path + "_pnp" if not os.path.isdir(dir_path): os.makedirs(dir_path) video_path = os.path.join(dir_path, os.path.basename(filename)) writer = cv2.VideoWriter( video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True ) if frame_no in landmark_map: # 2D image points. 
image_points = np.array( [ landmark_map[frame_no][33], # Nose tip landmark_map[frame_no][8], # Chin landmark_map[frame_no][36], # Left eye left corner landmark_map[frame_no][45], # Right eye right corne landmark_map[frame_no][48], # Left Mouth corner landmark_map[frame_no][54], # Right mouth corner ], dtype="double", ) # We use this to draw a line sticking out of the nose success, rotation_vector, translation_vector = cv2.solvePnP( self.model_points, image_points, camera_matrix, self.dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE, ) nose_end_point2D, jacobian = cv2.projectPoints( np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, self.dist_coeffs, ) for p in image_points: cv2.circle(frame, (int(p[0]), int(p[1])), 1, (255, 0, 0), -1) for p in landmark_map[frame_no]: if p in image_points: continue cv2.circle(frame, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1) p1 = (int(image_points[0][0]), int(image_points[0][1])) p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1])) lenAB = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) length = lenAB * 3 C_x = int(p2[0] + (p2[0] - p1[0]) / lenAB * length) C_y = int(p2[1] + (p2[1] - p1[1]) / lenAB * length) cv2.line(frame, p1, (C_x, C_y), (0, 255, 0), 2) if bbox_history is not None and (self.write_video or show): bboxes = bbox_history[frame_no] for i, bbox in enumerate(bboxes): x, y = int(bbox[0]), int(bbox[1]) w, h = int(bbox[2]), int(bbox[3]) cv2.circle( frame, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1 ) # Store in the return dictionary gaze_angles[frame_no] = (p1, p2) # Show the frame if the flag is on if show: cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF # Write the video if the flag is on if self.write_video: writer.write(frame) frame_no += 1 # Cleanup cv2.destroyAllWindows() if self.write_video: writer.release() return gaze_angles
35.783133
87
0.458754
5,777
0.972559
0
0
0
0
0
0
897
0.15101
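The heart of HeadGazeEstimator.get_gaze_rays above is a perspective-n-point solve followed by projecting a point along the nose axis. A stripped-down sketch of just that step, using the same generic 3D face model but made-up 2D landmarks and a hypothetical 640x480 camera:

import cv2
import numpy as np

# Generic 3D model points (nose tip, chin, eye corners, mouth corners), as in the class above.
model_points = np.array([
    (0.0, 0.0, 0.0), (0.0, -330.0, -65.0), (-225.0, 170.0, -135.0),
    (225.0, 170.0, -135.0), (-150.0, -150.0, -125.0), (150.0, -150.0, -125.0),
])
# Made-up 2D landmark positions; in the real pipeline these come from the landmark detector.
image_points = np.array([
    (320.0, 240.0), (325.0, 380.0), (250.0, 180.0),
    (390.0, 180.0), (280.0, 310.0), (360.0, 310.0),
], dtype="double")

focal_length = 640.0
camera_matrix = np.array([[focal_length, 0, 320.0],
                          [0, focal_length, 240.0],
                          [0, 0, 1]], dtype="double")
dist_coeffs = np.zeros((4, 1))  # assume no lens distortion, as the class does

ok, rvec, tvec = cv2.solvePnP(model_points, image_points, camera_matrix,
                              dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
# Project a point 1000 units in front of the nose to get the 2D end point of the gaze ray.
nose_end_2d, _ = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rvec, tvec,
                                   camera_matrix, dist_coeffs)
print(ok, nose_end_2d.ravel())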
f7fa5e91400000b4953ab8022408df2a80e3be82
3,388
py
Python
pypoca/cogs/general.py
leandcesar/PyPoca
416f690faad0b511ca9d04b012af35256ee95089
[ "MIT" ]
1
2021-11-22T04:22:08.000Z
2021-11-22T04:22:08.000Z
pypoca/cogs/general.py
leandcesar/PyPoca
416f690faad0b511ca9d04b012af35256ee95089
[ "MIT" ]
null
null
null
pypoca/cogs/general.py
leandcesar/PyPoca
416f690faad0b511ca9d04b012af35256ee95089
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import disnake from disnake.ext import commands from pypoca.config import COLOR, URLS from pypoca.database import Server from pypoca.ext import ALL, DEFAULT, Choice, Option class General(commands.Cog): def __init__(self, bot: commands.Bot): self.bot = bot @commands.slash_command(name="ping", description=DEFAULT["COMMAND_PING_DESC"]) async def slash_ping(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide): server = Server.get_by_id(inter.guild.id) locale = ALL[server.language] if server else DEFAULT latency = int(self.bot.latency * 1000) description = locale["COMMAND_PING_REPLY"] + f": {latency}ms" embed = disnake.Embed(description=description, color=COLOR) await inter.send(embed=embed, ephemeral=hide) @commands.slash_command(name="help", description=DEFAULT["COMMAND_HELP_DESC"]) async def slash_help(self, inter: disnake.ApplicationCommandInteraction, hide: Choice.boolean = Option.hide): server = Server.get_by_id(inter.guild.id) locale = ALL[server.language] if server else DEFAULT BLANK = "<:blank:914183315056111627>" description = f""" **/movie** {BLANK} **discover** {locale["COMMAND_MOVIE_DISCOVER_DESC"]} {BLANK} **find** {locale["COMMAND_MOVIE_FIND_DESC"]} {BLANK} **popular** {locale["COMMAND_MOVIE_POPULAR_DESC"]} {BLANK} **search** {locale["COMMAND_MOVIE_SEARCH_DESC"]} {BLANK} **top** {locale["COMMAND_MOVIE_TOP_DESC"]} {BLANK} **trending** {locale["COMMAND_MOVIE_TRENDING_DESC"]} {BLANK} **upcoming** {locale["COMMAND_MOVIE_UPCOMING_DESC"]} **/tv** {BLANK} **discover** {locale["COMMAND_TV_DISCOVER_DESC"]} {BLANK} **popular** {locale["COMMAND_TV_POPULAR_DESC"]} {BLANK} **search** {locale["COMMAND_TV_SEARCH_DESC"]} {BLANK} **top** {locale["COMMAND_TV_TOP_DESC"]} {BLANK} **trending** {locale["COMMAND_TV_TRENDING_DESC"]} {BLANK} **upcoming** {locale["COMMAND_TV_UPCOMING_DESC"]} **/people** {BLANK} **popular** {locale["COMMAND_PERSON_POPULAR_DESC"]} {BLANK} **search** {locale["COMMAND_PERSON_SEARCH_DESC"]} {BLANK} **trending** {locale["COMMAND_PERSON_TRENDING_DESC"]} **/game** {BLANK} **frame** {locale["COMMAND_GAME_FRAME_DESC"]} {BLANK} **higher** {locale["COMMAND_GAME_HIGHER_DESC"]} **/setting** {BLANK} **language** {locale["COMMAND_LANGUAGE_DESC"]} """ buttons = [ {"style": 5, "label": locale["COMMAND_HELP_BUTTON_INVITE"], "url": URLS["invite"]}, {"style": 5, "label": locale["COMMAND_HELP_BUTTON_VOTE"], "url": URLS["vote"]}, {"style": 5, "label": locale["COMMAND_HELP_BUTTON_SERVER"], "url": URLS["server"]}, {"style": 5, "label": locale["COMMAND_HELP_BUTTON_SITE"], "url": URLS["site"]}, ] embed = disnake.Embed(description=description, color=COLOR) view = disnake.ui.View() [view.add_item(disnake.ui.Button(**button)) for button in buttons] await inter.send(embed=embed, view=view, ephemeral=hide) def setup(bot: commands.Bot) -> None: bot.add_cog(General(bot))
47.055556
113
0.626328
3,117
0.920012
0
0
3,011
0.888725
2,845
0.839728
1,788
0.527745
f7fab2882ba44013b1ca7273273e6b041c1e46c3
1,301
py
Python
costor_server/storage/api/views/authcheck.py
rphi/costor
081de65778d404cf7a22c5524bf89a146fa8326b
[ "CNRI-Python" ]
2
2019-12-31T16:49:36.000Z
2021-02-17T09:47:41.000Z
costor_server/storage/api/views/authcheck.py
rphi/costor
081de65778d404cf7a22c5524bf89a146fa8326b
[ "CNRI-Python" ]
null
null
null
costor_server/storage/api/views/authcheck.py
rphi/costor
081de65778d404cf7a22c5524bf89a146fa8326b
[ "CNRI-Python" ]
null
null
null
from rest_framework.decorators import api_view, permission_classes from rest_framework.parsers import MultiPartParser from rest_framework.response import Response from rest_framework import permissions from rest_framework.exceptions import APIException from rest_framework.decorators import parser_classes from django.shortcuts import get_object_or_404 from manager.models import Agent @api_view(['GET']) @permission_classes([permissions.AllowAny]) def auth_check(request): if not request.user.is_authenticated: raise APIException( detail="You aren't authenticated.", code=403 ) #print(request.GET) if 'agent' not in request.GET: return Response(f'Authenticated as {request.user.username} with no agent') agent = Agent.objects.filter(name=request.GET['agent']) if not agent.exists(): raise APIException( detail="Can't find that agent", code=404 ) agent = agent.first() if request.user not in agent.users.all(): raise APIException( detail=f'Authenticated as {request.user.username} but you don\'t have permission for agent {agent.name}', code=403 ) return Response(f'Authenticated as {request.user.username} for agent {agent.name}')
30.97619
117
0.704074
0
0
0
0
911
0.700231
0
0
308
0.236741
f7facb852a3db388a7c69659114114ea83276164
12,295
py
Python
tensorflow_probability/python/experimental/mcmc/sample_fold.py
rupei/probability
4aa1ee652853a19c4e80d39216c3fa535ed3e589
[ "Apache-2.0" ]
null
null
null
tensorflow_probability/python/experimental/mcmc/sample_fold.py
rupei/probability
4aa1ee652853a19c4e80d39216c3fa535ed3e589
[ "Apache-2.0" ]
null
null
null
tensorflow_probability/python/experimental/mcmc/sample_fold.py
rupei/probability
4aa1ee652853a19c4e80d39216c3fa535ed3e589
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Drivers for streaming reductions framework.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings # Dependency imports import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.mcmc import sample as exp_sample_lib from tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel from tensorflow_probability.python.experimental.mcmc import tracing_reducer from tensorflow_probability.python.experimental.mcmc import with_reductions from tensorflow_probability.python.mcmc import sample from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import __all__ = [ 'sample_chain', 'sample_fold', ] def sample_fold( num_steps, current_state, previous_kernel_results=None, kernel=None, reducer=None, num_burnin_steps=0, num_steps_between_results=0, parallel_iterations=10, seed=None, name=None, ): """Computes the requested reductions over the `kernel`'s samples. To wit, runs the given `kernel` for `num_steps` steps, and consumes the stream of samples with the given `Reducer`s' `one_step` method(s). This runs in constant memory (unless a given `Reducer` builds a large structure). The driver internally composes the correct onion of `WithReductions` and `SampleDiscardingKernel` to implement the requested optionally thinned reduction; however, the kernel results of those applied Transition Kernels will not be returned. Hence, if warm-restarting reductions is desired, one should manually build the Transition Kernel onion and use `tfp.experimental.mcmc.step_kernel`. An arbitrary collection of `reducer` can be provided, and the resulting finalized statistic(s) will be returned in an identical structure. Args: num_steps: Integer or scalar `Tensor` representing the number of `Reducer` steps. current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s. Warm-start for the auxiliary state needed by the given `kernel`. If not supplied, `sample_fold` will cold-start with `kernel.bootstrap_results`. kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step of the Markov chain. reducer: A (possibly nested) structure of `Reducer`s to be evaluated on the `kernel`'s samples. If no reducers are given (`reducer=None`), then `None` will be returned in place of streaming calculations. num_burnin_steps: Integer or scalar `Tensor` representing the number of chain steps to take before starting to collect results. Defaults to 0 (i.e., no burn-in). num_steps_between_results: Integer or scalar `Tensor` representing the number of chain steps between collecting a result. Only one out of every `num_steps_between_samples + 1` steps is included in the returned results. 
Defaults to 0 (i.e., no thinning). parallel_iterations: The number of iterations allowed to run in parallel. It must be a positive integer. See `tf.while_loop` for more details. seed: Optional seed for reproducible sampling. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'mcmc_sample_fold'). Returns: reduction_results: A (possibly nested) structure of finalized reducer statistics. The structure identically mimics that of `reducer`. end_state: The final state of the Markov chain(s). final_kernel_results: `collections.namedtuple` of internal calculations used to advance the supplied `kernel`. These results do not include the kernel results of `WithReductions` or `SampleDiscardingKernel`. """ with tf.name_scope(name or 'mcmc_sample_fold'): num_steps = tf.convert_to_tensor( num_steps, dtype=tf.int32, name='num_steps') current_state = tf.nest.map_structure( lambda x: tf.convert_to_tensor(x, name='current_state'), current_state) reducer_was_none = False if reducer is None: reducer = [] reducer_was_none = True reduction_kernel = with_reductions.WithReductions( inner_kernel=sample_discarding_kernel.SampleDiscardingKernel( inner_kernel=kernel, num_burnin_steps=num_burnin_steps, num_steps_between_results=num_steps_between_results), reducer=reducer, ) end_state, final_kernel_results = exp_sample_lib.step_kernel( num_steps=num_steps, current_state=current_state, previous_kernel_results=previous_kernel_results, kernel=reduction_kernel, return_final_kernel_results=True, parallel_iterations=parallel_iterations, seed=seed, name=name, ) reduction_results = nest.map_structure_up_to( reducer, lambda r, s: r.finalize(s), reducer, final_kernel_results.streaming_calculations, check_types=False) if reducer_was_none: reduction_results = None return (reduction_results, end_state, final_kernel_results.inner_results.inner_results) def _trace_kernel_results(current_state, kernel_results): del current_state return kernel_results def sample_chain( num_results, current_state, previous_kernel_results=None, kernel=None, num_burnin_steps=0, num_steps_between_results=0, trace_fn=_trace_kernel_results, return_final_kernel_results=False, parallel_iterations=10, seed=None, name=None, ): """Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps. This function samples from a Markov chain at `current_state` whose stationary distribution is governed by the supplied `TransitionKernel` instance (`kernel`). This function can sample from multiple chains, in parallel. (Whether or not there are multiple chains is dictated by the `kernel`.) The `current_state` can be represented as a single `Tensor` or a `list` of `Tensors` which collectively represent the current state. Since MCMC states are correlated, it is sometimes desirable to produce additional intermediate states, and then discard them, ending up with a set of states with decreased autocorrelation. See [Owen (2017)][1]. Such 'thinning' is made possible by setting `num_steps_between_results > 0`. The chain then takes `num_steps_between_results` extra steps between the steps that make it into the results. The extra steps are never materialized, and thus do not increase memory requirements. In addition to returning the chain state, this function supports tracing of auxiliary variables used by the kernel. The traced values are selected by specifying `trace_fn`. By default, all kernel results are traced but in the future the default will be changed to no results being traced, so plan accordingly. 
See below for some examples of this feature. Args: num_results: Integer number of Markov chain draws. current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s representing internal calculations made within the previous call to this function (or as returned by `bootstrap_results`). kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step of the Markov chain. num_burnin_steps: Integer number of chain steps to take before starting to collect results. Default value: 0 (i.e., no burn-in). num_steps_between_results: Integer number of chain steps between collecting a result. Only one out of every `num_steps_between_samples + 1` steps is included in the returned results. The number of returned chain states is still equal to `num_results`. Default value: 0 (i.e., no thinning). trace_fn: A callable that takes in the current chain state and the previous kernel results and return a `Tensor` or a nested collection of `Tensor`s that is then traced along with the chain state. return_final_kernel_results: If `True`, then the final kernel results are returned alongside the chain state and the trace specified by the `trace_fn`. parallel_iterations: The number of iterations allowed to run in parallel. It must be a positive integer. See `tf.while_loop` for more details. seed: Optional, a seed for reproducible sampling. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'experimental_mcmc_sample_chain'). Returns: checkpointable_states_and_trace: if `return_final_kernel_results` is `True`. The return value is an instance of `CheckpointableStatesAndTrace`. all_states: if `return_final_kernel_results` is `False` and `trace_fn` is `None`. The return value is a `Tensor` or Python list of `Tensor`s representing the state(s) of the Markov chain(s) at each result step. Has same shape as input `current_state` but with a prepended `num_results`-size dimension. states_and_trace: if `return_final_kernel_results` is `False` and `trace_fn` is not `None`. The return value is an instance of `StatesAndTrace`. #### References [1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler. _Technical Report_, 2017. http://statweb.stanford.edu/~owen/reports/bestthinning.pdf """ with tf.name_scope(name or 'experimental_mcmc_sample_chain'): if not kernel.is_calibrated: warnings.warn('supplied `TransitionKernel` is not calibrated. Markov ' 'chain may not converge to intended target distribution.') if trace_fn is None: trace_fn = lambda *args: () no_trace = True else: no_trace = False if trace_fn is sample_chain.__defaults__[4]: warnings.warn('Tracing all kernel results by default is deprecated. Set ' 'the `trace_fn` argument to None (the future default ' 'value) or an explicit callback that traces the values ' 'you are interested in.') # `WithReductions` assumes all its reducers want to reduce over the # immediate inner results of its kernel results. However, # We don't care about the kernel results of `SampleDiscardingKernel`; hence, # we evaluate the `trace_fn` on a deeper level of inner results. 
def real_trace_fn(curr_state, kr): return curr_state, trace_fn(curr_state, kr.inner_results) trace_reducer = tracing_reducer.TracingReducer( trace_fn=real_trace_fn, size=num_results ) trace_results, _, final_kernel_results = sample_fold( num_steps=num_results, current_state=current_state, previous_kernel_results=previous_kernel_results, kernel=kernel, reducer=trace_reducer, num_burnin_steps=num_burnin_steps, num_steps_between_results=num_steps_between_results, parallel_iterations=parallel_iterations, seed=seed, name=name, ) all_states, trace = trace_results if return_final_kernel_results: return sample.CheckpointableStatesAndTrace( all_states=all_states, trace=trace, final_kernel_results=final_kernel_results) else: if no_trace: return all_states else: return sample.StatesAndTrace(all_states=all_states, trace=trace)
43.140351
85
0.727938
0
0
0
0
0
0
0
0
8,255
0.671411
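A minimal usage sketch of sample_fold as documented above, sampling a standard normal with HMC. reducer is left as None (so the first return value is None) to avoid assuming any particular Reducer class; exact seed and dtype handling may differ between TFP versions:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=tfd.Normal(0., 1.).log_prob,
    step_size=0.1,
    num_leapfrog_steps=3)

# With reducer=None no streaming statistic is computed; only the end state and the
# final kernel results of the underlying HMC kernel are of interest here.
reduction, end_state, final_kernel_results = tfp.experimental.mcmc.sample_fold(
    num_steps=100,
    current_state=tf.zeros([]),
    kernel=kernel,
    reducer=None,
    num_burnin_steps=50)
print(reduction, end_state)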
f7facc8714f2358ff5e4f5bf725d3516243bec69
10,025
py
Python
algos/custom_ppo2.py
Ottawa-Autonomous-Vehicle-Group/learning-to-drive-in-5-minutes
fb82bc77593605711289e03f95dcfb6d3ea9e6c3
[ "MIT" ]
1
2020-08-02T20:47:44.000Z
2020-08-02T20:47:44.000Z
algos/custom_ppo2.py
vijpandaturtle/learning-to-drive-in-5-minutes
fb82bc77593605711289e03f95dcfb6d3ea9e6c3
[ "MIT" ]
null
null
null
algos/custom_ppo2.py
vijpandaturtle/learning-to-drive-in-5-minutes
fb82bc77593605711289e03f95dcfb6d3ea9e6c3
[ "MIT" ]
null
null
null
import time from collections import deque import gym import numpy as np from stable_baselines import logger, PPO2 from stable_baselines.a2c.utils import total_episode_reward_logger from stable_baselines.common import explained_variance, TensorboardWriter from stable_baselines.common.runners import AbstractEnvRunner from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten class PPO2WithVAE(PPO2): """ Custom PPO2 version. Notable changes: - optimization is done after each episode and not after n steps """ def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2"): # Transform to callable if needed self.learning_rate = get_schedule_fn(self.learning_rate) self.cliprange = get_schedule_fn(self.cliprange) with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer: self._setup_learn() runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam) self.episode_reward = np.zeros((self.n_envs,)) ep_info_buf = deque(maxlen=100) t_first_start = time.time() n_timesteps = 0 # nupdates = total_timesteps // self.n_batch for timestep in range(1, total_timesteps + 1): assert self.n_batch % self.nminibatches == 0 batch_size = self.n_batch // self.nminibatches t_start = time.time() frac = 1.0 - timestep / total_timesteps lr_now = self.learning_rate(frac) cliprangenow = self.cliprange(frac) # true_reward is the reward without discount obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run() n_timesteps += len(obs) ep_info_buf.extend(ep_infos) mb_loss_vals = [] if states is None: # nonrecurrent version inds = np.arange(self.n_batch) for epoch_num in range(self.noptepochs): np.random.shuffle(inds) for start in range(0, self.n_batch, batch_size): # timestep = ((update * self.noptepochs * self.n_batch + epoch_num * self.n_batch + start) // # batch_size) end = start + batch_size mbinds = inds[start:end] slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer, update=n_timesteps)) else: # recurrent version assert self.n_envs % self.nminibatches == 0 env_indices = np.arange(self.n_envs) flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps) envs_per_batch = batch_size // self.n_steps for epoch_num in range(self.noptepochs): np.random.shuffle(env_indices) for start in range(0, self.n_envs, envs_per_batch): # timestep = ((update * self.noptepochs * self.n_envs + epoch_num * self.n_envs + start) // # envs_per_batch) end = start + envs_per_batch mb_env_inds = env_indices[start:end] mb_flat_inds = flat_indices[mb_env_inds].ravel() slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mb_states = states[mb_env_inds] mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=n_timesteps, writer=writer, states=mb_states)) loss_vals = np.mean(mb_loss_vals, axis=0) t_now = time.time() fps = int(self.n_batch / (t_now - t_start)) if writer is not None: self.episode_reward = total_episode_reward_logger(self.episode_reward, true_reward.reshape((self.n_envs, self.n_steps)), masks.reshape((self.n_envs, self.n_steps)), writer, n_timesteps) if self.verbose >= 1 and (timestep % log_interval == 0 or timestep == 1): explained_var = explained_variance(values, returns) logger.logkv("total_timesteps", n_timesteps) logger.logkv("fps", fps) logger.logkv("explained_variance", float(explained_var))
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf])) logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf])) logger.logkv('time_elapsed', t_start - t_first_start) for (loss_val, loss_name) in zip(loss_vals, self.loss_names): logger.logkv(loss_name, loss_val) logger.dumpkvs() if callback is not None: # Only stop training if return value is False, not when it is None. This is for backwards # compatibility with callbacks that have no return statement. if callback(locals(), globals()) is False: break if n_timesteps > total_timesteps: break return self class Runner(AbstractEnvRunner): def __init__(self, *, env, model, n_steps, gamma, lam): """ A runner to learn the policy of an environment for a model :param env: (Gym environment) The environment to learn from :param model: (Model) The model to learn :param n_steps: (int) The number of steps to run for each environment :param gamma: (float) Discount factor :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator """ super().__init__(env=env, model=model, n_steps=n_steps) self.lam = lam self.gamma = gamma def run(self): """ Run a learning step of the model :return: - observations: (np.ndarray) the observations - rewards: (np.ndarray) the rewards - masks: (numpy bool) whether an episode is over or not - actions: (np.ndarray) the actions - values: (np.ndarray) the value function output - negative log probabilities: (np.ndarray) - states: (np.ndarray) the internal states of the recurrent policies - infos: (dict) the extra information of the model """ # mb stands for minibatch mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], [] mb_states = self.states ep_infos = [] while True: actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones) mb_obs.append(self.obs.copy()) mb_actions.append(actions) mb_values.append(values) mb_neglogpacs.append(neglogpacs) mb_dones.append(self.dones) clipped_actions = actions # Clip the actions to avoid out of bound error if isinstance(self.env.action_space, gym.spaces.Box): clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high) self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions) for info in infos: maybe_ep_info = info.get('episode') if maybe_ep_info is not None: ep_infos.append(maybe_ep_info) mb_rewards.append(rewards) if self.dones: print("Episode finished. 
Reward: {:.2f} {} Steps".format(np.sum(mb_rewards), len(mb_rewards))) if len(mb_rewards) >= self.n_steps: break # batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype) mb_rewards = np.asarray(mb_rewards, dtype=np.float32) mb_actions = np.asarray(mb_actions) mb_values = np.asarray(mb_values, dtype=np.float32) mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32) mb_dones = np.asarray(mb_dones, dtype=np.bool) last_values = self.model.value(self.obs, self.states, self.dones) # discount/bootstrap off value fn mb_advs = np.zeros_like(mb_rewards) true_reward = np.copy(mb_rewards) last_gae_lam = 0 for step in reversed(range(self.n_steps)): if step == self.n_steps - 1: nextnonterminal = 1.0 - self.dones nextvalues = last_values else: nextnonterminal = 1.0 - mb_dones[step + 1] nextvalues = mb_values[step + 1] delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step] mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam mb_returns = mb_advs + mb_values mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \ map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward)) return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
52.213542
121
0.572569
9,617
0.959302
0
0
0
0
0
0
1,949
0.194414
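The reversed loop at the end of Runner.run above is Generalized Advantage Estimation. A standalone worked version of the same recursion in plain numpy, with the slightly simpler convention that dones[t] marks termination at step t (the buffer in Runner.run stores the done flag observed before each step):

import numpy as np

def gae(rewards, values, dones, last_value, gamma=0.99, lam=0.95):
    # delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
    # A_t     = delta_t + gamma * lam * (1 - done_t) * A_{t+1}
    n = len(rewards)
    advantages = np.zeros(n)
    last_gae_lam = 0.0
    for t in reversed(range(n)):
        next_value = last_value if t == n - 1 else values[t + 1]
        nonterminal = 1.0 - dones[t]
        delta = rewards[t] + gamma * next_value * nonterminal - values[t]
        last_gae_lam = delta + gamma * lam * nonterminal * last_gae_lam
        advantages[t] = last_gae_lam
    return advantages, advantages + values  # advantages and returns

adv, ret = gae(rewards=np.array([1.0, 1.0, 1.0]),
               values=np.array([0.5, 0.5, 0.5]),
               dones=np.array([0.0, 0.0, 1.0]),
               last_value=0.0)
print(adv, ret)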
f7fafc3eca2a0d5f684ce78dbf8d565f8e0da8a0
787
py
Python
craw/modules/trail/trails/feeds/urlvir.py
xuluhang/DomainBlockList
e9e69138ffdba6a73741fe204306f1f0b66eff19
[ "MIT" ]
19
2019-11-25T09:02:15.000Z
2021-07-24T12:05:28.000Z
craw/modules/trail/trails/feeds/urlvir.py
xuluhang/DomainBlockList
e9e69138ffdba6a73741fe204306f1f0b66eff19
[ "MIT" ]
1
2019-11-25T09:06:08.000Z
2019-11-25T09:06:08.000Z
craw/modules/trail/trails/feeds/urlvir.py
xuluhang/DomainBlockList
e9e69138ffdba6a73741fe204306f1f0b66eff19
[ "MIT" ]
10
2019-11-26T02:42:02.000Z
2021-08-28T07:16:08.000Z
#!/usr/bin/env python2 """ Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/) See the file 'LICENSE' for copying permission """ from craw.modules.trail.plugins.util import wget_content __url__ = "http://www.urlvir.com/export-hosts/" __check__ = "Updated on" __info__ = "malware" __reference__ = "urlvir.com" maintainer_url = __reference__ maintainer = "urlvir" list_source_url = __url__ category = __info__ def fetch(): retval = {} content = wget_content(__url__) if __check__ in content: for line in content.split('\n'): line = line.strip() if not line or line.startswith('#') or '.' not in line: continue retval[line.strip()] = (__info__, __reference__) return retval
23.848485
83
0.66709
0
0
0
0
0
0
0
0
247
0.31385
f7fb1109bf89db5bf87c82699fc7b9493c2500d3
1,035
py
Python
tests/continuous_integration.py
kfaRabi/online-judge-tools
79de8d37e1aa78a7c4c82c6a666f1f1602caf545
[ "MIT" ]
null
null
null
tests/continuous_integration.py
kfaRabi/online-judge-tools
79de8d37e1aa78a7c4c82c6a666f1f1602caf545
[ "MIT" ]
null
null
null
tests/continuous_integration.py
kfaRabi/online-judge-tools
79de8d37e1aa78a7c4c82c6a666f1f1602caf545
[ "MIT" ]
null
null
null
import os import subprocess import sys import unittest # TODO: these command should be written at once, at only .travis.yml or at only here paths = ['oj', 'onlinejudge', 'setup.py', 'tests'] class ContinuousIntegrationTest(unittest.TestCase): """A dummy test to run the commands same to CI on local environments""" @unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml') def test_isort(self): subprocess.check_call(['isort', '--check-only', '--diff', '--recursive'] + paths, stdout=sys.stdout, stderr=sys.stderr) @unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml') def test_yapf(self): output = subprocess.check_output(['yapf', '--diff', '--recursive'] + paths, stderr=sys.stderr) self.assertEqual(output, b'') @unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml') def test_mypy(self): subprocess.check_call(['mypy', '--show-traceback'] + paths, stdout=sys.stdout, stderr=sys.stderr)
39.807692
127
0.68599
839
0.810628
0
0
694
0.670531
0
0
426
0.411594
f7fbd980831ccec066261d37e528035e5f2d7c7a
12,278
py
Python
open-hackathon-client/src/client/config_sample.py
overbest/open-hackathon
62e085fbe603bcb00ca56d2b96cfc43bf44c710b
[ "MIT" ]
null
null
null
open-hackathon-client/src/client/config_sample.py
overbest/open-hackathon
62e085fbe603bcb00ca56d2b96cfc43bf44c710b
[ "MIT" ]
null
null
null
open-hackathon-client/src/client/config_sample.py
overbest/open-hackathon
62e085fbe603bcb00ca56d2b96cfc43bf44c710b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # ----------------------------------------------------------------------------------- # Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved. # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ----------------------------------------------------------------------------------- # "javascript" section for javascript. see @app.route('/config.js') in app/views.py # NOTE: all following key/secrets for test purpose. HOSTNAME = "http://localhost" # host name of the UI site # hacking.kaiyuanshe.cn is used for wechat oauth login # HOSTNAME = "http://hacking.kaiyuanshe.cn" # HOSTNAME = "http://open-hackathon-dev.chinacloudapp.cn" # host name of the UI site # HOSTNAME = "http://hacking.kaiyuanshe.cn" QQ_OAUTH_STATE = "openhackathon" # todo state should be constant. Actually it should be unguessable to prevent CSFA HACKATHON_API_ENDPOINT = "http://localhost:15000" # HACKATHON_API_ENDPOINT = "http://open-hackathon-dev.chinacloudapp.cn:15000" # HACKATHON_API_ENDPOINT = "http://hacking.kaiyuanshe.cn:15000" # github key for `localhost` GITHUB_CLIENT_ID = "b44f3d47bdeb26b9c4e6" GITHUB_CLIENT_SECRET = "98de14161c4b2ed3ea7a19787d62cda73b8e292c" # github oauth key for `open-hackathon-dev.chinacloudapp.cn` # GITHUB_CLIENT_ID = "b8e407813350f26bf537" # GITHUB_CLIENT_SECRET = "daa78ae27e13c9f5b4a884bd774cadf2f75a199f" QQ_CLIENT_ID = "101200890" QQ_CLIENT_SECRET = "88ad67bd4521c4cc47136854781cb9b5" QQ_META_CONTENT = "274307566465013314076545663016134754100636" WECHAT_APP_ID = "wxe75b8aef71c2059f" WECHAT_SECRET = "4532b90750f4c7bc70fcfbc42d881622" WECHAT_OAUTH_STATE = "openhackathon" # NOTE: may be should be same as QQ_OAUTH_STATE? 
WEIBO_CLIENT_ID = "479757037" WEIBO_CLIENT_SECRET = "efc5e75ff8891be37d90b4eaec5c02de" WEIBO_META_CONTENT = "ae884e09bc02b700" LIVE_CLIENT_ID = "000000004414E0A6" LIVE_CLIENT_SECRET = "b4mkfVqjtwHY2wJh0T4tj74lxM5LgAT2" ALAUDA_CLIENT_ID = "4VR9kzNZVyWcnk9OnAwMuSus7xOOcozJIpic6W6y" ALAUDA_CLIENT_SECRET = "E5PUL5h9feLlEirec5HQhjIzYecv7vVbEBjWLBkRMoCoFXdvS1PzNmd4AAeNgu4M2AJ87uGnnJaoDLCcDuVxkBoHRWCn6LmfB4SKK1Dty1SkGukkTcZPEk9wpHLSiRQ3" Config = { "environment": "local", "app": { "secret_key": "secret_key" }, "login": { "github": { "client_id": GITHUB_CLIENT_ID, "access_token_url": 'https://github.com/login/oauth/access_token?client_id=%s&client_secret=%s&redirect_uri=%s/github&code=' % ( GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, HOSTNAME), "user_info_url": 'https://api.github.com/user?access_token=', "emails_info_url": 'https://api.github.com/user/emails?access_token=' }, "qq": { "client_id": QQ_CLIENT_ID, "meta_content": QQ_META_CONTENT, "access_token_url": 'https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=%s&client_secret=%s&redirect_uri=%s/qq&code=' % ( QQ_CLIENT_ID, QQ_CLIENT_SECRET, HOSTNAME), "openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=', "user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s' }, "wechat": { "client_id": WECHAT_APP_ID, "access_token_url": "https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%%s&grant_type=authorization_code" % ( WECHAT_APP_ID, WECHAT_SECRET), "user_info_url": "https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s" }, "weibo": { "client_id": WEIBO_CLIENT_ID, "meta_content": WEIBO_META_CONTENT, "user_info_url": 'https://api.weibo.com/2/users/show.json?access_token=', "email_info_url": 'https://api.weibo.com/2/account/profile/email.json?access_token=', "access_token_url": 'https://api.weibo.com/oauth2/access_token?client_id=%s&client_secret=%s&grant_type=authorization_code&redirect_uri=%s/weibo&code=' % ( WEIBO_CLIENT_ID, WEIBO_CLIENT_SECRET, HOSTNAME) }, "live": { "client_id": LIVE_CLIENT_ID, "client_secret": LIVE_CLIENT_SECRET, "redirect_uri": '%s/live' % HOSTNAME, "access_token_url": 'https://login.live.com/oauth20_token.srf', "user_info_url": 'https://apis.live.net/v5.0/me?access_token=' }, "alauda": { "client_id": ALAUDA_CLIENT_ID, "client_secret": ALAUDA_CLIENT_SECRET, "redirect_uri": '%s/alauda' % HOSTNAME, "access_token_url": 'http://console.int.alauda.io/oauth/token' }, "provider_enabled": ["github", "wechat"], "session_valid_time_minutes": 60 }, "hackathon-api": { "endpoint": HACKATHON_API_ENDPOINT }, "javascript": { "github": { "authorize_url": "https://github.com/login/oauth/authorize?client_id=%s&redirect_uri=%s/github&scope=user" % ( GITHUB_CLIENT_ID, HOSTNAME) }, "weibo": { "authorize_url": "https://api.weibo.com/oauth2/authorize?client_id=%s&redirect_uri=%s/weibo&scope=all" % ( WEIBO_CLIENT_ID, HOSTNAME) }, "qq": { "authorize_url": "https://graph.qq.com/oauth2.0/authorize?client_id=%s&redirect_uri=%s/qq&scope=get_user_info&state=%s&response_type=code" % ( QQ_CLIENT_ID, HOSTNAME, QQ_OAUTH_STATE) }, "wechat": { "authorize_url": "https://open.weixin.qq.com/connect/qrconnect?appid=%s&redirect_uri=%s/wechat&response_type=code&scope=snsapi_login&state=%s#wechat_redirect" % ( WECHAT_APP_ID, HOSTNAME, WECHAT_OAUTH_STATE) }, "live": { "authorize_url": "https://login.live.com/oauth20_authorize.srf?client_id=%s&scope=wl.basic+,wl.emails&response_type=code&redirect_uri=%s/live" % ( LIVE_CLIENT_ID, HOSTNAME) }, "alauda": { 
"authorize_url": "http://console.int.alauda.io/oauth/authorize?response_type=code&client_id=%s&state=state&redirect_uri=%s/alauda" % ( ALAUDA_CLIENT_ID, HOSTNAME) }, "hackathon": { "endpoint": HACKATHON_API_ENDPOINT }, "apiconfig": { "proxy": HACKATHON_API_ENDPOINT, "api": { "admin": { "hackathon": { "": ["get", "post", "put", "delete"], "checkname": ["get"], "list": ["get"], "online": ["post"], "applyonline": ["post"], "offline": ["post"], "tags": ["get", "post", "put", "delete"], "config": ["get", "post", "put", "delete"], "administrator": { "": ["put", "post", "delete"], "list": ["get"] }, "template": { "": ["post", "delete"], "list": ["get"], "check": ["get"] }, "organizer": { "": ["get", "post", "put", "delete"] }, "award": { "": ["get", "post", "put", "delete"], "list": ["get"] }, "notice": { "": ["get", "post", "put", "delete"] } }, "registration": { "": ["get", "post", "delete", "put"], "list": ["get"] }, "azure": { "": ["get", "post", "delete", "put"], "checksubid": ["post"] }, "experiment": { "list": ["get"], "": ["post", "put"] }, "team": { "list": ["get"], "score": { "list": ["get"] }, "award": ["get", "post", "delete"] }, "user": { "list": ["get"] }, "hostserver": { "": ["get", "post", "delete", "put"], "list": ["get"] } }, "template": { "": ["get", "post", "delete", "put"], "file": ["post"], "list": ["get"], "check": ["get"] }, "user": { "": ["get"], "login": ["post", "delete"], "experiment": { "": ["get", "post", "delete", "put"] }, "registration": { "": ["put", "post", "get"], "checkemail": ["get"], "list": ["get"] }, "profile": { "": ["post", "put"] }, "picture": { "": ["put"] }, "team": { "member": ["get"] }, "hackathon": { "like": ["get", "post", "delete"] }, "notice": { "read": ["put"] }, "show": { "list": ["get"] }, "file": { "": ["post"] } }, "hackathon": { "": ["get"], "list": ["get"], "stat": ["get"], "template": ["get"], "team": { "list": ["get"] }, "registration": { "list": ["get"] }, "show": { "list": ["get"] }, "grantedawards": ["get"], "notice": { "list": ["get"] } }, "team": { "": ["get", "post", "put", "delete"], "score": ["get", "post", "put"], "member": { "": ["post", "put", "delete"], "list": ["get"] }, "show": ["get", "post", "delete"], "template": ["post", "delete"] }, "talent": { "list": ["get"] }, "grantedawards": ["get"] } } } }
42.93007
174
0.476136
0
0
0
0
0
0
0
0
6,557
0.534045
f7fbf451f7ab0b316753c8ad61a542b73cbff82d
14,904
py
Python
processing_provider/Rast_fillRasterwithPatches.py
geodourados/lftools
4b9d703513bd3d49ac7952014575bf95492a2d90
[ "MIT" ]
1
2022-03-28T22:18:09.000Z
2022-03-28T22:18:09.000Z
processing_provider/Rast_fillRasterwithPatches.py
geodourados/lftools
4b9d703513bd3d49ac7952014575bf95492a2d90
[ "MIT" ]
null
null
null
processing_provider/Rast_fillRasterwithPatches.py
geodourados/lftools
4b9d703513bd3d49ac7952014575bf95492a2d90
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ fillRasterwithPatches.py *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Leandro França' __date__ = '2020-09-01' __copyright__ = '(C) 2020, Leandro França' from PyQt5.QtCore import QCoreApplication, QVariant from qgis.core import (QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem) from osgeo import osr, gdal_array, gdal #https://gdal.org/python/ from math import floor, ceil import numpy as np from lftools.geocapt.dip import Interpolar from lftools.geocapt.imgs import Imgs import os from qgis.PyQt.QtGui import QIcon class FillRasterwithPatches(QgsProcessingAlgorithm): LOC = QgsApplication.locale()[:2] def translate(self, string): return QCoreApplication.translate('Processing', string) def tr(self, *string): # Traduzir para o portugês: arg[0] - english (translate), arg[1] - português if self.LOC == 'pt': if len(string) == 2: return string[1] else: return self.translate(string[0]) else: return self.translate(string[0]) def createInstance(self): return FillRasterwithPatches() def name(self): return 'fillrasterwithpatches' def displayName(self): return self.tr('Fill with patches', 'Remendar vazios de raster') def group(self): return self.tr('Raster') def groupId(self): return 'raster' def tags(self): return self.tr('fill,hole,raster,cloud,remove,drone,patch').split(',') def icon(self): return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/raster.png')) txt_en = 'Fills Raster null pixels (no data) with data obtained from other smaller raster layers (Patches).' txt_pt = 'Preenche vazios de Raster (pixels nulos) com dados obtidos de outras camadas raster menores (Remendos).' 
figure = 'images/tutorial/raster_fill_holes.jpg' def shortHelpString(self): social_BW = Imgs().social_BW footer = '''<div align="center"> <img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''"> </div> <div align="right"> <p align="right"> <b>'''+self.tr('Author: Leandro Franca', 'Autor: Leandro França')+'''</b> </p>'''+ social_BW + '''</div> </div>''' return self.tr(self.txt_en, self.txt_pt) + footer RasterIN ='RasterIN' PATCHES = 'PATCHES' RESAMPLING = 'RESAMPLING' RasterOUT = 'RasterOUT' OPEN = 'OPEN' def initAlgorithm(self, config=None): # INPUT self.addParameter( QgsProcessingParameterRasterLayer( self.RasterIN, self.tr('Input Raster', 'Raster de Entrada'), [QgsProcessing.TypeRaster] ) ) self.addParameter( QgsProcessingParameterMultipleLayers( self.PATCHES, self.tr('Patch Layers', 'Rasters de Remendo'), layerType = QgsProcessing.TypeRaster ) ) interp = [self.tr('Nearest neighbor', 'Vizinho mais próximo'), self.tr('Bilinear'), self.tr('Bicubic', 'Bicúbica')] self.addParameter( QgsProcessingParameterEnum( self.RESAMPLING, self.tr('Interpolation', 'Interpolação'), options = interp, defaultValue= 0 ) ) # OUTPUT self.addParameter( QgsProcessingParameterFileDestination( self.RasterOUT, self.tr('Patched Image', 'Imagem Remendada'), fileFilter = 'GeoTIFF (*.tif)' ) ) self.addParameter( QgsProcessingParameterBoolean( self.OPEN, self.tr('Load patched Image', 'Carregar Imagem Remendada'), defaultValue= True ) ) def processAlgorithm(self, parameters, context, feedback): RasterIN = self.parameterAsRasterLayer( parameters, self.RasterIN, context ) if RasterIN is None: raise QgsProcessingException(self.invalidSourceError(parameters, self.RasterIN)) RasterIN = RasterIN.dataProvider().dataSourceUri() PatchesLayers = self.parameterAsLayerList( parameters, self.PATCHES, context ) reamostragem = self.parameterAsEnum( parameters, self.RESAMPLING, context ) reamostragem = ['nearest','bilinear','bicubic'][reamostragem] RGB_Output = self.parameterAsFileOutput( parameters, self.RasterOUT, context ) Carregar = self.parameterAsBool( parameters, self.OPEN, context ) limiar = 240 # Abrir Raster layer como array image = gdal.Open(RasterIN) prj=image.GetProjection() CRS=osr.SpatialReference(wkt=prj) geotransform = image.GetGeoTransform() n_bands = image.RasterCount # Número de bandas cols = image.RasterXSize # Number of columns rows = image.RasterYSize # Number of rows # Origem e resolucao da imagem ulx, xres, xskew, uly, yskew, yres = geotransform origem = (ulx, uly) resol_X = abs(xres) resol_Y = abs(yres) if n_bands ==1: feedback.pushInfo(self.tr('Opening raster band...', 'Abrindo banda do raster...')) band1 = image.GetRasterBand(1).ReadAsArray() if n_bands >=3: feedback.pushInfo(self.tr('Opening Band R...', 'Abrindo Banda R...')) band1 = image.GetRasterBand(1).ReadAsArray() feedback.pushInfo(self.tr('Opening Band G...', 'Abrindo Banda G...')) band2 = image.GetRasterBand(2).ReadAsArray() feedback.pushInfo(self.tr('Opening Band B...', 'Abrindo Banda B...')) band3 = image.GetRasterBand(3).ReadAsArray() # Transparência if n_bands == 4: feedback.pushInfo(self.tr('Opening Band Alpha...', 'Abrindo Banda Alfa...')) band4 = image.GetRasterBand(4).ReadAsArray() Pixel_Nulo = image.GetRasterBand(1).GetNoDataValue() if Pixel_Nulo == None: Pixel_Nulo = 0 image=None # Fechar imagem # Número de pixels para processamento TAM = 0 for Remendo in PatchesLayers: Rem_Path = Remendo.dataProvider().dataSourceUri() Rem = gdal.Open(Rem_Path) # Rem_cols = Rem.RasterXSize # Number of columns Rem_rows = 
Rem.RasterYSize # Number of rows TAM += Rem_rows # Remendos total = 100.0 / TAM cont = 0 for Remendo in PatchesLayers: feedback.pushInfo((self.tr('Processing Layer: {}', 'Processando Camada: {}')).format(Remendo)) Rem_Path = Remendo.dataProvider().dataSourceUri() Rem = gdal.Open(Rem_Path) ulx, xres, xskew, uly, yskew, yres = Rem.GetGeoTransform() Rem_origem = (ulx, uly) Rem_resol_X = abs(xres) Rem_resol_Y = abs(yres) Rem_cols = Rem.RasterXSize # Number of columns Rem_rows = Rem.RasterYSize # Number of rows lrx = ulx + (Rem_cols * xres) lry = uly + (Rem_rows * yres) bbox = [ulx, lrx, lry, uly] Rem_nulo = Rem.GetRasterBand(1).GetNoDataValue() if Rem_nulo == None: Rem_nulo = 0 Rem_band1 = Rem.GetRasterBand(1).ReadAsArray() if n_bands >1: Rem_band2 = Rem.GetRasterBand(2).ReadAsArray() Rem_band3 = Rem.GetRasterBand(3).ReadAsArray() # Limites de Varredura row_ini = int(round((origem[1]-uly)/resol_Y - 0.5)) row_fim = int(round((origem[1]-lry)/resol_Y - 0.5)) col_ini = int(round((ulx - origem[0])/resol_X - 0.5)) col_fim = int(round((lrx - origem[0])/resol_X - 0.5)) # Varrer Raster if n_bands == 4: for lin in range(row_ini, row_fim): for col in range(col_ini, col_fim): px_value = band4[lin][col] if px_value == 0 or band1[lin][col] > limiar: # Verificar Limiar X = origem[0] + resol_X*(col + 0.5) Y = origem[1] - resol_Y*(lin + 0.5) band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo) band2[lin][col] = Interpolar(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo) band3[lin][col] = Interpolar(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo) cont += 1 feedback.setProgress(int(cont * total)) if feedback.isCanceled(): break elif n_bands == 3: for lin in range(row_ini, row_fim): for col in range(col_ini, col_fim): px_value = band1[lin][col] if px_value == Pixel_Nulo or band1[lin][col] > limiar: # Verificar Limiar X = origem[0] + resol_X*(col + 0.5) Y = origem[1] - resol_Y*(lin + 0.5) band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo) band2[lin][col] = Interpolar(X, Y, Rem_band2, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo) band3[lin][col] = Interpolar(X, Y, Rem_band3, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo) cont += 1 feedback.setProgress(int(cont * total)) if feedback.isCanceled(): break elif n_bands == 1: for lin in range(row_ini, row_fim): for col in range(col_ini, col_fim): px_value = band1[lin][col] if px_value == Pixel_Nulo or band1[lin][col] > limiar: # Verificar Limiar X = origem[0] + resol_X*(col + 0.5) Y = origem[1] - resol_Y*(lin + 0.5) band1[lin][col] = Interpolar(X, Y, Rem_band1, Rem_origem, Rem_resol_X, Rem_resol_Y, reamostragem, Rem_nulo) cont += 1 feedback.setProgress(int(cont * total)) if feedback.isCanceled(): break Rem = None # Fechar imagem # Criar imagem RGB feedback.pushInfo(self.tr('Saving Raster...', 'Salvando Raster...')) GDT = gdal_array.NumericTypeCodeToGDALTypeCode(band1.dtype) if n_bands ==1: RASTER = gdal.GetDriverByName('GTiff').Create(RGB_Output, cols, rows, 1, GDT) else: RASTER = gdal.GetDriverByName('GTiff').Create(RGB_Output, cols, rows, 3, GDT) RASTER.SetGeoTransform(geotransform) # specify coords RASTER.SetProjection(CRS.ExportToWkt()) # export coords to file if n_bands ==1: feedback.pushInfo(self.tr('Writing rater band...', 'Escrevendo banda do raster...')) banda = RASTER.GetRasterBand(1) banda.WriteArray(band1) banda.SetNoDataValue(Pixel_Nulo) else: 
feedback.pushInfo(self.tr('Writing Band R...', 'Escrevendo Banda R...')) bandaR = RASTER.GetRasterBand(1) bandaR.WriteArray(band1) feedback.pushInfo(self.tr('Writing Band G...', 'Escrevendo Banda G...')) bandaG = RASTER.GetRasterBand(2) bandaG.WriteArray(band2) feedback.pushInfo(self.tr('Writing Band B...', 'Escrevendo Banda B...')) bandaB = RASTER.GetRasterBand(3) bandaB.WriteArray(band3) feedback.pushInfo(self.tr('Saving raster...', 'Salvando raster...')) RASTER.FlushCache() # Escrever no disco RASTER = None # Salvar e fechar feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!')) feedback.pushInfo(self.tr('Leandro Franca - Cartographic Engineer', 'Leandro França - Eng Cart')) self.CAMINHO = RGB_Output self.CARREGAR = Carregar return {self.RasterOUT: RGB_Output} # Carregamento de arquivo de saída CAMINHO = '' CARREGAR = True def postProcessAlgorithm(self, context, feedback): if self.CARREGAR: rlayer = QgsRasterLayer(self.CAMINHO, self.tr('Patched Image', 'Imagem Remendada')) QgsProject.instance().addMapLayer(rlayer) return {}
41.51532
135
0.541063
12,564
0.842091
0
0
0
0
0
0
3,151
0.211193
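The patching loop above delegates resampling to lftools' Interpolar helper, whose implementation is not shown here. As a rough, hypothetical illustration of what bilinear resampling of a patch raster at a world coordinate involves (not the actual lftools code), using the same pixel-center convention as the loop above:

import numpy as np

def bilinear_sample(band, origin, res_x, res_y, X, Y, nodata=0):
    # Invert X = origin[0] + res_x*(col + 0.5), Y = origin[1] - res_y*(row + 0.5).
    col = (X - origin[0]) / res_x - 0.5
    row = (origin[1] - Y) / res_y - 0.5
    r0, c0 = int(np.floor(row)), int(np.floor(col))
    if r0 < 0 or c0 < 0 or r0 + 1 >= band.shape[0] or c0 + 1 >= band.shape[1]:
        return nodata
    dr, dc = row - r0, col - c0
    w = band[r0:r0 + 2, c0:c0 + 2].astype(float)
    # Weighted average of the four surrounding pixel centers.
    return ((1 - dr) * (1 - dc) * w[0, 0] + (1 - dr) * dc * w[0, 1]
            + dr * (1 - dc) * w[1, 0] + dr * dc * w[1, 1])

band = np.arange(16, dtype=float).reshape(4, 4)
print(bilinear_sample(band, origin=(0.0, 4.0), res_x=1.0, res_y=1.0, X=1.5, Y=2.5))  # 5.0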
f7fc84f573aa97d3b828afe66e29e4f49f7bb79c
1,393
py
Python
quantlab/COCO/utils/inference.py
lukasc-ch/QuantLab
7ddcc51ec1131a58269768cd898ce04e8b49beb6
[ "Apache-2.0" ]
6
2019-05-24T17:39:07.000Z
2021-11-06T22:19:55.000Z
quantlab/COCO/utils/inference.py
lukasc-ch/QuantLab
7ddcc51ec1131a58269768cd898ce04e8b49beb6
[ "Apache-2.0" ]
null
null
null
quantlab/COCO/utils/inference.py
lukasc-ch/QuantLab
7ddcc51ec1131a58269768cd898ce04e8b49beb6
[ "Apache-2.0" ]
4
2019-05-24T17:39:15.000Z
2021-04-02T07:13:11.000Z
import matplotlib.patches as patches import matplotlib.pyplot as plt import numpy as np def view_instance(img, gt_label, pr_label=None): img = img.cpu() # gt_label = gt_label.cpu() # pr_label = pr_label.cpu() # c, h, w = img.shape # with open('/home/spmatteo/MSDocuments/QuantLab/COCO/coco.names', 'r') as f: # classes = [line.strip() for line in f.read().splitlines()] # cmap = plt.get_cmap('tab20b') # colors = [cmap(i) for i in np.linspace(0, 1, len(classes)-1)] # fig, ax = plt.subplots(1, figsize=(12, 9)) # ax.imshow(img.permute(1, 2, 0)) # h, w, c # # browse annotations and draw bounding boxes # bboxes = [] # if label is not None: # for i, annotation in enumerate(label): # cls = annotation[6] # if i < 6: # print(annotation, classes[int(cls)]) # color = colors[int(cls)] # bbox = patches.Rectangle((annotation[0], annotation[1]), annotation[2]-annotation[0], annotation[3]-annotation[1], # linewidth=2, edgecolor=color, facecolor='none', label=classes[int(cls)]) # ax.add_patch(bbox) # bboxes.append((bbox, classes[int(cls)], color)) # for bbox in bboxes: # ax.annotate(bbox[1], bbox[0].get_xy(), weight='bold', fontsize=10, color=bbox[2]) # plt.axis('off') # plt.show()
42.212121
128
0.580761
0
0
0
0
0
0
0
0
1,109
0.796123
f7fcc0247bffa7d5ad90651380c319258f099e35
633
py
Python
dockwidhistory.py
kimoamer/Clinic-Manager
53184a4e8f369bf083109d065b2042fc7cf5bfbd
[ "MIT" ]
3
2021-05-12T01:05:12.000Z
2022-02-11T15:43:00.000Z
dockwidhistory.py
kimoamer/Clinic-Manager
53184a4e8f369bf083109d065b2042fc7cf5bfbd
[ "MIT" ]
null
null
null
dockwidhistory.py
kimoamer/Clinic-Manager
53184a4e8f369bf083109d065b2042fc7cf5bfbd
[ "MIT" ]
null
null
null
from PyQt5.QtWidgets import QDialog from PyQt5.QtGui import QFont from PyQt5.QtCore import Qt from dockwina import Ui_Form as docka class Dialog(QDialog, docka): def __init__(self): super(Dialog, self).__init__() QDialog.__init__(self) self.setupUi(self) self.setWindowFlag(Qt.FramelessWindowHint) self.font1 = QFont("Tajawal", 9) self.label2.setFont(self.font1) self.label7.setFont(self.font1) self.label3.setFont(self.font1) self.label5.setFont(self.font1) self.label6.setFont(self.font1) self.label.setFont(self.font1)
33.315789
51
0.665087
493
0.778831
0
0
0
0
0
0
9
0.014218
f7fcf7559948b6752dd0ee377be44bd42c092522
351
py
Python
forest_lite/server/lib/palette.py
uk-gov-mirror/MetOffice.forest-lite
9406b53f7e6a9651eb675e0ac2e5945421b25557
[ "BSD-3-Clause" ]
6
2020-08-05T16:12:57.000Z
2022-01-06T01:34:19.000Z
forest_lite/server/lib/palette.py
uk-gov-mirror/MetOffice.forest-lite
9406b53f7e6a9651eb675e0ac2e5945421b25557
[ "BSD-3-Clause" ]
49
2020-08-14T13:58:32.000Z
2021-06-29T11:42:32.000Z
forest_lite/server/lib/palette.py
uk-gov-mirror/MetOffice.forest-lite
9406b53f7e6a9651eb675e0ac2e5945421b25557
[ "BSD-3-Clause" ]
2
2020-12-03T09:24:13.000Z
2021-04-11T06:10:36.000Z
import bokeh.palettes def all_palettes(): """List of palette definitions""" for name in bokeh.palettes.all_palettes: for number in bokeh.palettes.all_palettes[name]: yield { "name": name, "number": number, "palette": bokeh.palettes.all_palettes[name][number] }
27
68
0.566952
0
0
326
0.928775
0
0
0
0
56
0.159544
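A short usage sketch of the generator above, repeated inline so it runs standalone; it simply counts the palette definitions Bokeh ships and inspects the first one:

import bokeh.palettes

def all_palettes():
    for name in bokeh.palettes.all_palettes:
        for number in bokeh.palettes.all_palettes[name]:
            yield {"name": name,
                   "number": number,
                   "palette": bokeh.palettes.all_palettes[name][number]}

definitions = list(all_palettes())
print(len(definitions), definitions[0]["name"], definitions[0]["number"])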
f7fdd8880ea99f126ba61a61e3b34ab49ba52b93
1,549
py
Python
runtests.py
ombu/django-sortedm2m
2691cf00174577bc667d5d8c1d42071604ee2095
[ "BSD-3-Clause" ]
null
null
null
runtests.py
ombu/django-sortedm2m
2691cf00174577bc667d5d8c1d42071604ee2095
[ "BSD-3-Clause" ]
null
null
null
runtests.py
ombu/django-sortedm2m
2691cf00174577bc667d5d8c1d42071604ee2095
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import os, sys, warnings parent = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, parent) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings") import django from django.core.management import execute_from_command_line if django.VERSION < (1, 6): default_test_apps = [ 'sortedm2m_tests', 'test_south_support', ] else: default_test_apps = [ 'sortedm2m_tests', ] # Only test south support for Django 1.6 and lower. if django.VERSION < (1, 7): default_test_apps += [ 'test_south_support', ] def runtests(*args): if django.VERSION > (1, 8): warnings.simplefilter("error", Warning) warnings.filterwarnings("ignore", module="distutils") try: warnings.filterwarnings("ignore", category=ResourceWarning) except NameError: pass warnings.filterwarnings("ignore", "invalid escape sequence", DeprecationWarning) # Ignore a python 3.6 DeprecationWarning in ModelBase.__new__ that isn't # fixed in Django 1.x if sys.version_info > (3, 6) and django.VERSION < (2,): warnings.filterwarnings( "ignore", "__class__ not set defining", DeprecationWarning) test_apps = list(args or default_test_apps) execute_from_command_line([sys.argv[0], 'test', '--verbosity=1'] + test_apps) if __name__ == '__main__': runtests(*sys.argv[1:])
28.163636
88
0.654616
0
0
0
0
0
0
0
0
443
0.285991