blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20dd0349455e1e9388db2d86e19ca31baca89048 | 1a758ef862f733d98ddd8ebc8ade5cefd95c24f2 | /coffees/migrations/0008_auto_20160814_0003.py | 5e41ec9efc4be1c908ae538d5189a08874945ae6 | [] | no_license | ajajul/ReactJS_Python | f116b35394666c5b3f2419eb5d8d7aeb077d4a24 | 08310d56fa88f326ddbfdd4b189f2a3a71f76d99 | refs/heads/master | 2020-03-19T03:16:57.510672 | 2018-06-01T10:36:36 | 2018-06-01T10:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (0008) for the ``coffees`` app.

    Adds a dedicated one-off purchase price to ``CoffeeType`` and re-declares
    the existing ``amount`` field. Generated code — avoid hand edits once
    this migration has been applied anywhere.
    """
    # Must run after the sharedcoffeesticker migration.
    dependencies = [
        ('coffees', '0007_sharedcoffeesticker'),
    ]
    operations = [
        migrations.AddField(
            model_name='coffeetype',
            name='amount_one_off',
            # default=18 backfills existing rows; the b'...' verbose_name
            # indicates this file was generated under Python 2.
            field=models.DecimalField(default=18, verbose_name=b'Amount for One-off', max_digits=6, decimal_places=2),
        ),
        migrations.AlterField(
            model_name='coffeetype',
            name='amount',
            field=models.DecimalField(default=14, verbose_name=b'Amount', max_digits=6, decimal_places=2),
        ),
    ]
| [
"web.expert@aol.com"
] | web.expert@aol.com |
dc8f2cec24f68baa30d471ab0336fd32849e72b3 | b3e3284f3d7b66f237e60fdfb1a37db706363139 | /RST/app/ventas/migrations/0006_auto_20181115_1448.py | 2c7d3cc5a4d2ca91049f8615f3723445a2e45792 | [] | no_license | corporacionrst/administracion | 4caf1545c313eb36408850bb4506bbd0bf43d6e6 | 7405442b4f14a589d75a5e04250be123403180ec | refs/heads/master | 2020-04-11T00:04:06.017147 | 2018-12-11T21:46:49 | 2018-12-11T21:46:49 | 161,374,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.1.2 on 2018-11-15 14:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (0006) for the ``ventas`` app.

    Re-declares ``Orden.autoriza`` as an optional (blank=True) foreign key to
    ``usuario.Usuario``. Generated code — avoid hand edits once applied.
    """
    dependencies = [
        ('ventas', '0005_auto_20181114_2140'),
    ]
    operations = [
        migrations.AlterField(
            model_name='orden',
            name='autoriza',
            # on_delete=DO_NOTHING: Django takes no action when the referenced
            # user is deleted, so the database must tolerate (or enforce)
            # referential integrity for dangling references.
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='autoriza_compra', to='usuario.Usuario'),
        ),
    ]
| [
"admin@corporacionrst.com"
] | admin@corporacionrst.com |
1bf5340bdc57b8bda96c0813f2160f22f82707cd | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/AlipayPointBalanceGetRequest.py | b91714684cda57c1edc1a80c68ee348fdaba1f8e | [
"Apache-2.0"
] | permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 3,164 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayPointBalanceGetRequest(object):
    """Request object for the ``alipay.point.balance.get`` OpenAPI method.

    Collects the business payload (``biz_model``) together with the common
    gateway parameters (terminal info, product code, callback URLs and
    user-defined extras) and flattens them into the parameter dict that is
    posted to the gateway by ``get_params``.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # BUG FIX: this getter previously returned self._notify_url, so any
        # read of ``return_url`` silently yielded the notify URL instead.
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored so the gateway payload merge
        # in get_params() stays well-formed.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach a single user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Build the flat parameter dict sent to the gateway.

        Optional fields are included only when set; the business model is
        serialized to compact, key-sorted JSON.
        """
        params = dict()
        params[P_METHOD] = 'alipay.point.balance.get'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Return file-upload parameters; this API has none."""
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
5f2dc18f4472e07b921226d771f81105233c8771 | 399f325971dac17a2748acb336e7f44c345d0abb | /astrolab/model/labels.py | 746d747514e3c88cbfebbce038f2607e69d78f8e | [] | no_license | nasa-nccs-cds/astrolab2 | 230fc5b036c1dd07e711514079aad7b7cdc0233d | a82b9a4e9d2bfce99832db74680818fea5749218 | refs/heads/main | 2023-01-11T08:14:48.065441 | 2020-11-17T22:50:29 | 2020-11-17T22:50:29 | 306,711,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,780 | py | from collections import OrderedDict
from typing import List, Union, Dict, Callable, Tuple, Optional, Any
import collections.abc
from functools import partial
import ipywidgets as ipw
from ..graph.base import ActivationFlow
import traitlets.config as tlc
from astrolab.model.base import AstroConfigurable, Marker
import xarray as xa
import numpy as np
def h2c( hexColor: str ) -> List[float]:
    """Convert a hex color string (e.g. "#RRGGBB" or "#RRGGBBAA") into a list
    of channel floats in [0, 1]; a missing alpha channel defaults to opaque."""
    digits = hexColor.strip( "# " )
    channels = [ int( digits[pos:pos + 2], 16 ) / 255 for pos in range( 0, len( digits ), 2 ) ]
    if len( channels ) != 4:
        channels.append( 1.0 )   # 255/255 == fully opaque alpha
    return channels
def isIntRGB( color ):
    """Return True when *color* is a sequence with any component greater than 1,
    i.e. 0-255 integer channels rather than normalized 0-1 floats."""
    if not isinstance( color, collections.abc.Sequence ):
        return False
    return any( component > 1 for component in color )
def format_color( color: Union[str,List[Union[float,int]]] ) -> List[float]:
    """Normalize a color given as a hex string, 0-255 integer channels, or
    already-normalized 0-1 floats into a list of floats in [0, 1]."""
    if isinstance( color, str ):
        return h2c( color )
    if isIntRGB( color ):
        return [ channel / 255.0 for channel in color ]
    return color
def format_colors( classes: List[Tuple[str,Union[str,List[Union[float,int]]]]] ) -> List[List[float]]:
    """Normalize the color of every (label, color) pair in *classes*, dropping the labels."""
    normalized = []
    for _label, color in classes:
        normalized.append( format_color( color ) )
    return normalized
def set_alphas( colors, alpha ):
    """Return new colors in which every entry has its alpha channel set to *alpha*."""
    updated = []
    for color in colors:
        updated.append( set_alpha( color, alpha ) )
    return updated
def set_alpha( color, alpha ):
    """Return a copy of *color* (an RGB or RGBA list) whose alpha channel is *alpha*."""
    rgb = color[:3]
    return rgb + [ alpha ]
class Action:
    """Record of one user action (e.g. "mark") applied from *source* to a set
    of point ids (*pids*) under class id *cid*; extra keyword arguments are
    preserved verbatim and folded back into ``spec``."""
    def __init__(self, type: str, source: str, pids: List[int], cid, **kwargs ):
        self.type = type
        self.source = source
        self.pids = pids
        self.cid = cid
        self.args = kwargs
    def __repr__(self):
        return f"A[{self.type}:{self.source} cid:{self.cid} pids:{self.pids}]"
    def __eq__(self, action: "Action" ):
        # Equality ignores the extra kwargs; compare lazily, cheapest first.
        if self.type != action.type: return False
        if self.cid != action.cid: return False
        if self.source != action.source: return False
        return self.pids == action.pids
    @property
    def spec(self):
        """Serializable dict form of this action (kwargs merged in)."""
        return dict( atype=self.type, source=self.source, pids=self.pids, cid=self.cid, **self.args )
class LabelsManager(tlc.SingletonConfigurable, AstroConfigurable):
    """Singleton owning the class palette, user markers, and label spreading.

    State is split between the class palette (``_labels``/``_colors``, set by
    ``setLabels``), the user's selections (``_markers``, ``_actions``) and the
    per-point label array (``_labels_data``) that feeds the ActivationFlow
    label-spreading graph via ``spread``.
    """
    def __init__(self):
        super(LabelsManager, self).__init__()
        self._colors = None      # per-class colors; index 0 is the 'Unlabeled' color
        self._labels = None      # per-class names; index 0 is 'Unlabeled'
        # NOTE(review): both ``selectedClass`` and ``_selected_class`` exist.
        # Only ``_selected_class`` is updated by set_selected_class(), while
        # addAction(), selectedLabel and selectedColor() read ``selectedClass``
        # (which stays 0) — confirm whether they should use _selected_class.
        self.selectedClass = 0
        self._markers: List[Marker] = []
        self._flow: ActivationFlow = None
        self._actions = []
        self._labels_data: xa.DataArray = None   # int32 label per point; -1 marks nodata
        self._selected_class = 0
        self._optype = None      # last spread() mode, e.g. "neighbors" or "distance"
        self.template = None     # 1-D template array defining the point index space
        self.n_spread_iters = 1
        self.wSelectedClass: ipw.HBox = None     # lazily-built class-selector widget
        self._buttons = []
    @property
    def current_class(self) -> str:
        """Name of the currently selected class."""
        return self._labels[ self._selected_class ]
    @property
    def current_cid(self) -> int:
        """Integer id of the currently selected class."""
        return self._selected_class
    def set_selected_class(self, iclass, *args ):
        # *args absorbs the ipywidgets Button instance passed by on_click.
        self._selected_class = iclass
        # Highlight the selected class button with a yellow border.
        for iB, button in enumerate(self._buttons):
            if iB == self._selected_class: button.layout = {'border': '3px solid #FFFF00'}
            else: button.layout = {'border': '1px solid darkkhaki'}
    def gui( self ) -> ipw.DOMWidget:
        """Build (once) and return the class-selector button row."""
        if self.wSelectedClass is None:
            for iC, (color, label) in enumerate(zip( self._colors, self._labels )):
                button = ipw.Button( description=label, layout=ipw.Layout( flex='1 1 auto', height="auto"), border= '1px solid dimgrey' )
                button.style.button_color = color
                # partial binds the class index; the Button instance lands in *args.
                button.on_click( partial( self.set_selected_class, iC ) )
                self._buttons.append( button )
            self.wSelectedClass = ipw.HBox( self._buttons )
            self.wSelectedClass.layout = ipw.Layout( flex='1 1 auto', width = "100%" )
            self.set_selected_class( 0 )
        return self.wSelectedClass
    def flow(self) -> Optional[ActivationFlow]:
        """The label-spreading graph, or None if not yet attached."""
        return self._flow
    def addAction(self, type: str, source: str, pids: List[int] = None, cid=None, **kwargs ):
        """Record a user action; a "mark" action also creates a Marker."""
        # NOTE(review): reads self.selectedClass (always 0 here) — see __init__ note.
        if cid == None: cid = self.selectedClass
        new_action = Action(type, source, pids, cid, **kwargs)
        if type == "mark": self.addMarker( Marker(pids,cid) )
        print(f"ADD ACTION: {new_action}")
        self._actions.append( new_action )
    def popAction(self) -> Optional[Action]:
        """Remove and return the most recent action, or None if there is none."""
        try:
            action = self._actions.pop()
            print( f"POP ACTION: {action}" )
            return action
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # catching IndexError would suffice here.
        except:
            return None
    @property
    def classification(self) -> np.ndarray:
        # Raw class assignments from the spreading graph (flow must be set).
        return self._flow.C
    def initLabelsData( self, point_data: xa.DataArray = None ):
        """(Re)build the per-point label array from a template column of *point_data*."""
        nodata_value = -1
        if point_data is not None:
            # First feature column defines the point index space / coords.
            self.template = point_data[:,0].squeeze( drop=True )
            self.template.attrs = point_data.attrs
        if self.template is not None:
            # 0 = unlabeled everywhere the template has data, -1 where it doesn't.
            self._labels_data: xa.DataArray = xa.full_like( self.template, 0, dtype=np.int32 ).where( self.template.notnull(), nodata_value )
            self._labels_data.attrs['_FillValue'] = nodata_value
            self._labels_data.name = self.template.attrs['dsid'] + "_labels"
            self._labels_data.attrs[ 'long_name' ] = [ "labels" ]
    def getMarker( self, pid: int ) -> Optional[Marker]:
        """Return the first marker containing point id *pid*, if any."""
        for marker in self._markers:
            if pid in marker.pids: return marker
        return None
    def updateLabels(self):
        """Write every marker's class id into the per-point label array."""
        for marker in self._markers:
            for pid in marker.pids:
                self._labels_data[ pid ] = marker.cid
    def labels_data( self ) -> xa.DataArray:
        """Return the up-to-date label array; the positional copy() argument is
        xarray's ``deep`` flag, so a deep copy is made only in "distance" mode."""
        self.updateLabels()
        return self._labels_data.copy( self._optype == "distance" )
    # NOTE(review): declared @classmethod but the first parameter is named
    # ``self`` — this works, but renaming it ``cls`` would be clearer.
    @classmethod
    def getSortedLabels(self, labels_dset: xa.Dataset ) -> Tuple[np.ndarray,np.ndarray]:
        """Return ([index, label] rows, distances) for all labeled (>0) points."""
        labels: np.ndarray = labels_dset['C'].values
        distance: np.ndarray = labels_dset['D'].values
        indices = np.arange(labels.shape[0])
        indexed_labels = np.vstack( [ indices, labels ] ).transpose()
        selection = (labels > 0)
        filtered_labels = indexed_labels[selection]
        filtered_distance = distance[selection]
        return filtered_labels, filtered_distance
    def spread(self, optype: str, n_iters = None ) -> Optional[xa.Dataset]:
        """Spread the current labels through the flow graph; returns None without a flow."""
        if self._flow is None:
            return None
        # Consecutive "neighbors" spreads resume the previous computation;
        # any mode switch clears the flow state first.
        resume = ( optype == "neighbors" ) and ( self._optype == "neighbors" )
        if not resume: self._flow.clear()
        self._optype = optype
        labels_data = self.labels_data()
        niters = self.n_spread_iters if n_iters is None else n_iters
        return self._flow.spread( labels_data.values, niters )
    def clearTransient(self):
        # Drop a trailing 'Unlabeled' (cid == 0) marker, if present.
        if len(self._markers) > 0 and self._markers[-1].cid == 0:
            self._markers.pop(-1)
    def clearMarkers(self):
        """Delete all markers and reset the label array."""
        self._markers = []
        self.initLabelsData()
    def refresh(self):
        self.clearMarkers()
    def addMarker(self, marker: Marker ):
        """Add *marker*, removing its point ids from (and pruning) existing markers."""
        self.clearTransient()
        for pid in marker.pids: self.deletePid( pid )
        self._markers = list(filter( lambda m: not m.isEmpty(), self._markers ))
        self._markers.append(marker)
    def popMarker(self) -> Marker:
        """Remove and return the most recent marker, or None if there is none."""
        marker = self._markers.pop( -1 ) if len( self._markers ) else None
        return marker
    def deletePid(self, pid: int ) -> List[Marker]:
        """Remove *pid* from every marker; return the markers that contained it."""
        markers = []
        for marker in self._markers:
            if marker.deletePid( pid ): markers.append( marker )
        return markers
    @property
    def currentMarker(self) -> Marker:
        marker = self._markers[ -1 ] if len( self._markers ) else None
        return marker
    def getMarkers( self ) -> List[Marker]:
        return self._markers
    @property
    def selectedLabel(self):
        # NOTE(review): reads self.selectedClass (always 0) — see __init__ note.
        return self._labels[ self.selectedClass ]
    def selectedColor(self, mark: bool ) -> Tuple[int,List[float]]:
        """Return (class id, color); the 'Unlabeled' entry (0) when *mark* is False."""
        # NOTE(review): reads self.selectedClass (always 0) — see __init__ note.
        icolor = self.selectedClass if mark else 0
        return icolor, self._colors[ icolor ]
    @property
    def colors(self)-> List[Tuple]:
        return self._colors
    @property
    def labels(self) -> List[str]:
        return self._labels
    @property
    def nLabels(self) -> int:
        return len(self._labels)
    def setLabels(self, labels: List[Tuple[str, str]], **kwargs):
        """Install the class palette; prepends the reserved 'Unlabeled' entry.

        Raises if any user class reuses the reserved unlabeled color.
        """
        unlabeled_color = kwargs.get( 'unlabeled', "YELLOW" )
        label_list = [ ('Unlabeled', unlabeled_color ) ] + labels
        for ( label, color ) in labels:
            if color.upper() == unlabeled_color: raise Exception( f"{unlabeled_color} is a reserved color")
        self._colors = [ item[1] for item in label_list ]
        self._labels = [ item[0] for item in label_list ]
    def toDict( self, alpha ) -> OrderedDict:
        """Map each label name to its color with the alpha channel set to *alpha*."""
        labels_dict = OrderedDict()
        for index, label in enumerate(self._labels):
            labels_dict[ label ] = set_alpha( self._colors[index], alpha )
        return labels_dict
| [
"thomas.maxwell@nasa.gov"
] | thomas.maxwell@nasa.gov |
892133e6a10f09894aeb9a8d4cde9b7b9621cc7d | 0be2afad29a71785d64a0c493fcd2cec803464d8 | /train_visualization/plot.py | 5b32d22cd95c63aa4fffc276e43fc9384d490180 | [
"Apache-2.0"
] | permissive | hereismari/ajna | 7bf9fe58a4b64bc34b953e38936e23992d5e1f49 | e5db9a1cde88aba20e7b5738d8c434b9086721d5 | refs/heads/master | 2021-09-20T14:14:56.677711 | 2018-08-10T16:12:13 | 2018-08-10T16:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import matplotlib.pyplot as plt
import csv
import os
def read_and_preprocess_data(filename):
    """Read a TensorBoard-style CSV export (columns 'Step' and 'Value') and
    return a dict mapping each step to the mean of its logged values.

    Duplicate steps (e.g. from resumed runs) are averaged.

    Raises:
        Exception: if *filename* does not exist.
    """
    if not os.path.exists(filename):
        raise Exception('Arquivo %s não existe' % filename)
    # Accumulate every value observed per step, then average.
    values_by_step = {}
    # newline='' is required by the csv module so embedded newlines /
    # platform line endings are handled correctly.
    with open(filename, newline='') as csv_file:
        for row in csv.DictReader(csv_file):
            step = int(row['Step'])
            values_by_step.setdefault(step, []).append(float(row['Value']))
    return {step: sum(vals) / len(vals) for step, vals in values_by_step.items()}
def plot_loss(data, loss):
    """Plot the train (data[0]) and validation (data[1]) loss curves and show
    the figure titled with *loss*."""
    def _draw_series(series, label):
        # dicts preserve insertion order, so keys()/values() line up.
        xs = list(series.keys())
        ys = list(series.values())
        plt.plot(xs, ys, label=label)
    _draw_series(data[0], 'Treino')
    _draw_series(data[1], 'Validação')
    legend = plt.legend(loc='best', ncol=2, shadow=True, fancybox=True)
    legend.get_frame().set_alpha(0.5)
    plt.title(loss)
    plt.show()
def main():
    """Load train/test loss curves for each tracked metric and plot them."""
    filename_template = 'run_%s-tag-%s.csv'
    for loss in ('heatmaps_mse', 'radius_mse'):
        curves = [
            read_and_preprocess_data(filename_template % (split, loss))
            for split in ('train', 'test')
        ]
        plot_loss(curves, loss)
if __name__ == '__main__':
main()
| [
"mariannelinharesm@gmail.com"
] | mariannelinharesm@gmail.com |
c028dabd00a3e70a7e469bc1878d1f5f8573c78e | a0484a637cf60c223dc846440e11e345541680a5 | /src/kusto/azext_kusto/vendored_sdks/kusto/operations/_data_connection_operations.py | d8ba89afeffbd6aaa0ba5e00bd4acd71d8820992 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | TylerLeonhardt/azure-cli-extensions | c4caacb38d81e3dc84a4483eb4be380f0e3ddc91 | f3bdbd75cc39c2a302d1184c9e1e1fa5b368378d | refs/heads/master | 2022-12-25T07:05:51.282034 | 2020-08-28T13:19:09 | 2020-08-28T13:19:09 | 291,105,411 | 1 | 0 | MIT | 2020-08-28T17:22:41 | 2020-08-28T17:22:40 | null | UTF-8 | Python | false | false | 37,121 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DataConnectionOperations(object):
"""DataConnectionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~kusto_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Wired up by the generated service client; not meant to be
        # constructed directly (see the class docstring).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_by_database(
        self,
        resource_group_name, # type: str
        cluster_name, # type: str
        database_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["models.DataConnectionListResult"]
        """Returns the list of data connections of the given Kusto database.

        :param resource_group_name: The name of the resource group containing the Kusto cluster.
        :type resource_group_name: str
        :param cluster_name: The name of the Kusto cluster.
        :type cluster_name: str
        :param database_name: The name of the database in the Kusto cluster.
        :type database_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DataConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~kusto_management_client.models.DataConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # AutoRest-generated. The pager is lazy: no request is sent until the
        # returned ItemPaged is iterated.
        cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnectionListResult"]
        # Callers may extend/override the status-code -> exception mapping.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-14"
        def prepare_request(next_link=None):
            # First page builds the URL from the metadata template; subsequent
            # pages follow the service-provided nextLink verbatim.
            if not next_link:
                # Construct URL
                url = self.list_by_database.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
                    'databaseName': self._serialize.url("database_name", database_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('DataConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # This API does not page: no continuation token is returned.
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections'} # type: ignore
    def _data_connection_validation_initial(
        self,
        resource_group_name, # type: str
        cluster_name, # type: str
        database_name, # type: str
        data_connection_name=None, # type: Optional[str]
        properties=None, # type: Optional["models.DataConnection"]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.DataConnectionValidationListResult"
        """Send the initial (non-polling) validation POST.

        A 200 response carries the validation result; a 202 means the
        operation is still running and is polled by the public
        ``begin_data_connection_validation`` wrapper (which returns None here).
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnectionValidationListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Wrap the loose arguments into the request model expected by the service.
        _parameters = models.DataConnectionValidation(data_connection_name=data_connection_name, properties=properties)
        api_version = "2020-06-14"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._data_connection_validation_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(_parameters, 'DataConnectionValidation')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DataConnectionValidationListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _data_connection_validation_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnectionValidation'} # type: ignore
    def begin_data_connection_validation(
        self,
        resource_group_name, # type: str
        cluster_name, # type: str
        database_name, # type: str
        data_connection_name=None, # type: Optional[str]
        properties=None, # type: Optional["models.DataConnection"]
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller
        """Checks that the data connection parameters are valid.

        :param resource_group_name: The name of the resource group containing the Kusto cluster.
        :type resource_group_name: str
        :param cluster_name: The name of the Kusto cluster.
        :type cluster_name: str
        :param database_name: The name of the database in the Kusto cluster.
        :type database_name: str
        :param data_connection_name: The name of the data connection.
        :type data_connection_name: str
        :param properties: The data connection properties to validate.
        :type properties: ~kusto_management_client.models.DataConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either DataConnectionValidationListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~kusto_management_client.models.DataConnectionValidationListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnectionValidationListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        # Fire the initial request; the identity lambda as ``cls`` keeps the
        # raw PipelineResponse so the poller can inspect headers/status.
        raw_result = self._data_connection_validation_initial(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            database_name=database_name,
            data_connection_name=data_connection_name,
            properties=properties,
            cls=lambda x,y,z: x,
            **kwargs
        )
        # These only apply to the initial request, not to the polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the result model.
            deserialized = self._deserialize('DataConnectionValidationListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_data_connection_validation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnectionValidation'} # type: ignore
    def check_name_availability(
        self,
        resource_group_name, # type: str
        cluster_name, # type: str
        database_name, # type: str
        name, # type: str
        type, # type: Union[str, "models.Type"]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.CheckNameResult"
        """Checks that the data connection name is valid and is not already in use.

        :param resource_group_name: The name of the resource group containing the Kusto cluster.
        :type resource_group_name: str
        :param cluster_name: The name of the Kusto cluster.
        :type cluster_name: str
        :param database_name: The name of the database in the Kusto cluster.
        :type database_name: str
        :param name: Data Connection name.
        :type name: str
        :param type: The type of resource, Microsoft.Kusto/clusters/databases/dataConnections.
        :type type: str or ~kusto_management_client.models.Type
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CheckNameResult, or the result of cls(response)
        :rtype: ~kusto_management_client.models.CheckNameResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.CheckNameResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Wrap the loose name/type arguments into the request body model.
        _data_connection_name = models.DataConnectionCheckNameRequest(name=name, type=type)
        api_version = "2020-06-14"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self.check_name_availability.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(_data_connection_name, 'DataConnectionCheckNameRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('CheckNameResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/checkNameAvailability'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        cluster_name, # type: str
        database_name, # type: str
        data_connection_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "models.DataConnection"
        """Returns a data connection.

        :param resource_group_name: The name of the resource group containing the Kusto cluster.
        :type resource_group_name: str
        :param cluster_name: The name of the Kusto cluster.
        :type cluster_name: str
        :param database_name: The name of the database in the Kusto cluster.
        :type database_name: str
        :param data_connection_name: The name of the data connection.
        :type data_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DataConnection, or the result of cls(response)
        :rtype: ~kusto_management_client.models.DataConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Single synchronous GET; only HTTP 200 is treated as success.
        cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnection"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-14"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DataConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
database_name, # type: str
data_connection_name, # type: str
parameters, # type: "models.DataConnection"
**kwargs # type: Any
):
# type: (...) -> "models.DataConnection"
cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-14"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DataConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DataConnection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DataConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
cluster_name, # type: str
database_name, # type: str
data_connection_name, # type: str
parameters, # type: "models.DataConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates a data connection.
:param resource_group_name: The name of the resource group containing the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:param parameters: The data connection parameters supplied to the CreateOrUpdate operation.
:type parameters: ~kusto_management_client.models.DataConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DataConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~kusto_management_client.models.DataConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
database_name=database_name,
data_connection_name=data_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DataConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
database_name, # type: str
data_connection_name, # type: str
parameters, # type: "models.DataConnection"
**kwargs # type: Any
):
# type: (...) -> "models.DataConnection"
cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-14"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DataConnection')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DataConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DataConnection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DataConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
cluster_name, # type: str
database_name, # type: str
data_connection_name, # type: str
parameters, # type: "models.DataConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Updates a data connection.
:param resource_group_name: The name of the resource group containing the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:param parameters: The data connection parameters supplied to the Update operation.
:type parameters: ~kusto_management_client.models.DataConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DataConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~kusto_management_client.models.DataConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
database_name=database_name,
data_connection_name=data_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DataConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
database_name, # type: str
data_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-14"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'dataConnectionName': self._serialize.url("data_connection_name", data_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cluster_name, # type: str
database_name, # type: str
data_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the data connection with the given name.
:param resource_group_name: The name of the resource group containing the Kusto cluster.
:type resource_group_name: str
:param cluster_name: The name of the Kusto cluster.
:type cluster_name: str
:param database_name: The name of the database in the Kusto cluster.
:type database_name: str
:param data_connection_name: The name of the data connection.
:type data_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
database_name=database_name,
data_connection_name=data_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}'} # type: ignore
| [
"noreply@github.com"
] | TylerLeonhardt.noreply@github.com |
a0804d01fc82def3e989bd0bba3455642945a6d2 | 2166064a85df8bdf1c780ae2eda1b372bfa078ed | /face_client/face_client.py | ae9a2812e198e5263ff55f975abc762b7fa0afa2 | [
"MIT"
] | permissive | CosmoGlenns/automaticdj | 67016b4804d1adf42dd7905375650af0e4bb724f | 3880c175bc09c17ed9f71ba9902e348a00bb64ef | refs/heads/master | 2020-05-20T19:26:49.993705 | 2013-12-30T20:21:49 | 2013-12-30T20:21:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,305 | py | # -*- coding: utf-8 -*-
#
# Name: face.com Python API client library
# Description: face.com REST API Python client library.
#
# For more information about the API and the return values,
# visit the official documentation at http://developers.face.com/docs/api/.
#
# Author: Tomaž Muraus (http://www.tomaz-muraus.info)
# License: GPL (http://www.gnu.org/licenses/gpl.html)
# Version: 1.0
import urllib
import urllib2
import simplejson as json
import os.path
API_URL = 'http://api.face.com'
class FaceClient(object):
def __init__(self, api_key = None, api_secret = None):
if not api_key or not api_secret:
raise AttributeError('Missing api_key or api_secret argument')
self.api_key = api_key
self.api_secret = api_secret
self.format = 'json'
self.twitter_credentials = None
self.facebook_credentials = None
def set_twitter_user_credentials(self, user = None, password = None):
if not user or not password:
raise AttributeError('Missing Twitter username or password')
self.twitter_credentials = {'twitter_user': user,
'twitter_password': password}
def set_twitter_oauth_credentials(self, user = None, secret = None, token = None):
if not user or not secret or not token:
raise AttributeError('Missing one of the required arguments')
self.twitter_credentials = {'twitter_oauth_user': user,
'twitter_oauth_secret': secret,
'twitter_oauth_token': token}
def set_facebook_credentials(self, user = None, session = None):
if not user or not session:
raise AttributeError('Missing Facebook user or session argument')
self.facebook_credentials = {'fb_user': user,
'fb_session': session}
### Recognition engine methods ###
def faces_detect(self, urls = None, file = None, aggressive=False):
"""
Returns tags for detected faces in one or more photos, with geometric information
of the tag, eyes, nose and mouth, as well as the gender, glasses, and smiling attributes.
http://developers.face.com/docs/api/faces-detect/
"""
if not urls and not file:
raise AttributeError('Missing URLs/filename argument')
if file:
# Check if the file exists
if not os.path.exists(file):
raise IOError('File %s does not exist' % (file))
data = {'file': file}
else:
data = {'urls': urls}
if aggressive:
data['detector'] = 'Aggressive'
response = self.send_request('faces/detect', data)
return response
def faces_status(self, uids = None, namespace = None):
"""
Reports training set status for the specified UIDs.
http://developers.face.com/docs/api/faces-status/
"""
if not uids:
raise AttributeError('Missing user IDs')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, namespace = namespace)
response = self.send_request('faces/status', data)
return response
def faces_recognize(self, uids = None, urls = None, file = None, train = None, \
namespace = None, aggressive = None):
"""
Attempts to detect and recognize one or more user IDs' faces, in one or more photos.
For each detected face, the face.com engine will return the most likely user IDs,
or empty result for unrecognized faces. In addition, each tag includes a threshold
score - any score below this number is considered a low-probability hit.
http://developers.face.com/docs/api/faces-recognize/
"""
if not uids or (not urls and not file):
raise AttributeError('Missing required arguments')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids}
if file:
# Check if the file exists
if not os.path.exists(file):
raise IOError('File %s does not exist' % (file))
data.update({'file': file})
else:
data.update({'urls': urls})
if aggressive:
data['detector'] = 'Aggressive'
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, train = train, namespace = namespace)
response = self.send_request('faces/recognize', data)
return response
def faces_train(self, uids = None, namespace = None, callback = None):
"""
Calls the training procedure for the specified UIDs, and reports back changes.
http://developers.face.com/docs/api/faces-train/
"""
if not uids:
raise AttributeError('Missing user IDs')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, namespace = namespace, callback = callback)
response = self.send_request('faces/train', data)
return response
### Methods for managing face tags ###
def tags_get(self, uids = None, urls = None, pids = None, order = 'recent', \
limit = 5, together = False, filter = None, namespace = None):
"""
Returns saved tags in one or more photos, or for the specified User ID(s).
This method also accepts multiple filters for finding tags corresponding to
a more specific criteria such as front-facing, recent, or where two or more
users appear together in same photos.
http://developers.face.com/docs/api/tags-get/
"""
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
data = {'uids': uids,
'urls': urls,
'together': together,
'limit': limit}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, pids = pids, filter = filter, \
namespace = namespace)
response = self.send_request('tags/get', data)
return response
def tags_add(self, url = None, x = None, y = None, width = None, uid = None, \
tagger_id = None, label = None, password = None):
"""
Add a (manual) face tag to a photo. Use this method to add face tags where
those were not detected for completeness of your service.
http://developers.face.com/docs/api/tags-add/
"""
if not url or not x or not y or not width or not uid or not tagger_id:
raise AttributeError('Missing one of the required arguments')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
data = {'url': url,
'x': x,
'y': y,
'width': width,
'uid': uid,
'tagger_id': tagger_id}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, label = label, password = password)
response = self.send_request('tags/add', data)
return response
def tags_save(self, tids = None, uid = None, tagger_id = None, label = None, \
password = None):
"""
Saves a face tag. Use this method to save tags for training the face.com
index, or for future use of the faces.detect and tags.get methods.
http://developers.face.com/docs/api/tags-save/
"""
if not tids or not uid:
raise AttributeError('Missing required argument')
(facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
data = {'tids': tids,
'uid': uid}
self.__append_user_auth_data(data, facebook_uids, twitter_uids)
self.__append_optional_arguments(data, tagger_id = tagger_id, label = label, \
password = password)
response = self.send_request('tags/save', data)
return response
def tags_remove(self, tids = None, password = None):
"""
Remove a previously saved face tag from a photo.
http://developers.face.com/docs/api/tags-remove/
"""
if not tids:
raise AttributeError('Missing tag IDs')
data = {'tids': tids}
response = self.send_request('tags/remove', data)
return response
### Account management methods ###
def account_limits(self):
"""
Returns current rate limits for the account represented by the passed API key and Secret.
http://developers.face.com/docs/api/account-limits/
"""
response = self.send_request('account/limits')
return response['usage']
def account_users(self, namespaces = None):
"""
Returns current rate limits for the account represented by the passed API key and Secret.
http://developers.face.com/docs/api/account-limits/
"""
if not namespaces:
raise AttributeError('Missing namespaces argument')
response = self.send_request('account/users', {'namespaces': namespaces})
return response
def __check_user_auth_credentials(self, uids):
# Check if needed credentials are provided
facebook_uids = [uid for uid in uids.split(',') \
if uid.find('@facebook.com') != -1]
twitter_uids = [uid for uid in uids.split(',') \
if uid.find('@twitter.com') != -1]
if facebook_uids and not self.facebook_credentials:
raise AttributeError('You need to set Facebook credentials to perform action on Facebook users')
if twitter_uids and not self.twitter_credentials:
raise AttributeError('You need to set Twitter credentials to perform action on Twitter users')
return (facebook_uids, twitter_uids)
def __append_user_auth_data(self, data, facebook_uids, twitter_uids):
if facebook_uids:
data.update({'user_auth': 'fb_user:%s,fb_oauth_token:%s' % (self.facebook_credentials['fb_user'],
self.facebook_credentials['fb_session'])})
if twitter_uids:
# If both user/password and OAuth credentials are provided, use
# OAuth as default
if self.twitter_credentials.get('twitter_oauth_user', None):
data.update({'user_auth': 'twitter_oauth_user:%s,twitter_oauth_secret:%s,twitter_oauth_token:%s' %
(self.twitter_credentials['twitter_oauth_user'], self.twitter_credentials['twitter_oauth_secret'], \
self.twitter_credentials['twitter_oauth_token'])})
else:
data.update({'user_auth': 'twitter_user:%s,twitter_password:%s' % (self.twitter_credentials['twitter_user'],
self.twitter_credentials['twitter_password'])})
def __append_optional_arguments(self, data, **kwargs):
for key, value in kwargs.iteritems():
if value:
data.update({key: value})
def send_request(self, method = None, parameters = None):
url = '%s/%s' % (API_URL, method)
data = {'api_key': self.api_key,
'api_secret': self.api_secret,
'format': self.format}
if parameters:
data.update(parameters)
# raise Exception(url, data)
# Local file is provided, use multi-part form
if 'file' in parameters:
from multipart import Multipart
form = Multipart()
for key, value in data.iteritems():
if key == 'file':
file = open(value, 'r')
# with open(value, 'r') as file:
form.file(os.path.basename(key), os.path.basename(key), file.read())
else:
form.field(key, value)
(content_type, post_data) = form.get()
headers = {'Content-Type': content_type}
else:
post_data = urllib.urlencode(data)
headers = {}
request = urllib2.Request(url, headers = headers, data = post_data)
response = urllib2.urlopen(request)
response = response.read()
response_data = json.loads(response)
if 'status' in response_data and \
response_data['status'] == 'failure':
raise FaceError(response_data['error_code'], response_data['error_message'])
return response_data
class FaceError(Exception):
def __init__(self, error_code, error_message):
self.error_code = error_code
self.error_message = error_message
def __str__(self):
return '%s (%d)' % (self.error_message, self.error_code)
| [
"gleitz@hunch.com"
] | gleitz@hunch.com |
e61371dd76bda43a1630895f7d5de5b4dcc87d4d | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-sql/azure/mgmt/sql/models/operation_display.py | a5b2f841200641e4d361e2bfe4953bfad84d2a10 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,798 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationDisplay(Model):
"""Display metadata associated with the operation.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar provider: The localized friendly form of the resource provider name.
:vartype provider: str
:ivar resource: The localized friendly form of the resource type related
to this action/operation.
:vartype resource: str
:ivar operation: The localized friendly name for the operation.
:vartype operation: str
:ivar description: The localized friendly description for the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self):
super(OperationDisplay, self).__init__()
self.provider = None
self.resource = None
self.operation = None
self.description = None
| [
"autorestci@microsoft.com"
] | autorestci@microsoft.com |
6c5e15a223ab40fd0c865b6b2acf3b6f0832efc0 | 4b7791aa4a93ccfa6e2c3ffb10dfcbe11d042c66 | /estudos/estudo_02.py | cf804e6be1490fbd787b503f0869c96b7be1f3df | [] | no_license | Dev33Renan/Python-Exercises | bbf895f241d4142a6172b911228227cd4a6fe6ab | ffc73bc0b912b41564034e7740ea29a9f5249553 | refs/heads/main | 2023-06-11T05:44:11.902322 | 2021-06-20T19:11:28 | 2021-06-20T19:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | frase = "O joão Foi andar ,de bicicleta."
frase_02 = 'A maria não gosta de bicicleta'
#seleção por posição (index)
print(frase[5])
#seleção por intervalo (index e quantidade de casas)
print(frase[2:6])
#seleção por intervalo (index , quantidade e passo)
print(frase[5:1:-1])
#substituição por low case (letra minúscula )
print(frase.lower())
#substituição por upper case (letra maiúscula)
print(frase.upper())
# função title substitui a primeira letra de toda palavra na istring por upper case(letra maiúscula)
print(frase.title())
# função captalize torna todas as letra da frase minúsculas além de tornar a primeira letra da frase em maiúsculo
print(frase.capitalize())
# swapcase inverte letras maiúsculas se tornam minúsculas vice e versa
print(frase.swapcase())
#ljust ajusta o valor da string para o tamanho definido
frase = frase.ljust(50)
frase += "!" #concatenação de strings (mesmo que frase = frase + "!")
print(frase)
#split separa todos os itens dentro de uma string em uma lista
print(frase.split())
#replace substituir um iten por outro dentro da string
print(frase.replace(",","."))
#strip remove todos caracteres definidos no inicio da string
print(frase.strip("O, "))
#center ajustar a string adicionando espaços necessários no inicio e no fim da string
print(frase.center(100))
#find procura e retorna a posição de um valor
print(frase.find("a"))
#startswith retorna true ou false em caso a string começar com um valor determinado
print(frase)
print(frase.startswith('O'))
#endswith retorna // // em caso de a string finalizar com um valor determinado
print(frase.endswith('!'))
#islower retorna true ou false em caso de a frase possuir apenas letras minusculas ou não
print(frase.lower().islower())
#isupper // letras maiúsculas ou não
print(frase.upper().isupper())
#type retorna tipo de variavel
print(type(frase))
print(type(1))
#count retorna quantidade de itens
print(frase.count('o'))
#len() retorna tamanho string
print(len(frase))
| [
"hikarofcarvalho@gmail.com"
] | hikarofcarvalho@gmail.com |
c4fc32732a38e01ea2ca2aacee54f77ae3b99850 | 2b791f6d4cf4b18fc8bcc2b6e3cb3a516c59077d | /test/examples/test_pysc2.py | 2aa8a6b259f0b00abc9a8180e1b2168cc65e29f9 | [
"MIT"
] | permissive | osu-xai/abp | 9c99c66de1d9c91da360d133900e29e7b85d45d2 | cd83eaa2810a1c5350c849303d61639576c0bb0d | refs/heads/master | 2021-06-11T16:01:30.210471 | 2020-11-30T23:05:50 | 2020-11-30T23:05:50 | 155,762,226 | 0 | 1 | MIT | 2020-11-30T23:05:52 | 2018-11-01T19:01:12 | Python | UTF-8 | Python | false | false | 584 | py | import sys
import unittest
class PySC2Tests(unittest.TestCase):
def test_pysc2_shards_dqn(self):
sys.argv = ['',
'--task', 'abp.examples.pysc2.collect_shards.dqn',
'--folder', 'test/tasks/pysc2_collect_shards_dqn']
from abp.trainer.task_runner import main
main()
def test_pysc2_shards_hra(self):
sys.argv = ['',
'--task', 'abp.examples.pysc2.collect_shards.hra',
'--folder', 'test/tasks/pysc2_collect_shards_hra']
from abp.trainer.task_runner import main
main()
| [
"nealla@lwneal.com"
] | nealla@lwneal.com |
7f60604b39fc937cd4de9b6d0a1569ae01f700be | 0f099ceacd4afabc92874dc9ab836c6baa4f0dbc | /mrbelvedereci/testresults/admin.py | 25b2c02d8a522a2454a6c730c4181ffebf878eed | [
"BSD-3-Clause"
] | permissive | davidjray/mrbelvedereci | 84d6c317a7d395602c421496d7dc6196f3b19258 | a5cfebdad2fafff648f705785df95739023f2af8 | refs/heads/master | 2020-05-18T14:22:48.037304 | 2017-02-25T00:40:59 | 2017-02-25T00:40:59 | 84,244,028 | 0 | 0 | null | 2017-03-07T20:40:57 | 2017-03-07T20:40:57 | null | UTF-8 | Python | false | false | 563 | py | from django.contrib import admin
from mrbelvedereci.testresults.models import TestResult
from mrbelvedereci.testresults.models import TestMethod
class TestResultAdmin(admin.ModelAdmin):
list_display = ('build_flow', 'method', 'duration', 'outcome')
list_filter = ('build_flow__build__repo', 'method', 'method__testclass')
admin.site.register(TestResult, TestResultAdmin)
class TestMethodAdmin(admin.ModelAdmin):
list_display = ('name', 'testclass')
list_filter = ('testclass__repo', 'testclass')
admin.site.register(TestMethod, TestMethodAdmin)
| [
"jlantz@salesforce.com"
] | jlantz@salesforce.com |
111f2a2d1b3b762a31d45f54fa8a37bcc757338e | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/timeseries/periodograms/lombscargle/implementations/tests/test_utils.py | a0bb22fd6950e860070e9bbb8178f5ff5be32784 | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 2,191 | py |
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy.timeseries.periodograms.lombscargle.implementations.utils import extirpolate, bitceil, trig_sum
@pytest.mark.parametrize('N', 2 ** np.arange(1, 12))
@pytest.mark.parametrize('offset', [-1, 0, 1])
def test_bitceil(N, offset):
assert_equal(bitceil(N + offset),
int(2 ** np.ceil(np.log2(N + offset))))
@pytest.fixture
def extirpolate_data():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate(N, M, extirpolate_data):
x, y, f = extirpolate_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))
@pytest.fixture
def extirpolate_int_data():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
x[:25] = x[:25].astype(int)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
x, y, f = extirpolate_int_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))
@pytest.fixture
def trig_sum_data():
rng = np.random.RandomState(0)
t = 10 * rng.rand(50)
h = np.sin(t)
return t, h
@pytest.mark.parametrize('f0', [0, 1])
@pytest.mark.parametrize('adjust_t', [True, False])
@pytest.mark.parametrize('freq_factor', [1, 2])
@pytest.mark.parametrize('df', [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
t, h = trig_sum_data
tfit = t - t.min() if adjust_t else t
S1, C1 = trig_sum(tfit, h, df, N=1000, use_fft=True,
f0=f0, freq_factor=freq_factor, oversampling=10)
S2, C2 = trig_sum(tfit, h, df, N=1000, use_fft=False,
f0=f0, freq_factor=freq_factor, oversampling=10)
assert_allclose(S1, S2, atol=1E-2)
assert_allclose(C1, C2, atol=1E-2)
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
d2da12a87c1b55b04a218cf8a15a54b23fea5b08 | 3d65a2d72e65083c752281368cf040ae977e4757 | /generate_empty_data_directory.py | 5537758e68a618c65078169d960cab67860cdd18 | [] | no_license | florisvb/OdorAnalysis | 6b4b2c32979b9139856aee20cc63c34cfe63819e | 18beae8d3c6be271f171b1c36c9fd932a8a404ba | refs/heads/master | 2020-06-03T14:48:34.962795 | 2012-10-23T22:28:21 | 2012-10-23T22:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import sys, os
from optparse import OptionParser
def main(path, config):
os.mkdir(os.path.join(path, config.data_path))
os.mkdir(os.path.join(path, config.raw_datasets_path))
os.mkdir(os.path.join(path, config.culled_datasets_path))
os.mkdir(os.path.join(path, config.h5_path))
os.mkdir(os.path.join(path, config.tmp_data_path))
os.mkdir(os.path.join(path, config.odor_control_path))
os.mkdir(os.path.join(path, config.figure_path))
for fig in config.figures:
os.mkdir(os.path.join(path, config.figure_path, fig))
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--path", type="str", dest="path", default='',
help="path to empty data folder, where you have a configuration file")
(options, args) = parser.parse_args()
path = options.path
sys.path.append(path)
import analysis_configuration
config = analysis_configuration.Config()
main(path, config)
| [
"florisvb@gmail.com"
] | florisvb@gmail.com |
a5721a4ecf125350389de8fc1870448f3186c310 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/containerservice/v20200101/list_managed_cluster_access_profile.py | 5a33bdd1c4b4c49b22b00ddb4f2856092048bf12 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 4,186 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListManagedClusterAccessProfileResult',
'AwaitableListManagedClusterAccessProfileResult',
'list_managed_cluster_access_profile',
]
@pulumi.output_type
class ListManagedClusterAccessProfileResult:
"""
Managed cluster Access Profile.
"""
def __init__(__self__, kube_config=None, location=None, name=None, tags=None, type=None):
if kube_config and not isinstance(kube_config, str):
raise TypeError("Expected argument 'kube_config' to be a str")
pulumi.set(__self__, "kube_config", kube_config)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="kubeConfig")
def kube_config(self) -> Optional[str]:
"""
Base64-encoded Kubernetes configuration file.
"""
return pulumi.get(self, "kube_config")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableListManagedClusterAccessProfileResult(ListManagedClusterAccessProfileResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListManagedClusterAccessProfileResult(
kube_config=self.kube_config,
location=self.location,
name=self.name,
tags=self.tags,
type=self.type)
def list_managed_cluster_access_profile(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
role_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterAccessProfileResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the managed cluster resource.
:param str role_name: The name of the role for managed cluster accessProfile resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
__args__['roleName'] = role_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:containerservice/v20200101:listManagedClusterAccessProfile', __args__, opts=opts, typ=ListManagedClusterAccessProfileResult).value
return AwaitableListManagedClusterAccessProfileResult(
kube_config=__ret__.kube_config,
location=__ret__.location,
name=__ret__.name,
tags=__ret__.tags,
type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
6649df6a8169655213193e4015de1facfed50fec | 7357d367b0af4650ccc5b783b7a59090fdde47bb | /models/research/attention_ocr/python/metrics.py | 83f67809c7f18bd28777139ee752b99c790da44b | [
"MIT"
] | permissive | BarracudaPff/code-golf-data-python | fb0cfc74d1777c4246d56a5db8525432bf37ab1a | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | refs/heads/main | 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | """Quality metrics for the model."""
import tensorflow as tf
def char_accuracy(predictions, targets, rej_char, streaming=False):
"""Computes character level accuracy.
Both predictions and targets should have the same shape
[batch_size x seq_length].
Args:
predictions: predicted characters ids.
targets: ground truth character ids.
rej_char: the character id used to mark an empty element (end of sequence).
streaming: if True, uses the streaming mean from the slim.metric module.
Returns:
a update_ops for execution and value tensor whose value on evaluation
returns the total character accuracy.
"""
with tf.variable_scope("CharAccuracy"):
predictions.get_shape().assert_is_compatible_with(targets.get_shape())
targets = tf.to_int32(targets)
const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
weights = tf.to_float(tf.not_equal(targets, const_rej_char))
correct_chars = tf.to_float(tf.equal(predictions, targets))
accuracy_per_example = tf.div(tf.reduce_sum(tf.multiply(correct_chars, weights), 1), tf.reduce_sum(weights, 1))
if streaming:
return tf.contrib.metrics.streaming_mean(accuracy_per_example)
else:
return tf.reduce_mean(accuracy_per_example)
def sequence_accuracy(predictions, targets, rej_char, streaming=False):
"""Computes sequence level accuracy.
Both input tensors should have the same shape: [batch_size x seq_length].
Args:
predictions: predicted character classes.
targets: ground truth character classes.
rej_char: the character id used to mark empty element (end of sequence).
streaming: if True, uses the streaming mean from the slim.metric module.
Returns:
a update_ops for execution and value tensor whose value on evaluation
returns the total sequence accuracy.
"""
with tf.variable_scope("SequenceAccuracy"):
predictions.get_shape().assert_is_compatible_with(targets.get_shape())
targets = tf.to_int32(targets)
const_rej_char = tf.constant(rej_char, shape=targets.get_shape(), dtype=tf.int32)
include_mask = tf.not_equal(targets, const_rej_char)
include_predictions = tf.to_int32(tf.where(include_mask, predictions, tf.zeros_like(predictions) + rej_char))
correct_chars = tf.to_float(tf.equal(include_predictions, targets))
correct_chars_counts = tf.cast(tf.reduce_sum(correct_chars, reduction_indices=[1]), dtype=tf.int32)
target_length = targets.get_shape().dims[1].value
target_chars_counts = tf.constant(target_length, shape=correct_chars_counts.get_shape())
accuracy_per_example = tf.to_float(tf.equal(correct_chars_counts, target_chars_counts))
if streaming:
return tf.contrib.metrics.streaming_mean(accuracy_per_example)
else:
return tf.reduce_mean(accuracy_per_example) | [
"sokolov.yas@gmail.com"
] | sokolov.yas@gmail.com |
89b7174617d0b48116861a28f2447630f2a8f87e | 9e1b884b94a0570e0c0781a7f7023e8ec482b5b4 | /codes/T20.py | 3eed41f3258b1e41601825e791c60a4addaa61a0 | [] | no_license | sunjunee/offer_book_python_codes | 568579434d82a7231074e41c67476c3ab8b9f181 | ecc852a5d38c8a02b9c2d0473065579363035f83 | refs/heads/master | 2020-03-12T09:49:40.587404 | 2018-06-10T13:55:24 | 2018-06-10T13:55:24 | 130,560,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # -*- coding: utf-8 -*-
"""
@ Author: Jun Sun {Python3}
@ E-mail: sunjunee@qq.com
@ Date: 2018-04-22 15:03:02
"""
#表示数值的字符串:实现一个函数来判断一个字符串是否表示数值
#如+100, 5e2, -123, 3.1415, -1e-16
#{+-}{digits}.{digits}[eE]{+-}{digits}
def judgeString(string):
pA = pB = pC = True
index, pA = getInteger(string, 0)
if(index <= len(string) - 1 and string[index] == "."):
index, pB = getUsignInteger(string, index + 1)
if(index <= len(string) - 1 and (string[index] == "e" or string[index] == "E")):
index, pC = getInteger(string, index + 1)
if(pA and pB and pC and index == len(string)):
return True
return False
def getUsignInteger(string, index):
p = index
pA = False
while(p <= len(string) - 1 and string[p] >= '0' and string[p] <= '9'):
p += 1
if(p > index):
pA = True
return p, pA
def getInteger(string, index):
if(string[index] == '-' or string[index] == "+"):
index += 1
return getUsignInteger(string, index)
if __name__ == "__main__":
testCase = ["+100", "5e2", "-123", "3.145678", "-12.56e+23",
"-0.13e.w", ".e2", "+", "-.23", "5e0.2"]
print(list(map(judgeString, testCase))) | [
"sunjunee@qq.com"
] | sunjunee@qq.com |
f8b441fb5a799a8054e3d99e1f9a9577ca133ded | bd87d8947878ccb2f5b720e70a22493b00868fd3 | /fluent/11_interfaces/monkey_patching.py | 376ef7d9da46e44a141c6e9d89cca318520fdec9 | [] | no_license | damiansp/completePython | 4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac | 3f5e2f14d79c93df5147b82d901190c054535158 | refs/heads/master | 2023-09-01T20:50:03.444440 | 2023-08-28T00:27:57 | 2023-08-28T00:27:57 | 99,197,610 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | import collections
from random import shuffle
Card = collections.namedtuple('Card', ['rank', 'suit'])
class FrenchDeck:
ranks = [str(n) for n in range(2, 11)] + list('JQKA')
suits = 'spades diamonds clubs hearts'.split()
def __init__(self):
self._cards = [Card(rank, suit) for suit in self.suits
for rank in self.ranks]
def __len__(self):
return len(self._cards)
def __getitem__(self, position):
return self._cards[position]
deck = FrenchDeck()
print(deck[:5])
# shuffle(deck) # error: FrenchDeck does not support item assignment
def set_card(deck, position, card):
deck._cards[position] = card
FrenchDeck.__setitem__ = set_card
shuffle(deck)
print(deck[:5])
| [
"damiansp@gmail.com"
] | damiansp@gmail.com |
99b46dc88fa2141bbc84b499ca4f5cd6a537b7f9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02576/s456985906.py | 8c3a47e877dfd91f6009c8645fef18b303b19a4f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import sys
read = sys.stdin.read
#readlines = sys.stdin.readlines
from math import ceil
def main():
n, x, t = map(int, input().split())
print(ceil(n / x) * t)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cda854759b81092d95dc44d05842714eb9b494ba | e483b0515cca39f4ddac19645f03fc1695d1939f | /google/ads/google_ads/v1/proto/enums/product_bidding_category_status_pb2.py | 077aaca8546340153b855c91a6e7ebd3e2e0b8bb | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | BrunoWMello/google-ads-python | 0af63d2ca273eee96efd8a33252d27112c049442 | 9b074a037d10f0c1208a00d5d41a8e5e25405f28 | refs/heads/master | 2020-05-27T04:37:47.669144 | 2019-05-24T17:07:31 | 2019-05-24T17:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,249 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/enums/product_bidding_category_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/enums/product_bidding_category_status.proto',
package='google.ads.googleads.v1.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v1.enumsB!ProductBiddingCategoryStatusProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V1.Enums\312\002\035Google\\Ads\\GoogleAds\\V1\\Enums\352\002!Google::Ads::GoogleAds::V1::Enums'),
serialized_pb=_b('\nIgoogle/ads/googleads_v1/proto/enums/product_bidding_category_status.proto\x12\x1dgoogle.ads.googleads.v1.enums\x1a\x1cgoogle/api/annotations.proto\"z\n ProductBiddingCategoryStatusEnum\"V\n\x1cProductBiddingCategoryStatus\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12\x0c\n\x08OBSOLETE\x10\x03\x42\xf6\x01\n!com.google.ads.googleads.v1.enumsB!ProductBiddingCategoryStatusProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V1.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V1\\Enums\xea\x02!Google::Ads::GoogleAds::V1::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS = _descriptor.EnumDescriptor(
name='ProductBiddingCategoryStatus',
full_name='google.ads.googleads.v1.enums.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OBSOLETE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=174,
serialized_end=260,
)
_sym_db.RegisterEnumDescriptor(_PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS)
_PRODUCTBIDDINGCATEGORYSTATUSENUM = _descriptor.Descriptor(
name='ProductBiddingCategoryStatusEnum',
full_name='google.ads.googleads.v1.enums.ProductBiddingCategoryStatusEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=260,
)
_PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS.containing_type = _PRODUCTBIDDINGCATEGORYSTATUSENUM
DESCRIPTOR.message_types_by_name['ProductBiddingCategoryStatusEnum'] = _PRODUCTBIDDINGCATEGORYSTATUSENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ProductBiddingCategoryStatusEnum = _reflection.GeneratedProtocolMessageType('ProductBiddingCategoryStatusEnum', (_message.Message,), dict(
DESCRIPTOR = _PRODUCTBIDDINGCATEGORYSTATUSENUM,
__module__ = 'google.ads.googleads_v1.proto.enums.product_bidding_category_status_pb2'
,
__doc__ = """Status of the product bidding category.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.enums.ProductBiddingCategoryStatusEnum)
))
_sym_db.RegisterMessage(ProductBiddingCategoryStatusEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | BrunoWMello.noreply@github.com |
faf060d7b38be5fea1712230d9bbb6f91e45b7f9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy3275.py | b7544f2ecf5b480e932e0f8dbee5116b5707548d | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,327 | py | # qubit number=4
# total number=43
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=30
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.x(input_qubit[2]) # number=38
prog.h(input_qubit[2]) # number=40
prog.cz(input_qubit[0],input_qubit[2]) # number=41
prog.h(input_qubit[2]) # number=42
prog.y(input_qubit[1]) # number=19
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.y(input_qubit[3]) # number=20
prog.y(input_qubit[1]) # number=12
prog.rx(-2.158274153016188,input_qubit[3]) # number=24
prog.h(input_qubit[0]) # number=16
prog.cz(input_qubit[2],input_qubit[0]) # number=17
prog.h(input_qubit[0]) # number=18
prog.cx(input_qubit[1],input_qubit[0]) # number=21
prog.z(input_qubit[1]) # number=22
prog.cx(input_qubit[1],input_qubit[0]) # number=23
prog.h(input_qubit[0]) # number=25
prog.cz(input_qubit[2],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=27
prog.x(input_qubit[0]) # number=35
prog.x(input_qubit[0]) # number=36
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy3275.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
2405333f527484bf17e95f0a345b48a18152227e | 2ade3aada45862b7cb2cced9f3402b3738266d15 | /PhysiCell-model-builder/bin/substrates.py | eaf5acd78c4e1fcb8cd36c52daeec546d444e5b7 | [
"BSD-3-Clause"
] | permissive | willisdc/PhysiCell_Studio | 70c7a6c7ce65691cb9d46644bbdf7f6b79f8a926 | 7fb7ca6277a353947a446782499880ffcd5889ad | refs/heads/main | 2023-09-02T23:36:57.405936 | 2021-11-12T20:40:49 | 2021-11-12T20:40:49 | 422,694,252 | 0 | 0 | null | 2021-10-29T19:41:18 | 2021-10-29T19:41:17 | null | UTF-8 | Python | false | false | 84,822 | py | # substrates.py - code for the 'Out: Plots' tab of the GUI.
#
# Contains visualization for: Cells and substrates, possibly overlaid (on the left); Extra analysis (2D line plots; on the right)
#
# Author: Randy Heiland, with contributions from many students and collaborators
#
import os, math
from pathlib import Path
from ipywidgets import Layout, Label, Text, Checkbox, Button, BoundedIntText, HBox, VBox, Box, \
FloatText, Dropdown, SelectMultiple, RadioButtons, interactive
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle, Ellipse, Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.colors as mplc
from matplotlib import gridspec
from collections import deque
from pyMCDS import pyMCDS
import numpy as np
import scipy.io
import xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html
import glob
import platform
import zipfile
from debug import debug_view
import warnings
import traceback
import sys
hublib_flag = True
if platform.system() != 'Windows':
try:
# print("Trying to import hublib.ui")
from hublib.ui import Download
except:
hublib_flag = False
else:
hublib_flag = False
#warnings.warn(message, mplDeprecation, stacklevel=1)
warnings.filterwarnings("ignore")
class SubstrateTab(object):
def __init__(self):
self.tb_count = 0
self.output_dir = '.'
# self.output_dir = 'tmpdir'
# These are recomputed below
# basic_length = 12.5
basic_length = 12.0
self.figsize_width_substrate = 15.0 # allow extra for colormap
self.figsize_height_substrate = basic_length
self.figsize_width_2Dplot = basic_length
self.figsize_height_2Dplot = basic_length
# self.width_substrate = basic_length # allow extra for colormap
# self.height_substrate = basic_length
self.figsize_width_svg = basic_length
self.figsize_height_svg = basic_length
# self.width_svg = basic_length
# self.height_svg = basic_length
self.figsize_width_substrate = 15.0 # allow extra for colormap
self.figsize_height_substrate = 12.0
self.figsize_width_svg = 12.0
self.figsize_height_svg = 12.0
self.axis_label_fontsize = 15
self.ax0 = None
self.ax1 = None
self.ax1_lymph_TC = None
self.ax1_lymph_TH2 = None
self.updated_analysis_flag = True
self.analysis_data_plotted = False
# self.analysis_data_set1 = False # live, infected, dead
# self.analysis_data_set2 = False # Mac, Neut, CD8,
# self.analysis_data_set3 = False
# self.analysis_data_set4 = False
# colors of plots for extra analysis (line ~1100)
self.mac_color = 'lime'
self.neut_color = 'cyan'
self.cd8_color = 'red'
self.dc_color = 'fuchsia'
self.cd4_color = 'orange'
self.fib_color = 'blueviolet'
self.lymph_DC_color = 'black'
self.lymph_TC_color = 'red'
self.viral_load_color = 'black'
self.Ig_total_color = 'black'
# self.fig = plt.figure(figsize=(7.2,6)) # this strange figsize results in a ~square contour plot
self.first_time = True
self.modulo = 1
self.use_defaults = True
self.svg_delta_t = 1
self.substrate_delta_t = 1
self.svg_frame = 1
self.substrate_frame = 1
self.customized_output_freq = False
self.therapy_activation_time = 1000000
self.max_svg_frame_pre_therapy = 1000000
self.max_substrate_frame_pre_therapy = 1000000
self.svg_xmin = 0
# Probably don't want to hardwire these if we allow changing the domain size
# self.svg_xrange = 2000
# self.xmin = -1000.
# self.xmax = 1000.
# self.ymin = -1000.
# self.ymax = 1000.
# self.x_range = 2000.
# self.y_range = 2000.
self.show_nucleus = False
self.show_edge = True
self.alpha = 1.0 # 0.7 is partially transparent (all cells)
substrates_default_disabled_flag = True # True = disable them by default; False=enable them
# initial value
self.field_index = 4
# self.field_index = self.substrate_choice.value + 4
self.skip_cb = True
# define dummy size of mesh (set in the tool's primary module)
self.numx = 0
self.numy = 0
# ------- custom data for cells ----------
self.xval = np.empty([1])
# print('sub, init: len(self.xval) = ',len(self.xval))
self.yval1 = np.empty([1]) # live, infected, dead
self.yval2 = np.empty([1])
self.yval3 = np.empty([1])
self.yval4 = np.empty([1]) # Mac,Neut,CD8,DC,CD4,Fib
self.yval5 = np.empty([1])
self.yval6 = np.empty([1])
self.yval7 = np.empty([1])
self.yval8 = np.empty([1])
self.yval9 = np.empty([1])
self.yval10 = np.empty([1]) # viral load
self.yval11 = np.empty([1]) # lymph node dynamics: DC,TC,TH1,TH2
self.yval12 = np.empty([1])
self.yval13 = np.empty([1])
self.yval14 = np.empty([1])
self.yval15 = np.empty([1]) # Ig total
self.tname = "time"
self.yname = 'Y'
# self.num_2D_plots = 1
self.title_str = ''
tab_height = '600px'
tab_height = '500px'
constWidth = '180px'
constWidth2 = '150px'
tab_layout = Layout(width='900px', # border='2px solid black',
height=tab_height, ) #overflow_y='scroll')
max_frames = 1
# NOTE: The "interactive" widget contains the plot(s). Whenever any plot needs to be updated,
# its "update" method needs to be invoked. So, if you notice multiple, flashing
# plot updates occuring, you can search for all instances of "self.i_plot.update()" and do
# a debug print to see where they occur.
# self.mcds_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False)
# self.i_plot = interactive(self.plot_plots, frame=(0, max_frames), continuous_update=False)
self.i_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False)
# "plot_size" controls the size of the tab height, not the plot (rf. figsize for that)
# NOTE: the Substrates Plot tab has an extra row of widgets at the top of it (cf. Cell Plots tab)
svg_plot_size = '700px'
svg_plot_size = '900px'
plot_area_width = '1500px'
plot_area_height = '900px'
self.i_plot.layout.width = plot_area_width
self.i_plot.layout.height = plot_area_height
self.fontsize = 20
# self.fontsize = 30
#============ new GUI =================
self.max_frames = BoundedIntText(value=0,description='# cell frames',min=0,max=999999,layout=Layout(width='160px')) # border='1px solid black',
self.cells_toggle = Checkbox(description='Cells',value=True, style = {'description_width': 'initial'}, layout=Layout(width='110px', )) #border='1px solid black'))
self.cell_edges_toggle = Checkbox(description='edge',value=self.show_edge, style = {'description_width': 'initial'}, layout=Layout(width='110px',)) # align_items='stretch',
layout1 = Layout(display='flex',
flex_flow='row',
align_items='center',
width='25%', ) #border='1px solid black')
hb1=HBox([self.cells_toggle,self.cell_edges_toggle ]) # layout=layout1)
# cells_vbox=VBox([self.max_frames, hb1], layout=Layout(width='350px',border='1px solid black',))
cells_vbox=VBox([self.max_frames, hb1], layout=Layout(width='320px'))
#--------------------------
self.substrates_toggle = Checkbox(description='Substrates', style = {'description_width': 'initial'})
# self.field_min_max = {'assembled_virion':[0.,1.,False] }
# hacky I know, but make a dict that's got (key,value) reversed from the dict in the Dropdown below
# ipywidgets 8 docs: Selection widgets no longer accept a dictionary of options. Pass a list of key-value pairs instead.
self.field_dict = {0:'director signal', 1:'cargo signal'}
# self.substrate_choice = Dropdown(options={'assembled_virion': 0},layout=Layout(width='150px'))
# options will be replaced below, based on initial.xml
self.substrate_choice = Dropdown(
options={'director signal': 0, 'cargo signal':1},
value=0,
disabled = substrates_default_disabled_flag,
# description='Field',
layout=Layout(width='150px')
)
self.colormap_dd = Dropdown(options=['viridis', 'jet', 'YlOrRd'],value='YlOrRd',layout=Layout(width='200px'))
# self.colormap_dd.observe(self.mcds_field_cb)
self.colormap_dd.observe(self.substrate_field_cb)
hb2 = HBox([self.substrates_toggle,self.substrate_choice,self.colormap_dd], layout=Layout(width='350px', )) # border='1px solid black',))
self.colormap_fixed_toggle = Checkbox(description='Fix',style = {'description_width': 'initial'}, layout=Layout(width='60px'))
constWidth2 = '160px'
self.colormap_min = FloatText(
description='Min',
value=0,
step = 0.1,
layout=Layout(width=constWidth2),)
self.colormap_max = FloatText(
description='Max',
value=38,
step = 0.1,
layout=Layout(width=constWidth2),)
# hb3=HBox([colormap_fixed_toggle,colormap_min,colormap_max], layout=Layout(width='500px',justify_content='flex-start'))
hb3=HBox([self.colormap_fixed_toggle,self.colormap_min,self.colormap_max], layout=Layout(justify_content='flex-start')) # border='1px solid black',
substrate_vbox=VBox([hb2, hb3], layout=Layout(width='500px'))
#--------------------------
# analysis_label = Label(value='--Extra analysis--')
self.analysis_data_toggle = Checkbox(
description='Extra analysis',
disabled=False,
style = {'description_width': 'initial'},
layout=Layout(width='130px', ) # border='1px solid black',)
# layout=Layout(width=constWidth2),
)
self.analysis_data_update_button = Button(
description='Update',
disabled=True,
style = {'description_width': 'initial'},
layout=Layout(width='130px', ) # border='1px solid black',)
# layout=Layout(width=constWidth2),
)
self.analysis_data_update_button.style.button_color = 'lightgreen'
#--------------
# self.analysis_data_update_button= Button(
# description='Update',
# disabled=True,)
# # layout=Layout(width='120px',) ,style = {'description_width': 'initial'})
# self.analysis_data_update_button.style.button_color = 'lightgreen'
# analysis_data_vbox1 = VBox([self.analysis_data_toggle, ], layout=Layout(justify_content='center')) # ,border='1px solid black', width='330px',
# analysis_data_vbox1 = VBox([analysis_label, self.analysis_data_update_button, ], layout=Layout(justify_content='center')) # ,border='1px solid black', width='330px',
analysis_data_vbox1 = VBox([self.analysis_data_toggle, self.analysis_data_update_button], layout=Layout(justify_content='center')) # ,border='1px solid black',)) # width='330px',
# self.analysis_data_choice = SelectMultiple(
# # options=['assembled_virion','susceptible','infected', 'dead'],
# options=['live','infected', 'dead'],
# disabled=True,
# value=['live'],
# rows=3,
# layout=Layout(width='160px', ) )
self.analysis_data_choice = Dropdown(
options={'live,infected,dead':0, 'Mac,Neut,CD8,DC,CD4,Fib':1, 'viral load':2, 'antibody load':3, 'lymph:DC,TC':4, 'lymph:Th1,Th2':5},
# options=['live,infected,dead', 'Mac,Neut,CD8,DC,CD4,Fib', 'viral load', 'lymph:DC,TC', 'lymph:Th1,Th2'],
value=0,
disabled = True,
# description='Field',
# layout=Layout(width='150px')
)
self.analysis_data_choice_y = {0:[False,0.,1.], 1:[False,0.,1.], 2:[False,0.,1.], 3:[False,0.,1.], 4:[False,0.,1.], 5:[False,0.,1.]}
# self.analysis_data_choice = RadioButtons(
# options=['live,infected,dead', 'Mac,Neut,CD8,DC,CD4,Fib', 'viral load', 'lymph node dynamics'],
# value='live,infected,dead',
# # layout={'width': 'max-content'}, # If the items' names are long
# disabled=True
# )
# called when a user selects another choice in the custom data radio buttons
def analysis_data_choice_cb(b):
idx = self.analysis_data_choice.value
if idx > 2:
self.fixed_yrange.disabled = True
self.y_min.disabled = True
self.y_max.disabled = True
else:
self.fixed_yrange.disabled = False
self.y_min.disabled = False
self.y_max.disabled = False
self.fixed_yrange.value = self.analysis_data_choice_y[idx][0]
self.y_min.value = self.analysis_data_choice_y[idx][1]
# print('idx,y_min=',idx,self.y_min.value)
self.y_max.value = self.analysis_data_choice_y[idx][2]
# self.update_analysis_data()
self.i_plot.update()
self.analysis_data_choice.observe(analysis_data_choice_cb)
self.analysis_data_wait = Label('',color = 'red')
# self.analysis_data_wait = Label('Will be available after simulation completes.')
# self.analysis_data_wait = Label('Wait for 1st time processing...')
def analysis_data_toggle_cb(b):
# self.update()
if (self.analysis_data_toggle.value):
self.analysis_data_wait.value = 'Press Update to analyze available data.'
self.analysis_data_choice.disabled = False
self.analysis_data_update_button.disabled = False
# self.update_analysis_data()
else:
self.analysis_data_wait.value = ''
self.analysis_data_choice.disabled = True
self.analysis_data_update_button.disabled = True
self.i_plot.update()
# self.analysis_data_wait.value = ''
self.analysis_data_toggle.observe(analysis_data_toggle_cb)
def analysis_data_update_cb(b):
# self.update()
self.analysis_data_wait.value = 'Updating analysis data...'
self.update_analysis_data()
self.i_plot.update()
self.analysis_data_wait.value = ''
self.analysis_data_update_button.on_click(analysis_data_update_cb)
#----------------------------
self.fixed_yrange = Checkbox(description='Fix',style = {'description_width': 'initial'}, layout=Layout(width='60px'))
constWidth3 = '120px'
self.y_min = FloatText(
description='Ymin',
value=0,
step = 1.0,
style = {'description_width': 'initial'},
layout=Layout(width=constWidth3),)
self.y_max = FloatText(
description='Ymax',
value=1.0,
step = 1.0,
style = {'description_width': 'initial'},
layout=Layout(width=constWidth3),)
def fixed_yrange_cb(b):
idx = self.analysis_data_choice.value
self.analysis_data_choice_y[idx][0] = self.fixed_yrange.value
if self.fixed_yrange.value:
self.y_min.disabled = False
self.y_max.disabled = False
else:
self.y_min.disabled = True
self.y_max.disabled = True
# self.fixed_yrange.observe(fixed_yrange_cb)
def analysis_yrange_cb(b):
idx = self.analysis_data_choice.value
self.analysis_data_choice_y[idx][1] = self.y_min.value
self.analysis_data_choice_y[idx][2] = self.y_max.value
print('dict=',self.analysis_data_choice_y)
# self.y_min.observe(analysis_yrange_cb)
# self.y_max.observe(analysis_yrange_cb)
# hb3=HBox([colormap_fixed_toggle,colormap_min,colormap_max], layout=Layout(width='500px',justify_content='flex-start'))
y_range_box = HBox([self.fixed_yrange,self.y_min,self.y_max]) # layout=Layout(justify_content='flex-start')) # border='1px solid black',
#gui=HBox([cells_vbox, substrate_vbox, analysis_data_hbox], justify_content='center') # vs. 'flex-start , layout=Layout(width='900px'))
#==========================================================================
# ------- "observe" functionality (callbacks)
self.max_frames.observe(self.update_max_frames)
# self.field_min_max = {'dummy': [0., 1., False]}
# NOTE: manually setting these for now (vs. parsing them out of data/initial.xml)
# print("substrate __init__: self.substrate_choice.value=",self.substrate_choice.value)
# self.substrate_choice.observe(self.mcds_field_cb)
# self.substrate_choice.observe(self.mcds_field_changed_cb)
self.substrate_choice.observe(self.substrate_field_changed_cb)
# self.field_colormap = Dropdown(
# options=['viridis', 'jet', 'YlOrRd'],
# value='YlOrRd',
# # description='Field',
# layout=Layout(width=constWidth)
# )
# rwh2
# self.field_cmap.observe(self.plot_substrate)
# self.field_colormap.observe(self.substrate_field_cb)
self.colormap_min.observe(self.substrate_field_cb)
self.colormap_max.observe(self.substrate_field_cb)
# self.cmap_fixed_toggle = Checkbox(
# description='Fix',
# disabled=False,
# # layout=Layout(width=constWidth2),
# )
# self.colormap_fixed_toggle.observe(self.mcds_field_cb)
# self.cmap_min = FloatText(
# description='Min',
# value=0,
# step = 0.1,
# disabled=True,
# layout=Layout(width=constWidth2),
# )
# self.cmap_max = FloatText(
# description='Max',
# value=38,
# step = 0.1,
# disabled=True,
# layout=Layout(width=constWidth2),
# )
def colormap_fixed_toggle_cb(b):
field_name = self.field_dict[self.substrate_choice.value]
# print(self.cmap_fixed_toggle.value)
if (self.colormap_fixed_toggle.value): # toggle on fixed range
self.colormap_min.disabled = False
self.colormap_max.disabled = False
self.field_min_max[field_name][0] = self.colormap_min.value
self.field_min_max[field_name][1] = self.colormap_max.value
self.field_min_max[field_name][2] = True
# self.save_min_max.disabled = False
else: # toggle off fixed range
self.colormap_min.disabled = True
self.colormap_max.disabled = True
self.field_min_max[field_name][2] = False
# self.save_min_max.disabled = True
# self.mcds_field_cb()
if not self.skip_cb:
# print("colormap_fixed_toggle_cb(): i_plot.update")
self.i_plot.update()
# self.colormap_fixed_toggle.observe(colormap_fixed_toggle_cb)
self.colormap_fixed_toggle.observe(self.substrate_field_cb)
def cell_edges_toggle_cb(b):
# self.update()
if (self.cell_edges_toggle.value):
self.show_edge = True
else:
self.show_edge = False
# print("cell_edges_toggle_cb(): i_plot.update")
self.i_plot.update()
self.cell_edges_toggle.observe(cell_edges_toggle_cb)
def cells_toggle_cb(b):
# self.update()
self.skip_cb = True
if self.cells_toggle.value:
self.cell_edges_toggle.disabled = False
# self.cell_nucleus_toggle.disabled = False
else:
self.cell_edges_toggle.disabled = True
# self.cell_nucleus_toggle.disabled = True
self.skip_cb = False
# print("cells_toggle_cb(): i_plot.update")
self.i_plot.update()
self.cells_toggle.observe(cells_toggle_cb)
def substrates_toggle_cb(b):
self.skip_cb = True
if self.substrates_toggle.value: # seems bass-ackwards, but makes sense
self.colormap_fixed_toggle.disabled = False
self.colormap_min.disabled = False
self.colormap_max.disabled = False
self.substrate_choice.disabled = False
# self.field_colormap.disabled = False
self.colormap_dd.disabled = False
else:
self.colormap_fixed_toggle.disabled = True
self.colormap_min.disabled = True
self.colormap_max.disabled = True
self.substrate_choice.disabled = True
# self.field_colormap.disabled = True
self.colormap_dd.disabled = True
self.skip_cb = False
# print("substrates_toggle_cb: i_plot.update")
self.i_plot.update()
self.substrates_toggle.observe(substrates_toggle_cb)
#---------------------
# def analysis_data_toggle_cb(b):
# # print("analysis_data_toggle_cb()")
# self.skip_cb = True
# if (self.analysis_data_toggle.value): # seems bass-ackwards
# self.analysis_data_choice.disabled = False
# self.analysis_data_update_button.disabled = False
# else:
# self.analysis_data_choice.disabled = True
# self.analysis_data_update_button.disabled = True
# self.skip_cb = False
# # print("analysis_data_toggle_cb(): i_plot.update")
# self.i_plot.update()
# self.analysis_data_toggle.observe(analysis_data_toggle_cb)
# self.analysis_data_update_button.on_click(self.update_analysis_data)
#---------------------
help_label = Label('select slider: drag or left/right arrows')
# analysis_data_hbox = HBox([analysis_data_vbox1, VBox([self.analysis_data_choice, y_range_box, self.analysis_data_wait]), ])
analysis_data_hbox = HBox([analysis_data_vbox1, VBox([self.analysis_data_choice, self.analysis_data_wait]), ])
controls_box = HBox([cells_vbox, substrate_vbox, analysis_data_hbox], justify_content='center') # vs. 'flex-start , layout=Layout(width='900px'))
if (hublib_flag):
self.download_button = Download('mcds.zip', style='warning', icon='cloud-download',
tooltip='Download MCDS data', cb=self.download_cb)
self.download_svg_button = Download('svg.zip', style='warning', icon='cloud-download',
tooltip='Download cells SVG', cb=self.download_svg_cb)
# config_file = Path(os.path.join(self.output_dir, 'config.xml'))
# config_file = self.output_dir + '/config.xml'
self.download_config_button = Download('config.zip', style='warning', icon='cloud-download',
tooltip='Download the config params', cb=self.download_config_cb)
download_row = HBox([self.download_button.w, self.download_svg_button.w, self.download_config_button.w, Label("Download data (browser must allow pop-ups).")])
# box_layout = Layout(border='0px solid')
# controls_box = VBox([row1, row2]) # ,width='50%', layout=box_layout)
# controls_box = HBox([cells_vbox, substrate_vbox, analysis_data_hbox], justify_content='center') # vs. 'flex-start , layout=Layout(width='900px'))
self.tab = VBox([controls_box, self.i_plot, download_row, debug_view])
else:
# self.tab = VBox([row1, row2])
# self.tab = VBox([row1, row2, self.i_plot])
self.tab = VBox([controls_box, self.i_plot])
#---------------------------------------------------
def reset_analysis_data_plotting(self, bool_val):
    """Reset all cached 'Extra analysis' time-series and gate its widgets.

    bool_val=True disables the analysis choice/update widgets (e.g. when a
    new run invalidates old data); False re-enables them.
    """
    self.analysis_data_plotted = False
    if bool_val:
        self.analysis_data_toggle.value = False

    # Re-initialize the shared x axis and every cached y-series (yval1..yval15)
    # to a 1-element placeholder array.
    self.xval = np.empty([1])
    for series_num in range(1, 16):
        setattr(self, 'yval%d' % series_num, np.empty([1]))

    self.analysis_data_toggle.value = False
    self.analysis_data_choice.disabled = bool_val
    self.analysis_data_update_button.disabled = bool_val
#---------------------------------------------------
def update_dropdown_fields(self, data_dir):
    """Re-scan <data_dir>/initial.xml for substrate field names and rebuild
    the substrate Dropdown options plus the per-field colormap min/max table.

    data_dir: directory containing the run's initial.xml.
    Silently returns (after printing a message) if initial.xml is unreadable.
    """
    self.output_dir = data_dir
    # Bind fname before the try so the error message below can reference it.
    fname = os.path.join(self.output_dir, "initial.xml")
    try:
        tree = ET.parse(fname)
    except Exception:
        # was a bare 'except:'; narrowed so Ctrl-C etc. still propagate
        print("Cannot open ", fname, " to read info, e.g., names of substrate fields.")
        return

    xml_root = tree.getroot()
    self.field_min_max = {}   # field name -> [min, max, fixed-range?]
    self.field_dict = {}      # dropdown index -> field name
    dropdown_options = {}     # field name -> dropdown index
    uep = xml_root.find('.//variables')
    field_idx = 0
    # Truth-testing an Element is unreliable (childless Elements are falsy),
    # so compare against None explicitly.
    if uep is not None:
        for elm in uep.findall('variable'):
            field_name = elm.attrib['name']
            # 'assembled*' fields are intentionally excluded from the dropdown
            if 'assembled' not in field_name:
                self.field_min_max[field_name] = [0, 1, False]
                self.field_dict[field_idx] = field_name
                dropdown_options[field_name] = field_idx
                field_idx += 1

    self.substrate_choice.value = 0
    self.substrate_choice.options = dropdown_options
# def update_max_frames_expected(self, value): # called when beginning an interactive Run
# self.max_frames.value = value # assumes naming scheme: "snapshot%08d.svg"
# self.mcds_plot.children[0].max = self.max_frames.value
#------------------------------------------------------------------------------
# called from pc4covid19 module when user selects new cache dir in 'Load Config'
def update_params(self, config_tab, user_params_tab):
    """Refresh cached domain/plot parameters from the Config tab widgets.

    Called from the driver module (e.g. pc4covid19) when the user selects a
    new cached run in 'Load Config'. Invalidates any previously computed
    analysis data first.
    """
    self.reset_analysis_data_plotting(True)

    # Domain extents (from the Config tab's numeric widgets).
    self.xmin = config_tab.xmin.value
    self.xmax = config_tab.xmax.value
    self.x_range = self.xmax - self.xmin
    self.svg_xrange = self.xmax - self.xmin
    self.ymin = config_tab.ymin.value
    self.ymax = config_tab.ymax.value
    self.y_range = self.ymax - self.ymin

    # Voxel counts in each direction.
    self.numx = math.ceil(self.x_range / config_tab.xdelta.value)
    self.numy = math.ceil(self.y_range / config_tab.ydelta.value)

    # Keep figure aspect ratios consistent with a possibly non-square domain;
    # the substrate figure gets extra width (15 vs 12) for its colorbar.
    if self.x_range > self.y_range:
        aspect = self.y_range / self.x_range
        self.figsize_width_substrate = 15.0
        self.figsize_height_substrate = 12.0 * aspect
        self.figsize_width_svg = 12.0
        self.figsize_height_svg = 12.0 * aspect
    else:
        aspect = self.x_range / self.y_range
        self.figsize_width_substrate = 15.0 * aspect
        self.figsize_height_substrate = 12.0
        self.figsize_width_svg = 12.0 * aspect
        self.figsize_height_svg = 12.0

    # Output toggles and intervals.
    self.svg_flag = config_tab.toggle_svg.value
    self.substrates_flag = config_tab.toggle_mcds.value
    self.svg_delta_t = config_tab.svg_interval.value
    self.substrate_delta_t = config_tab.mcds_interval.value
    # number of SVG frames per substrate (full-data) frame
    self.modulo = int(self.substrate_delta_t / self.svg_delta_t)

    if self.customized_output_freq:
        # NOTE(review): assumes self.therapy_activation_time was assigned
        # elsewhere (the widget read here is commented out upstream) — confirm.
        self.max_svg_frame_pre_therapy = int(self.therapy_activation_time / self.svg_delta_t)
        self.max_substrate_frame_pre_therapy = int(self.therapy_activation_time / self.substrate_delta_t)
#------------------------------------------------------------------------------
# def update(self, rdir):
# Called from driver module (e.g., pc4*.py) (among other places?)
def update(self, rdir=''):
    """Point this tab at an output directory and refresh frame bookkeeping.

    rdir: new output directory; '' keeps self.output_dir unchanged.
    On the first call only, parses config.xml for the SVG and full-data
    output intervals and derives self.modulo (SVG frames per substrate
    frame). Then sets the max-frames widget from the highest-numbered
    snap*.svg file present, falling back to output*.xml.
    """
    if rdir:
        self.output_dir = rdir

    if self.first_time:
        self.first_time = False
        full_xml_filename = Path(os.path.join(self.output_dir, 'config.xml'))
        if full_xml_filename.is_file():
            tree = ET.parse(full_xml_filename)  # this file cannot be overwritten; part of tool distro
            xml_root = tree.getroot()
            self.svg_delta_t = int(xml_root.find(".//SVG//interval").text)
            self.substrate_delta_t = int(xml_root.find(".//full_data//interval").text)
            # number of SVG frames per substrate (full-data) frame
            self.modulo = int(self.substrate_delta_t / self.svg_delta_t)

    # Prefer the SVG snapshots for the frame count; fall back to the
    # substrate output files if no SVGs exist (yet).
    all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snap*.svg')))
    if len(all_files) > 0:
        last_file = all_files[-1]
        # filename suffix encodes the frame number: "snapshot%08d.svg"
        self.max_frames.value = int(last_file[-12:-4])
    else:
        substrate_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml')))
        if len(substrate_files) > 0:
            last_file = substrate_files[-1]
            # same 8-digit frame-number suffix convention: "output%08d.xml"
            self.max_frames.value = int(last_file[-12:-4])
def download_config_cb(self):
    """Bundle the run's config.xml into config.zip (in the cwd) for download."""
    config_path = os.path.join(self.output_dir, 'config.xml')
    with zipfile.ZipFile('config.zip', 'w') as archive:
        # Second arg stores the bare filename, not the full path, in the archive.
        archive.write(config_path, os.path.basename(config_path))
def download_svg_cb(self):
    """Zip every SVG snapshot in the output directory into svg.zip (in the cwd)."""
    pattern = os.path.join(self.output_dir, '*.svg')
    with zipfile.ZipFile('svg.zip', 'w') as archive:
        for svg_path in glob.glob(pattern):
            # Store under the bare filename so the archive has no directory prefix.
            archive.write(svg_path, os.path.basename(svg_path))
def download_cb(self):
    """Zip all MCDS output (*.xml then *.mat) into mcds.zip (in the cwd)."""
    patterns = (os.path.join(self.output_dir, '*.xml'),
                os.path.join(self.output_dir, '*.mat'))
    with zipfile.ZipFile('mcds.zip', 'w') as archive:
        for pattern in patterns:
            for data_path in glob.glob(pattern):
                # Store under the bare filename (no directory prefix).
                archive.write(data_path, os.path.basename(data_path))
def update_max_frames(self, _b):
    """Observer for the '# cell frames' widget: push its value to the frame slider's max."""
    frame_slider = self.i_plot.children[0]
    frame_slider.max = self.max_frames.value
# called if user selected different substrate in dropdown
# @debug_view.capture(clear_output=True)
def substrate_field_changed_cb(self, b):
    """Observer for the substrate Dropdown: restore the remembered colormap
    min/max/fixed settings for the newly selected field, then redraw.

    The widget assignments below would themselves fire substrate_field_cb(),
    so skip_cb is raised around them to suppress redundant redraws.
    """
    if (self.substrate_choice.value == None):
        return

    # Debugging aid: dump a traceback into the hidden debug_view widget when
    # tb_count hits 25. The increment is commented out, so this branch only
    # fires if tb_count is set to 25 elsewhere — effectively dormant.
    # self.tb_count += 1
    if self.tb_count == 25:  # originally checked == 5 (I don't remember why I did this)
        try:
            raise NameError('HiThere')
        except:
            with debug_view:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_tb(exc_traceback, limit=None, file=sys.stdout)

    # +4: offset of this field within the .mat data rows — presumably skips
    # fixed leading rows; TODO confirm against the data layout.
    self.field_index = self.substrate_choice.value + 4
    field_name = self.field_dict[self.substrate_choice.value]

    # BEWARE of these triggering the substrate_field_cb() callback! Hence, the "skip_cb"
    self.skip_cb = True
    self.colormap_min.value = self.field_min_max[field_name][0]
    self.colormap_max.value = self.field_min_max[field_name][1]
    self.colormap_fixed_toggle.value = bool(self.field_min_max[field_name][2])
    self.skip_cb = False

    self.i_plot.update()
# called if user provided different min/max values for colormap, or a different colormap
def substrate_field_cb(self, b):
    """Observer for the colormap min/max/fix widgets (and colormap dropdown):
    record the current settings for the selected substrate, then redraw
    unless suppressed via skip_cb."""
    choice = self.substrate_choice.value
    # +4: offset of this field within the .mat data — presumably skips fixed
    # leading rows; TODO confirm against the data layout.
    self.field_index = choice + 4
    limits = self.field_min_max[self.field_dict[choice]]
    limits[0] = self.colormap_min.value
    limits[1] = self.colormap_max.value
    limits[2] = self.colormap_fixed_toggle.value
    if not self.skip_cb:
        self.i_plot.update()
#------------------------------------------------------------
# Should be called only when we need to *compute* analysis_data
# def update_analysis_data(self,b):
def update_analysis_data(self):
    """Recompute every cached time-series used by the 'Extra analysis' plots.

    Loads all output*.xml snapshots (via pyMCDS) plus dm_tc.dat from the
    run's data directory, filling self.xval and self.yval1..yval15, then
    triggers a redraw. Expensive: re-reads every snapshot on each call,
    which is why it runs only from the explicit 'Update' button.
    """
    self.analysis_data_plotted = True

    # Decide where the output files live: a submitted-run cache dir if the
    # current output_dir points there, otherwise the local 'tmpdir' used by
    # interactive runs (unless we are already inside it).
    cwd = os.getcwd()
    data_dir = cwd
    if 'cache' in self.output_dir:
        data_dir = self.output_dir
    else:
        if not 'tmpdir' in cwd:
            data_dir = os.path.abspath('tmpdir')
    # NOTE(review): this chdir is never undone, so the process cwd is left at
    # data_dir after this call — confirm callers tolerate that.
    os.chdir(data_dir)
    xml_files = glob.glob('output*.xml')
    xml_files.sort()

    ds_count = len(xml_files)
    # Load every snapshot; each mcds[i].data['discrete_cells'] holds per-cell
    # arrays keyed by name ('cell_type', 'cycle_model', 'virion',
    # 'assembled_virion', positions, volumes, ...).
    mcds = [pyMCDS(xml_files[i], '.') for i in range(ds_count)]  # optimize eventually?

    # Common x axis: simulation time of each snapshot.
    tval = np.linspace(0, mcds[-1].get_time(), len(xml_files))

    xname = 'time'
    if xname == self.tname:
        self.xval = tval
    else:
        print("Warning: xname != self.tname")

    # Cell-type codes used below (per the original annotations): 1=epithelial,
    # 3=CD8, 4=macrophage, 5=neutrophil, 6=DC, 7=CD4, 8=fibroblast; a
    # cycle_model >= 100 marks a dead cell.
    # NOTE(review): in these expressions '==' binds tighter than '&', so
    # '(A) & (B) == True' parses as A & (B == True) == A & B — works, but the
    # '== True' is redundant and misleading.
    self.analysis_data_wait.value = 'compute 1 of 6 ...'
    # count epi cells still alive
    self.yval1 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 1) & (mcds[idx].data['discrete_cells']['cycle_model'] < 100) == True)) for idx in range(ds_count)] )
    # count epi cells infected (more than one virion)
    self.yval2 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 1) & (mcds[idx].data['discrete_cells']['virion'] > 1.) == True)) for idx in range(ds_count)] )
    # count epi cells dead
    self.yval3 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 1) & (mcds[idx].data['discrete_cells']['cycle_model'] >= 100) == True)) for idx in range(ds_count)] )

    self.analysis_data_wait.value = 'compute 2 of 6 ...'
    # count live macrophages
    self.yval4 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 4) & (mcds[idx].data['discrete_cells']['cycle_model'] < 100.) == True)) for idx in range(ds_count)] )
    # count live neutrophils
    self.yval5 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 5) & (mcds[idx].data['discrete_cells']['cycle_model'] < 100.) == True)) for idx in range(ds_count)] )
    # count live CD8 cells
    self.yval6 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 3) & (mcds[idx].data['discrete_cells']['cycle_model'] < 100.) == True)) for idx in range(ds_count)] )
    # count live DCs
    self.yval7 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 6) & (mcds[idx].data['discrete_cells']['cycle_model'] < 100.) == True)) for idx in range(ds_count)] )
    # count live CD4 cells
    self.yval8 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 7) & (mcds[idx].data['discrete_cells']['cycle_model'] < 100.) == True)) for idx in range(ds_count)] )
    # count live fibroblasts
    self.yval9 = np.array( [(np.count_nonzero((mcds[idx].data['discrete_cells']['cell_type'] == 8) & (mcds[idx].data['discrete_cells']['cycle_model'] < 100.) == True)) for idx in range(ds_count)] )

    self.analysis_data_wait.value = 'compute 3 of 6 ...'
    # viral load: total assembled virions (floored per cell) across all cells
    self.yval10 = np.array( [np.floor(mcds[idx].data['discrete_cells']['assembled_virion']).sum() for idx in range(ds_count)] ).astype(int)

    self.analysis_data_wait.value = 'compute 4 of 6 ...'
    # antibody load: summed 'Ig' substrate concentration per snapshot
    self.yval15 = np.array([ (mcds[idx].get_concentrations('Ig')).sum() for idx in range(ds_count)] )

    self.analysis_data_wait.value = 'compute 5 and 6 ...'
    # Lymph-node dynamics come from a separate space-delimited file; columns
    # presumably are DC, TC, Th1, Th2 respectively — TODO confirm with the
    # model's lymph-node output code.
    lymph_data = np.genfromtxt('dm_tc.dat', delimiter=' ')
    self.yval11 = lymph_data[:,0]
    self.yval12 = lymph_data[:,1]
    self.yval13 = lymph_data[:,2]
    self.yval14 = lymph_data[:,3]

    self.i_plot.update()
#------------------------------------------------------------
# def plot_analysis_data_dummy(self):
# print('----- plot_analysis_data()')
# x = np.linspace(0, 2*np.pi, 400)
# y = np.sin(x**2)
# # self.i_plot.update()
# self.ax1.plot(x, y)
#------------------------------------------------------------
# Performed when the "Extra analysis" is toggled off
def plot_empty_analysis_data(self):
self.ax1.plot([0.], [0.], color='white',marker='.') # hack empty
# self.ax1.clf()
self.ax1.get_xaxis().set_visible(False)
self.ax1.get_yaxis().set_visible(False)
self.ax1.axis('off')
self.ax1_lymph_TC.plot([0.], [0.], color='white',marker='.') # hack empty
# self.ax1.clf()
self.ax1_lymph_TC.get_xaxis().set_visible(False)
self.ax1_lymph_TC.get_yaxis().set_visible(False)
self.ax1_lymph_TC.axis('off')
self.ax1_lymph_TH2.plot([0.], [0.], color='white',marker='.') # hack empty
# self.ax1.clf()
self.ax1_lymph_TH2.get_xaxis().set_visible(False)
self.ax1_lymph_TH2.get_yaxis().set_visible(False)
self.ax1_lymph_TH2.axis('off')
#------------------------------------------------------------
# Called from 'plot_substrate' if the checkbox is ON
# def plot_analysis_data(self, xname, yname_list, t):
    def plot_analysis_data(self, xname, yname_list, substrate_frame_num):
        """Draw the 2-D analysis plot (the right-hand axes, self.ax1).

        Renders line plots of the precomputed series (self.yval1..yval15,
        filled in elsewhere) against self.xval, selected by
        self.analysis_data_choice.value:

          0: live / infected / dead epithelial cell counts
          1: immune cell counts (Mac, Neut, CD8, DC, CD4, Fib)
          2: viral load
          3: total antibody (Ig) signal
          4: lymph node DC / TC dynamics (TC on twin y-axis self.ax1_lymph_TC)
          5: lymph node Th1 / Th2 dynamics (Th2 on twin y-axis self.ax1_lymph_TH2)

        A circular marker (plus numeric text labels for choices 0 and 1)
        highlights the frame currently shown in the substrate/cells plot.

        Parameters:
          xname: name of the x series; compared to self.tname to pick
                 the x-axis label.
          yname_list: y series names; currently only used implicitly for
                 labelling conventions (not read directly below).
          substrate_frame_num: index into self.xval marking the current
                 substrate frame for the marker overlay.
        """
        # print("---------- plot_analysis_data()")
        global current_frame

        #----------- line plots for extra analysis ---------------------
        # The lymph twin axes are hidden unless a lymph-node choice (4/5)
        # re-enables the relevant one below.
        self.ax1_lymph_TC.get_yaxis().set_visible(False)
        self.ax1_lymph_TC.axis('off')
        self.ax1_lymph_TH2.get_yaxis().set_visible(False)
        self.ax1_lymph_TH2.axis('off')

        if self.analysis_data_choice.value == 0:  # live,infected,dead
            p1 = self.ax1.plot(self.xval, self.yval1, label='live', linewidth=3)
            p2 = self.ax1.plot(self.xval, self.yval2, label='infected', linewidth=3)
            p3 = self.ax1.plot(self.xval, self.yval3, label='dead', linewidth=3)
        elif self.analysis_data_choice.value == 1:  # Mac,Neut,CD8,DC,CD4,Fib
            p1 = self.ax1.plot(self.xval, self.yval4, label='Mac', linewidth=3, color=self.mac_color)
            p2 = self.ax1.plot(self.xval, self.yval5, linestyle='dashed', label='Neut', linewidth=3, color=self.neut_color)
            p3 = self.ax1.plot(self.xval, self.yval6, label='CD8', linewidth=3, color=self.cd8_color)
            p4 = self.ax1.plot(self.xval, self.yval7, linestyle='dashed', label='DC', linewidth=3, color=self.dc_color)
            p5 = self.ax1.plot(self.xval, self.yval8, label='CD4', linewidth=3, color=self.cd4_color)
            p6 = self.ax1.plot(self.xval, self.yval9, linestyle='dashed', label='Fib', linewidth=3, color=self.fib_color)  # dashes=[6,2],
        elif self.analysis_data_choice.value == 2:  # viral load
            # Guard: series may be shorter than xval while output is still
            # being generated; skip the plot rather than crash.
            if len(self.xval) > len(self.yval10):
                print('problem: len(xval) >= len(yval10)',self.xval,self.yval10 )
                pass
            else:
                p7 = self.ax1.plot(self.xval, self.yval10, linewidth=3, color=self.viral_load_color)
        elif self.analysis_data_choice.value == 3:  # Ig total (sum of signal)
            if len(self.xval) > len(self.yval15):
                print('problem: len(xval) >= len(yval15)',self.xval,self.yval15 )
                pass
            else:
                p_Ig = self.ax1.plot(self.xval, self.yval15, linewidth=3, color=self.Ig_total_color)
        elif self.analysis_data_choice.value == 4:  # lymph: DC,TC
            self.ax1_lymph_TC.get_yaxis().set_visible(True)
            self.ax1_lymph_TC.axis('on')
            # Lymph series (read from dm_tc.dat) may be longer than xval;
            # truncate to the common length.
            if len(self.xval) < len(self.yval11):
                p8 = self.ax1.plot(self.xval, self.yval11[:len(self.xval)], linewidth=3, color=self.lymph_DC_color)
                p9 = self.ax1_lymph_TC.plot(self.xval, self.yval12[:len(self.xval)], linewidth=3, color=self.lymph_TC_color)
            else:
                p8 = self.ax1.plot(self.xval, self.yval11, linewidth=3, color=self.lymph_DC_color)
                p9 = self.ax1_lymph_TC.plot(self.xval, self.yval12, linewidth=3, color=self.lymph_TC_color)
        elif self.analysis_data_choice.value == 5:  # lymph: Th1,Th2
            self.ax1_lymph_TH2.get_yaxis().set_visible(True)
            self.ax1_lymph_TH2.axis('on')
            if len(self.xval) < len(self.yval13):
                p10 = self.ax1.plot(self.xval, self.yval13[:len(self.xval)], linewidth=3, color=self.lymph_DC_color)
                p11 = self.ax1_lymph_TH2.plot(self.xval, self.yval14[:len(self.xval)], linewidth=3, color=self.lymph_TC_color)
            else:
                p10 = self.ax1.plot(self.xval, self.yval13, linewidth=3, color=self.lymph_DC_color)
                p11 = self.ax1_lymph_TH2.plot(self.xval, self.yval14, linewidth=3, color=self.lymph_TC_color)

        #----------- markers (circles) on top of line plots: tracking Cells/Substrates plots ---------------------
        xoff= self.xval.max() * .01   # should be a % of axes range
        fsize=12
        kdx = substrate_frame_num
        if (kdx >= len(self.xval)):
            # Marker index is past the available data; draw no marker.
            pass
        elif (substrate_frame_num >= 0 and len(self.xval) > 1):
            if self.analysis_data_choice.value == 0:  # live,infected,dead
                self.ax1.plot(self.xval[kdx], self.yval1[kdx], p1[-1].get_color(), marker='o', markersize=12)
                self.ax1.plot(self.xval[kdx], self.yval2[kdx], p2[-1].get_color(), marker='o', markersize=12)
                self.ax1.plot(self.xval[kdx], self.yval3[kdx], p3[-1].get_color(), marker='o', markersize=12)
                ymax= max(int(self.yval1.max()),int(self.yval2.max()),int(self.yval3.max()))  # should be a % of axes range
                yoff= ymax * .01  # should be a % of axes range
                self.ax1.text( self.xval[kdx]+xoff, self.yval1[kdx]+yoff, str(self.yval1[kdx]), fontsize=fsize)
                self.ax1.text( self.xval[kdx]+xoff, self.yval2[kdx]+yoff, str(self.yval2[kdx]), fontsize=fsize)
                self.ax1.text( self.xval[kdx]+xoff, self.yval3[kdx]+yoff, str(self.yval3[kdx]), fontsize=fsize)
            elif self.analysis_data_choice.value == 1:  # Mac,Neut,CD8,DC,CD4,Fib
                self.ax1.plot(self.xval[kdx], self.yval4[kdx], p1[-1].get_color(), marker='o', markersize=12)
                self.ax1.plot(self.xval[kdx], self.yval5[kdx], p2[-1].get_color(), marker='o', markersize=12)
                self.ax1.plot(self.xval[kdx], self.yval6[kdx], p3[-1].get_color(), marker='o', markersize=12)
                self.ax1.plot(self.xval[kdx], self.yval7[kdx], p4[-1].get_color(), marker='o', markersize=12)
                self.ax1.plot(self.xval[kdx], self.yval8[kdx], p5[-1].get_color(), marker='o', markersize=12)
                self.ax1.plot(self.xval[kdx], self.yval9[kdx], p6[-1].get_color(), marker='o', markersize=12)
                # label markers
                ymax= max(int(self.yval4.max()), int(self.yval5.max()), int(self.yval6.max()), int(self.yval7.max()), int(self.yval8.max()), int(self.yval9.max()) )  # should be a % of axes range
                yoff= ymax * .01  # should be a % of axes range
                self.ax1.text( self.xval[kdx]+xoff, self.yval4[kdx]+yoff, str(self.yval4[kdx]), fontsize=fsize)
                self.ax1.text( self.xval[kdx]+xoff, self.yval5[kdx]+yoff, str(self.yval5[kdx]), fontsize=fsize)
                self.ax1.text( self.xval[kdx]+xoff, self.yval6[kdx]+yoff, str(self.yval6[kdx]), fontsize=fsize)
                self.ax1.text( self.xval[kdx]+xoff, self.yval7[kdx]+yoff, str(self.yval7[kdx]), fontsize=fsize)
                self.ax1.text( self.xval[kdx]+xoff, self.yval8[kdx]+yoff, str(self.yval8[kdx]), fontsize=fsize)
                self.ax1.text( self.xval[kdx]+xoff, self.yval9[kdx]+yoff, str(self.yval9[kdx]), fontsize=fsize)
            elif self.analysis_data_choice.value == 2:  # viral load
                self.ax1.plot(self.xval[kdx], self.yval10[kdx], color='black', marker='o', markersize=12)
            elif self.analysis_data_choice.value == 3:  # Ig total
                self.ax1.plot(self.xval[kdx], self.yval15[kdx], color='black', marker='o', markersize=12)
            elif self.analysis_data_choice.value == 4:  # lymph:DC,TC
                self.ax1.plot( self.xval[kdx], self.yval11[kdx], p8[-1].get_color(), marker='o', markersize=12)
                self.ax1_lymph_TC.plot(self.xval[kdx], self.yval12[kdx], p9[-1].get_color(), marker='o', markersize=12)
                # NOTE(review): the ymax/yoff/sval values below fed text
                # labels that are now disabled; kept for easy re-enabling.
                ymax= self.yval11.max()
                yoff= ymax * .01  # should be a % of axes range
                sval = '%.2f' % self.yval11[self.substrate_frame]
                ymax= self.yval12.max()
                yoff= ymax * .01  # should be a % of axes range
                sval = '%.2f' % self.yval12[self.substrate_frame]
            elif self.analysis_data_choice.value == 5:  # lymph:Th1,Th2
                self.ax1.plot( self.xval[kdx], self.yval13[kdx], p10[-1].get_color(), marker='o', markersize=12)
                self.ax1_lymph_TH2.plot(self.xval[kdx], self.yval14[kdx], p11[-1].get_color(), marker='o', markersize=12)
                ymax= self.yval13.max()
                yoff= ymax * .01  # should be a % of axes range
                sval = '%.2f' % self.yval13[self.substrate_frame]
                ymax= self.yval14.max()
                yoff= ymax * .01  # should be a % of axes range
                sval = '%.2f' % self.yval14[self.substrate_frame]

        #-------- Provide a legend if necessary
        if self.analysis_data_choice.value == 2:  # no legend for viral load
            pass
        elif self.analysis_data_choice.value == 3:  # no legend for Ig load
            pass
        elif (self.analysis_data_choice.value == 4) or (self.analysis_data_choice.value == 5):
            pass
        else:
            self.ax1.legend(loc='center left', prop={'size': 15})

        if xname == self.tname:
            self.ax1.set_xlabel('time (min)', fontsize=self.axis_label_fontsize)
        else:
            self.ax1.set_xlabel('total ' * (xname != self.tname) + xname)

        # y-axis label depends on which data set is plotted
        if self.analysis_data_choice.value == 0:  # live, infected, dead
            self.ax1.set_ylabel('# of epithelial cells', fontsize=self.axis_label_fontsize)
        elif self.analysis_data_choice.value == 1:  # Mac, Neut, etc
            self.ax1.set_ylabel('# of cells', fontsize=self.axis_label_fontsize)
        elif self.analysis_data_choice.value == 2:  # viral load
            self.ax1.set_ylabel('viral load', fontsize=self.axis_label_fontsize)
        elif self.analysis_data_choice.value == 3:  # Ig sum
            self.ax1.set_ylabel('antibody load', fontsize=self.axis_label_fontsize)
        elif (self.analysis_data_choice.value == 4):  # lymph: DC,TC
            self.ax1.set_ylabel('DC', fontsize=self.axis_label_fontsize, color=self.lymph_DC_color)
            self.ax1_lymph_TC.set_ylabel('TC', fontsize=self.axis_label_fontsize, color=self.lymph_TC_color)
            self.ax1_lymph_TC.tick_params(axis='y', colors=self.lymph_TC_color)
        elif (self.analysis_data_choice.value == 5):  # lymph: Th1,Th2
            self.ax1.set_ylabel('Th1', fontsize=self.axis_label_fontsize, color=self.lymph_DC_color)
            self.ax1_lymph_TH2.set_ylabel('Th2', fontsize=self.axis_label_fontsize, color=self.lymph_TC_color)
            self.ax1_lymph_TH2.tick_params(axis='y', colors=self.lymph_TC_color)

        # Title: convert the last x value (minutes) into days/hours/minutes.
        max_time_min = int(self.xval[-1])
        num_days = int(max_time_min/1440.)
        num_hours = int((max_time_min - num_days*1440.)/60.)
        num_min = int(max_time_min % 60)
        title_str = 'Updated to ' + '%dd, %dh %dm'%(num_days,num_hours,num_min)
        if (self.analysis_data_plotted):
            self.ax1.set_title(title_str)
        else:
            # No data plotted yet: blank out the tick labels.
            self.ax1.set_xticklabels([])
            self.ax1.set_yticklabels([])
#---------------------------------------------------------------------------
    def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):
        """
        See https://gist.github.com/syrte/592a062c562cd2a98a83

        Make a scatter plot of circles on self.ax0.
        Similar to plt.scatter, but the size of circles are in data scale.

        Parameters
        ----------
        x, y : scalar or array_like, shape (n, )
            Input data
        s : scalar or array_like, shape (n, )
            Radius of circles.
        c : color or sequence of color, optional, default : 'b'
            `c` can be a single color format string, or a sequence of color
            specifications of length `N`, or a sequence of `N` numbers to be
            mapped to colors using the `cmap` and `norm` specified via kwargs.
            Note that `c` should not be a single numeric RGB or RGBA sequence
            because that is indistinguishable from an array of values
            to be colormapped. (If you insist, use `color` instead.)
            `c` can be a 2-D array in which the rows are RGB or RGBA, however.
        vmin, vmax : scalar, optional, default: None
            `vmin` and `vmax` are used in conjunction with `norm` to normalize
            luminance data. If either are `None`, the min and max of the
            color array is used.
        kwargs : `~matplotlib.collections.Collection` properties
            Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
            norm, cmap, transform, etc.

        Returns
        -------
        paths : `~matplotlib.collections.PathCollection`

        Examples
        --------
        a = np.arange(11)
        circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
        plt.colorbar()

        License
        --------
        This code is under [The BSD 3-Clause License]
        (http://opensource.org/licenses/BSD-3-Clause)
        """
        # A scalar `c` is a plain color, not an array to colormap.
        if np.isscalar(c):
            kwargs.setdefault('color', c)
            c = None

        # Expand the scatter-style abbreviations into full property names.
        if 'fc' in kwargs:
            kwargs.setdefault('facecolor', kwargs.pop('fc'))
        if 'ec' in kwargs:
            kwargs.setdefault('edgecolor', kwargs.pop('ec'))
        if 'ls' in kwargs:
            kwargs.setdefault('linestyle', kwargs.pop('ls'))
        if 'lw' in kwargs:
            kwargs.setdefault('linewidth', kwargs.pop('lw'))
        # You can set `facecolor` with an array for each patch,
        # while you can only set `facecolors` with a value for all.

        # Broadcast x/y/s together so scalars and arrays mix freely.
        zipped = np.broadcast(x, y, s)
        patches = [Circle((x_, y_), s_)
                   for x_, y_, s_ in zipped]
        collection = PatchCollection(patches, **kwargs)
        if c is not None:
            # Array-valued c: colormap the patches via the collection.
            c = np.broadcast_to(c, zipped.shape).ravel()
            collection.set_array(c)
            collection.set_clim(vmin, vmax)

        self.ax0.add_collection(collection)
        self.ax0.autoscale_view()
        if c is not None:
            # Register as the "current image" so a later colorbar picks it up.
            self.ax0.sci(collection)
        # return collection
#------------------------------------------------------------
# def plot_svg(self, frame, rdel=''):
def plot_svg(self, frame):
# global current_idx, axes_max
global current_frame
current_frame = frame
fname = "snapshot%08d.svg" % frame
full_fname = os.path.join(self.output_dir, fname)
# with debug_view:
# print("plot_svg:", full_fname)
# print("-- plot_svg:", full_fname)
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.")
return
xlist = deque()
ylist = deque()
rlist = deque()
rgb_list = deque()
# print('\n---- ' + fname + ':')
# tree = ET.parse(fname)
tree = ET.parse(full_fname)
root = tree.getroot()
# print('--- root.tag ---')
# print(root.tag)
# print('--- root.attrib ---')
# print(root.attrib)
# print('--- child.tag, child.attrib ---')
numChildren = 0
for child in root:
# print(child.tag, child.attrib)
# print("keys=",child.attrib.keys())
if self.use_defaults and ('width' in child.attrib.keys()):
self.axes_max = float(child.attrib['width'])
# print("debug> found width --> axes_max =", axes_max)
if child.text and "Current time" in child.text:
svals = child.text.split()
# remove the ".00" on minutes
self.title_str += " cells: " + svals[2] + "d, " + svals[4] + "h, " + svals[7][:-3] + "m"
# self.cell_time_mins = int(svals[2])*1440 + int(svals[4])*60 + int(svals[7][:-3])
# self.title_str += " cells: " + str(self.cell_time_mins) + "m" # rwh
# print("width ",child.attrib['width'])
# print('attrib=',child.attrib)
# if (child.attrib['id'] == 'tissue'):
if ('id' in child.attrib.keys()):
# print('-------- found tissue!!')
tissue_parent = child
break
# print('------ search tissue')
cells_parent = None
for child in tissue_parent:
# print('attrib=',child.attrib)
if (child.attrib['id'] == 'cells'):
# print('-------- found cells, setting cells_parent')
cells_parent = child
break
numChildren += 1
num_cells = 0
# print('------ search cells')
for child in cells_parent:
# print(child.tag, child.attrib)
# print('attrib=',child.attrib)
for circle in child: # two circles in each child: outer + nucleus
# circle.attrib={'cx': '1085.59','cy': '1225.24','fill': 'rgb(159,159,96)','r': '6.67717','stroke': 'rgb(159,159,96)','stroke-width': '0.5'}
# print(' --- cx,cy=',circle.attrib['cx'],circle.attrib['cy'])
xval = float(circle.attrib['cx'])
# map SVG coords into comp domain
# xval = (xval-self.svg_xmin)/self.svg_xrange * self.x_range + self.xmin
xval = xval/self.x_range * self.x_range + self.xmin
s = circle.attrib['fill']
# print("s=",s)
# print("type(s)=",type(s))
if (s[0:3] == "rgb"): # if an rgb string, e.g. "rgb(175,175,80)"
rgb = list(map(int, s[4:-1].split(",")))
rgb[:] = [x / 255. for x in rgb]
else: # otherwise, must be a color name
rgb_tuple = mplc.to_rgb(mplc.cnames[s]) # a tuple
rgb = [x for x in rgb_tuple]
# test for bogus x,y locations (rwh TODO: use max of domain?)
too_large_val = 10000.
if (np.fabs(xval) > too_large_val):
print("bogus xval=", xval)
break
yval = float(circle.attrib['cy'])
# yval = (yval - self.svg_xmin)/self.svg_xrange * self.y_range + self.ymin
yval = yval/self.y_range * self.y_range + self.ymin
if (np.fabs(yval) > too_large_val):
print("bogus xval=", xval)
break
rval = float(circle.attrib['r'])
# if (rgb[0] > rgb[1]):
# print(num_cells,rgb, rval)
xlist.append(xval)
ylist.append(yval)
rlist.append(rval)
rgb_list.append(rgb)
# For .svg files with cells that *have* a nucleus, there will be a 2nd
if (not self.show_nucleus):
#if (not self.show_nucleus):
break
num_cells += 1
# if num_cells > 3: # for debugging
# print(fname,': num_cells= ',num_cells," --- debug exit.")
# sys.exit(1)
# break
# print(fname,': num_cells= ',num_cells)
xvals = np.array(xlist)
yvals = np.array(ylist)
rvals = np.array(rlist)
rgbs = np.array(rgb_list)
# print("xvals[0:5]=",xvals[0:5])
# print("rvals[0:5]=",rvals[0:5])
# print("rvals.min, max=",rvals.min(),rvals.max())
# rwh - is this where I change size of render window?? (YES - yipeee!)
# plt.figure(figsize=(6, 6))
# plt.cla()
# if (self.substrates_toggle.value):
self.title_str += " (" + str(num_cells) + " agents)"
# title_str = " (" + str(num_cells) + " agents)"
# else:
# mins= round(int(float(root.find(".//current_time").text))) # TODO: check units = mins
# hrs = int(mins/60)
# days = int(hrs/24)
# title_str = '%dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))
# plt.title(self.title_str)
self.ax0.set_title(self.title_str)
# plt.xlim(self.xmin, self.xmax)
# plt.ylim(self.ymin, self.ymax)
self.ax0.set_xlim(self.xmin, self.xmax)
self.ax0.set_ylim(self.ymin, self.ymax)
# self.ax0.colorbar(collection)
# plt.xlim(axes_min,axes_max)
# plt.ylim(axes_min,axes_max)
# plt.scatter(xvals,yvals, s=rvals*scale_radius, c=rgbs)
# TODO: make figsize a function of plot_size? What about non-square plots?
# self.fig = plt.figure(figsize=(9, 9))
# axx = plt.axes([0, 0.05, 0.9, 0.9]) # left, bottom, width, height
# axx = fig.gca()
# print('fig.dpi=',fig.dpi) # = 72
# im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])
# ax.xlim(axes_min,axes_max)
# ax.ylim(axes_min,axes_max)
# convert radii to radii in pixels
# ax1 = self.fig.gca()
# N = len(xvals)
# rr_pix = (ax1.transData.transform(np.vstack([rvals, rvals]).T) -
# ax1.transData.transform(np.vstack([np.zeros(N), np.zeros(N)]).T))
# rpix, _ = rr_pix.T
# markers_size = (144. * rpix / self.fig.dpi)**2 # = (2*rpix / fig.dpi * 72)**2
# markers_size = markers_size/4000000.
# print('max=',markers_size.max())
#rwh - temp fix - Ah, error only occurs when "edges" is toggled on
if (self.show_edge):
try:
# plt.scatter(xvals,yvals, s=markers_size, c=rgbs, edgecolor='black', linewidth=0.5)
self.circles(xvals,yvals, s=rvals, color=rgbs, alpha=self.alpha, edgecolor='black', linewidth=0.5)
# cell_circles = self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)
# plt.sci(cell_circles)
except (ValueError):
pass
else:
# plt.scatter(xvals,yvals, s=markers_size, c=rgbs)
self.circles(xvals,yvals, s=rvals, color=rgbs, alpha=self.alpha)
# im = ax.imshow(np.arange(100).reshape((10, 10))) # rwh: dummy, for future testing
# cbar = self.fig.colorbar(substrate_plot, ax=self.ax0)
# plt.colorbar(im, cax=cax)
# x = np.linspace(0, 2*np.pi, 100)
# y = np.sin(x**2)
# self.i_plot.update()
# self.ax1.plot(x, y)
# self.plot_analysis_data_0("time", ["assembled_virion"], 20)
# if (self.show_tracks):
# for key in self.trackd.keys():
# xtracks = self.trackd[key][:,0]
# ytracks = self.trackd[key][:,1]
# plt.plot(xtracks[0:frame],ytracks[0:frame], linewidth=5)
#---------------------------------------------------------------------------
# assume "frame" is cell frame #, unless Cells is togggled off, then it's the substrate frame #
# def plot_substrate(self, frame, grid):
    # assume "frame" is cell frame #, unless Cells is toggled off, then it's the substrate frame #
    def plot_substrate(self, frame):
        """Top-level redraw: substrate contour plot, cell (SVG) plot, and analysis plot.

        Creates a fresh figure with a substrate/cells axes (self.ax0) and an
        analysis axes (self.ax1, plus two twin y-axes for lymph data), then:
          1. if the Substrates toggle is on, loads the .mat/.xml output pair
             for the corresponding substrate frame and contour-plots the
             selected field;
          2. if the Cells toggle is on, overlays the SVG cell plot;
          3. draws either the extra-analysis plot or a blank placeholder.
        """
        self.title_str = ''

        # Recall:
        # self.svg_delta_t = config_tab.svg_interval.value
        # self.substrate_delta_t = config_tab.mcds_interval.value
        # self.modulo = int(self.substrate_delta_t / self.svg_delta_t)
        # self.therapy_activation_time = user_params_tab.therapy_activation_time.value

        # Assume: # .svg files >= # substrate files
        if self.substrates_toggle.value:
            if True:  # substrates and 2D plots
                self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(34, 15), gridspec_kw={'width_ratios': [1.5, 1]})
                # twin y-axes used by the lymph-node analysis plots
                self.ax1_lymph_TC = self.ax1.twinx()
                self.ax1_lymph_TH2 = self.ax1.twinx()

            # Map the cell (SVG) frame number onto a substrate frame number,
            # accounting for a possibly different output frequency after therapy.
            if (self.customized_output_freq and (frame > self.max_svg_frame_pre_therapy)):
                self.substrate_frame = self.max_substrate_frame_pre_therapy + (frame - self.max_svg_frame_pre_therapy)
            else:
                self.substrate_frame = int(frame / self.modulo)

            fname = "output%08d_microenvironment0.mat" % self.substrate_frame
            xml_fname = "output%08d.xml" % self.substrate_frame
            full_fname = os.path.join(self.output_dir, fname)
            full_xml_fname = os.path.join(self.output_dir, xml_fname)
            if not os.path.isfile(full_fname):
                print("Once output files are generated, click the slider.")
                return

            # Current simulation time (minutes) from the companion .xml file.
            tree = ET.parse(full_xml_fname)
            xml_root = tree.getroot()
            mins = round(int(float(xml_root.find(".//current_time").text)))  # TODO: check units = mins
            self.substrate_mins= round(int(float(xml_root.find(".//current_time").text)))  # TODO: check units = mins
            hrs = int(mins/60)
            days = int(hrs/24)
            self.title_str = 'substrate: %dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))

            # Load the microenvironment: row 0/1 are x/y mesh coords,
            # subsequent rows are substrate fields.
            info_dict = {}
            scipy.io.loadmat(full_fname, info_dict)
            M = info_dict['multiscale_microenvironment']
            f = M[self.field_index, :]  # 4=tumor cells field, 5=blood vessel density, 6=growth substrate
            try:
                xgrid = M[0, :].reshape(self.numy, self.numx)
                ygrid = M[1, :].reshape(self.numy, self.numx)
            except:
                print("substrates.py: mismatched mesh size for reshape: numx,numy=",self.numx, self.numy)
                pass

            num_contours = 15
            levels = MaxNLocator(nbins=num_contours).tick_values(self.colormap_min.value, self.colormap_max.value)
            contour_ok = True
            if (self.colormap_fixed_toggle.value):
                # Fixed color range: use explicit levels, extend out-of-range values.
                try:
                    substrate_plot = self.ax0.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.colormap_dd.value, fontsize=self.fontsize)
                except:
                    contour_ok = False
            else:
                # Auto-scaled color range.
                try:
                    substrate_plot = self.ax0.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.colormap_dd.value)
                except:
                    contour_ok = False

            if (contour_ok):
                self.ax0.set_title(self.title_str, fontsize=self.fontsize)
                cbar = self.fig.colorbar(substrate_plot, ax=self.ax0)
                cbar.ax.tick_params(labelsize=self.fontsize)
            self.ax0.set_xlim(self.xmin, self.xmax)
            self.ax0.set_ylim(self.ymin, self.ymax)

        # Now plot the cells (possibly on top of the substrate)
        if self.cells_toggle.value:
            if not self.substrates_toggle.value:
                # No substrate plot was made above, so create the figure here.
                if True:  # cells (SVG), but no 2D plot (and no substrate)
                    self.fig, (self.ax0, self.ax1) = plt.subplots(1, 2, figsize=(25, self.figsize_height_substrate), gridspec_kw={'width_ratios': [1.1, 1]})
                    self.ax1_lymph_TC = self.ax1.twinx()
                    self.ax1_lymph_TH2 = self.ax1.twinx()
            self.svg_frame = frame
            self.plot_svg(self.svg_frame)

        # Either the extra-analysis plot or a blank placeholder on self.ax1.
        if (self.analysis_data_toggle.value):
            self.substrate_frame = int(frame / self.modulo)
            self.plot_analysis_data("time", ["assembled_virion"], self.substrate_frame)
        else:
            self.plot_empty_analysis_data()
"heiland@indiana.edu"
] | heiland@indiana.edu |
34adc483132c9caf762c7cacb07b61a87b41f63f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /tNFTY9ggpTkeHvBaN_8.py | a1976203fc818883d4a044522788b64c44d4a5b8 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
def total_volume(*args):
    """Return the combined volume of the given boxes.

    Each positional argument is an iterable of edge lengths; a box's volume
    is the product of those lengths. An empty iterable contributes 1 (the
    empty-product convention, matching the original accumulator behavior),
    and calling with no arguments returns 0.
    """
    from math import prod  # local import: this snippet has no import block
    return sum(prod(each_list) for each_list in args)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
128e264329fe51285f1f6f5e2510c05123196b45 | 46244bb6af145cb393846505f37bf576a8396aa0 | /leetcode/152.maximum_product_subarray.py | 0fc1bda3b921136bc8e52fbde9679d2808d39979 | [] | no_license | aoeuidht/homework | c4fabfb5f45dbef0874e9732c7d026a7f00e13dc | 49fb2a2f8a78227589da3e5ec82ea7844b36e0e7 | refs/heads/master | 2022-10-28T06:42:04.343618 | 2022-10-15T15:52:06 | 2022-10-15T15:52:06 | 18,726,877 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
class Solution:
    # @param A, a list of integers
    # @return an integer
    def maxProduct(self, A):
        """Return the maximum product of a contiguous subarray of A.

        Standard O(n) dynamic programming: track both the maximum and the
        minimum product of a subarray ending at each position (the minimum
        can flip to the maximum when a negative number is seen).

        Returns 0 for an empty input, matching the previous behavior.

        This replaces a much longer divide-through implementation that
        contained a Python-2 debug `print` statement (a syntax error under
        Python 3) and fragile zero-boundary handling.
        """
        if not A:
            return 0
        best = cur_max = cur_min = A[0]
        for a in A[1:]:
            # A negative `a` swaps the roles of the running max and min.
            candidates = (a, cur_max * a, cur_min * a)
            cur_max = max(candidates)
            cur_min = min(candidates)
            best = max(best, cur_max)
        return best
if __name__ == '__main__':
    s = Solution()
    # list(...) is required on Python 3: map() is lazy and maxProduct
    # needs len() and indexing; print is the function form (was `print r`).
    r = s.maxProduct(list(map(int, sys.argv[1].split(','))))
    print(r)
| [
"sockpuppet.lea@gmail.com"
] | sockpuppet.lea@gmail.com |
ff897b58afc4069f5b6d6e1cdadf44950299b683 | dddd18198df381e61f5dd71985edd362d19001c4 | /djstell/reactor/models.py | ed65438e73460408a7b344e10df947a5ed0d71be | [] | no_license | nedbat/nedbatcom | 1b148e3cf30cff3c121dbe5412017e8a7f42a30a | d460a606c77ebf856557ccdee29dd27a69455896 | refs/heads/master | 2023-09-01T09:28:37.368419 | 2023-08-29T18:14:38 | 2023-08-29T18:14:38 | 7,422,053 | 8 | 6 | null | 2022-07-10T23:28:37 | 2013-01-03T11:52:06 | HTML | UTF-8 | Python | false | false | 1,753 | py | from django.conf import settings
from django.db import models
from django.urls import reverse
from .tools import md5
class Comment(models.Model):
    """A reader comment on a blog entry, stored via the "reactor" DB router."""

    # Identifier of the entry the comment is attached to (a plain string,
    # not a foreign key — comments live in a separate database).
    entryid = models.CharField(max_length=40, db_index=True)
    # Commenter's display name.
    name = models.CharField(max_length=60)
    # Optional email; feeds the Gravatar lookup in gravatar_url().
    email = models.CharField(max_length=100, null=True)
    # Optional website; old rows may lack a scheme (see fixed_website()).
    website = models.CharField(max_length=100, null=True)
    # When the comment was posted.
    posted = models.DateTimeField(db_index=True)
    # Raw comment body text.
    body = models.TextField()
    # Whether the commenter asked to be notified.
    notify = models.BooleanField()

    def __str__(self):
        return f"Comment from {self.name} on {self.entryid}"

    def admin_url(self):
        """Return an absolute URL to this comment's admin change page."""
        aurl = reverse(f'admin:{self._meta.app_label}_{self._meta.model_name}_change', args=(self.pk,))
        return settings.EXT_BASE + aurl

    def gravatar_url(self):
        """Return the commenter's Gravatar URL with a deterministic local fallback.

        The fallback avatar index is derived from the email/website hash, so
        the same commenter always falls back to the same local image.
        NOTE(review): `md5` is the project helper from .tools — assumed to
        return a hex digest string; confirm in tools.py.
        """
        # 282 appears to be the number of local fallback avatars (a0..a281).
        anum = int(md5(self.email, self.website)[:4], 16) % 282
        email_hash = md5(self.email)
        avhost = "https://nedbatchelder.com"
        default_url = f"{avhost}/pix/avatar/a{anum}.jpg"
        url = f"https://www.gravatar.com/avatar/{email_hash}.jpg?default={default_url}&size=80"
        return url

    def fixed_website(self):
        """Ancient comments might be missing http://, so add it."""
        if self.website and "://" not in self.website:
            return "http://" + self.website
        else:
            return self.website
class ReactorRouter:
    """Database router that pins the ``reactor`` app to the ``reactor`` DB.

    Returning ``None`` from any hook means "no opinion", letting Django
    fall through to the next router or its defaults.
    """

    def db_for_read(self, model, **hints):
        """Route reads of reactor models to the reactor database."""
        return "reactor" if model._meta.app_label == "reactor" else None

    # Writes follow exactly the same routing rule as reads.
    db_for_write = db_for_read

    def allow_relation(self, obj1, obj2, **hints):
        """Express no opinion about cross-object relations."""
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """Only allow reactor migrations on the reactor database."""
        if app_label != "reactor":
            return None
        return db == "reactor"
| [
"ned@nedbatchelder.com"
] | ned@nedbatchelder.com |
730a99c3692fb4c357608ee9bc85fb75f8fc6ec0 | 9141e27444128fa8474235f63225f8c121c42878 | /tests/extensions/test_registry.py | 0a0de60d4e5c7a1b0cfcc2229d21f243b13e32de | [
"BSD-3-Clause"
] | permissive | gc-ss/pyapp | 00a8ab7c517fe6613049640991e27880a00887fb | 1fa2651d8b42f6e28b0c33b2b4fd287affd3a88f | refs/heads/master | 2023-04-08T01:51:43.374810 | 2020-09-14T00:22:07 | 2020-09-14T00:22:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,257 | py | from unittest import mock
import pytest
from pyapp.extensions.registry import ExtensionDetail
from pyapp.extensions.registry import ExtensionEntryPoints
from pyapp.extensions.registry import ExtensionRegistry
from pyapp.extensions.registry import pkg_resources
from tests.sample_ext import SampleExtension
from tests.sample_ext_simple import SampleSimpleExtension
class TestExtensionDetail:
@pytest.fixture
def target(self):
return ExtensionDetail(
SampleExtension(), "SampleExtension", "Sample Extension", "1.1"
)
@pytest.fixture
def target_simple(self):
return ExtensionDetail(
SampleSimpleExtension(),
"SampleSimpleExtension",
"Sample Simple Extension",
"1.2",
)
def test_default_settings(self, target: ExtensionDetail):
assert target.default_settings == "tests.sample_ext.default_settings"
def test_default_settings__absolute(self, target_simple: ExtensionDetail):
assert target_simple.default_settings == "tests.sample_ext.default_settings"
def test_checks_module(self, target: ExtensionDetail):
assert target.checks_module == "tests.sample_ext.checks"
def test_checks_module__absolute(self, target_simple: ExtensionDetail):
assert target_simple.checks_module == "tests.sample_ext.checks"
def test_register_commands(self, target: ExtensionDetail):
target.register_commands("abc")
assert target.extension.register_commands_called == "abc"
def test_checks_module__not_defined(self, target_simple: ExtensionDetail):
target_simple.register_commands("abc")
assert target_simple.extension.register_commands_called is False
def test_ready(self, target: ExtensionDetail):
target.ready()
assert target.extension.ready_called is True
def test_ready__not_defined(self, target_simple: ExtensionDetail):
target_simple.ready()
assert target_simple.extension.ready_called is False
def _make_entry_point(name, project_name, version):
mock_entry_point = mock.Mock()
mock_entry_point.name = name
mock_entry_point.dist.project_name = project_name
mock_entry_point.dist.version = version
mock_entry_point.resolve.return_value = "ExtensionInstance"
return mock_entry_point
class TestExtensionEntryPoints:
@pytest.fixture
def patchentrypoints(self, monkeypatch):
entry_points = (
_make_entry_point("FooExtension", "foo-extension", "0.1.2"),
_make_entry_point("BarExtension", "bar-extension", "3.2.1"),
)
mock_iter_entry_points = mock.Mock(return_value=entry_points)
monkeypatch.setattr(pkg_resources, "iter_entry_points", mock_iter_entry_points)
@pytest.fixture
def target(self, patchentrypoints):
return ExtensionEntryPoints()
def test_entry_points(self, target: ExtensionEntryPoints):
actual = [ep.name for ep in target._entry_points()]
assert ["FooExtension", "BarExtension"] == actual
def test_entry_points__with_white_list(self, patchentrypoints):
target = ExtensionEntryPoints(white_list=("BarExtension",))
actual = [ep.name for ep in target._entry_points()]
assert ["BarExtension"] == actual
def test_extensions(self, target: ExtensionEntryPoints):
actual = list(target.extensions())
assert [
ExtensionDetail(
"ExtensionInstance", "FooExtension", "foo-extension", "0.1.2"
),
ExtensionDetail(
"ExtensionInstance", "BarExtension", "bar-extension", "3.2.1"
),
] == actual
def test_extensions__no_load(self, target: ExtensionEntryPoints):
actual = list(target.extensions(False))
assert [
ExtensionDetail(None, "FooExtension", "foo-extension", "0.1.2"),
ExtensionDetail(None, "BarExtension", "bar-extension", "3.2.1"),
] == actual
class TestExtensionRegistry:
@pytest.fixture
def target(self):
return ExtensionRegistry(
[
ExtensionDetail(
SampleExtension(), "SampleExtension", "Sample Extension", "1.1"
)
]
)
def test_load_from(self, target: ExtensionRegistry):
target.load_from(
[
ExtensionDetail(
SampleSimpleExtension(),
"SampleSimpleExtension",
"Sample Simple Extension",
"1.2",
)
]
)
assert len(target) == 2
def test_register_commands(self, target: ExtensionRegistry):
mock_extension = mock.Mock()
target.append(
ExtensionDetail(mock_extension, "MockExtension", "Mock Extension", "1.1")
)
target.register_commands("abc")
mock_extension.register_commands.assert_called_with("abc")
def test_ready(self, target: ExtensionRegistry):
mock_extension = mock.Mock()
target.append(
ExtensionDetail(mock_extension, "MockExtension", "Mock Extension", "1.1")
)
target.ready()
mock_extension.ready.assert_called()
| [
"tim@savage.company"
] | tim@savage.company |
e5d7ef0d7bdd476f442e4f2293516f3a52bbf4e6 | 8cde90833d402b3b946e1b77a3265624b61182f8 | /python/openapi_client/models/__init__.py | cf44cc20701851840ab88055a617bb102fba6bd6 | [
"MIT"
] | permissive | joycejade0810/mastercard-api-client-tutorial | 15830eb5c66b07f2a8934485843e7a2474c3a380 | 50eff62c91a33f2ab879152e42ddd345786c80d8 | refs/heads/main | 2023-07-10T21:39:17.661544 | 2021-08-17T10:58:17 | 2021-08-17T10:58:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,295 | py | # coding: utf-8
# flake8: noqa
"""
MDES for Merchants
The MDES APIs are designed as RPC style stateless web services where each API endpoint represents an operation to be performed. All request and response payloads are sent in the JSON (JavaScript Object Notation) data-interchange format. Each endpoint in the API specifies the HTTP Method used to access it. All strings in request and response objects are to be UTF-8 encoded. Each API URI includes the major and minor version of API that it conforms to. This will allow multiple concurrent versions of the API to be deployed simultaneously. <br> __Authentication__ Mastercard uses OAuth 1.0a with body hash extension for authenticating the API clients. This requires every request that you send to Mastercard to be signed with an RSA private key. A private-public RSA key pair must be generated consisting of: <br> 1 . A private key for the OAuth signature for API requests. It is recommended to keep the private key in a password-protected or hardware keystore. <br> 2. A public key is shared with Mastercard during the project setup process through either a certificate signing request (CSR) or the API Key Generator. Mastercard will use the public key to verify the OAuth signature that is provided on every API call.<br> An OAUTH1.0a signer library is available on [GitHub](https://github.com/Mastercard/oauth1-signer-java) <br> __Encryption__<br> All communications between Issuer web service and the Mastercard gateway is encrypted using TLS. <br> __Additional Encryption of Sensitive Data__ In addition to the OAuth authentication, when using MDES Digital Enablement Service, any PCI sensitive and all account holder Personally Identifiable Information (PII) data must be encrypted. This requirement applies to the API fields containing encryptedData. Sensitive data is encrypted using a symmetric session (one-time-use) key. The symmetric session key is then wrapped with an RSA Public Key supplied by Mastercard during API setup phase (the Customer Encryption Key). 
<br> Java Client Encryption Library available on [GitHub](https://github.com/Mastercard/client-encryption-java) # noqa: E501
The version of the OpenAPI document: 1.2.10
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from openapi_client.models.account_holder_data import AccountHolderData
from openapi_client.models.account_holder_data_outbound import AccountHolderDataOutbound
from openapi_client.models.asset_response_schema import AssetResponseSchema
from openapi_client.models.authentication_methods import AuthenticationMethods
from openapi_client.models.billing_address import BillingAddress
from openapi_client.models.card_account_data_inbound import CardAccountDataInbound
from openapi_client.models.card_account_data_outbound import CardAccountDataOutbound
from openapi_client.models.decisioning_data import DecisioningData
from openapi_client.models.delete_request_schema import DeleteRequestSchema
from openapi_client.models.delete_response_schema import DeleteResponseSchema
from openapi_client.models.encrypted_payload import EncryptedPayload
from openapi_client.models.encrypted_payload_transact import EncryptedPayloadTransact
from openapi_client.models.error import Error
from openapi_client.models.errors_response import ErrorsResponse
from openapi_client.models.funding_account_data import FundingAccountData
from openapi_client.models.funding_account_info import FundingAccountInfo
from openapi_client.models.funding_account_info_encrypted_payload import FundingAccountInfoEncryptedPayload
from openapi_client.models.gateway_error import GatewayError
from openapi_client.models.gateway_errors_response import GatewayErrorsResponse
from openapi_client.models.gateway_errors_schema import GatewayErrorsSchema
from openapi_client.models.get_task_status_request_schema import GetTaskStatusRequestSchema
from openapi_client.models.get_task_status_response_schema import GetTaskStatusResponseSchema
from openapi_client.models.get_token_request_schema import GetTokenRequestSchema
from openapi_client.models.get_token_response_schema import GetTokenResponseSchema
from openapi_client.models.media_content import MediaContent
from openapi_client.models.notify_token_encrypted_payload import NotifyTokenEncryptedPayload
from openapi_client.models.notify_token_updated_request_schema import NotifyTokenUpdatedRequestSchema
from openapi_client.models.notify_token_updated_response_schema import NotifyTokenUpdatedResponseSchema
from openapi_client.models.phone_number import PhoneNumber
from openapi_client.models.product_config import ProductConfig
from openapi_client.models.search_tokens_request_schema import SearchTokensRequestSchema
from openapi_client.models.search_tokens_response_schema import SearchTokensResponseSchema
from openapi_client.models.suspend_request_schema import SuspendRequestSchema
from openapi_client.models.suspend_response_schema import SuspendResponseSchema
from openapi_client.models.token import Token
from openapi_client.models.token_detail import TokenDetail
from openapi_client.models.token_detail_data import TokenDetailData
from openapi_client.models.token_detail_data_get_token_only import TokenDetailDataGetTokenOnly
from openapi_client.models.token_detail_data_par_only import TokenDetailDataPAROnly
from openapi_client.models.token_detail_get_token_only import TokenDetailGetTokenOnly
from openapi_client.models.token_detail_par_only import TokenDetailPAROnly
from openapi_client.models.token_for_lcm import TokenForLCM
from openapi_client.models.token_info import TokenInfo
from openapi_client.models.tokenize_request_schema import TokenizeRequestSchema
from openapi_client.models.tokenize_response_schema import TokenizeResponseSchema
from openapi_client.models.transact_encrypted_data import TransactEncryptedData
from openapi_client.models.transact_error import TransactError
from openapi_client.models.transact_request_schema import TransactRequestSchema
from openapi_client.models.transact_response_schema import TransactResponseSchema
from openapi_client.models.un_suspend_request_schema import UnSuspendRequestSchema
from openapi_client.models.un_suspend_response_schema import UnSuspendResponseSchema
| [
"7020500+ech0s7r@users.noreply.github.com"
] | 7020500+ech0s7r@users.noreply.github.com |
6ba578146ce74c3dceec8a819b6e7c89264220a3 | 23d962a8e36b4a58e63e15f3c61a88b537a80f6e | /test/unit/mongo_class/repsetcoll_ins_doc.py | 8a75acee80fa5504bdf2fc7ed82626d1c2633972 | [
"MIT"
] | permissive | deepcoder42/mongo-lib | 3a893d38edb3e03decff0cfbcbf29339026909f9 | fa2b65587ab88ee90c9d85f12dd642c6295e0d94 | refs/heads/master | 2023-06-14T10:10:12.032877 | 2021-07-13T15:22:17 | 2021-07-13T15:22:17 | 337,179,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | #!/usr/bin/python
# Classification (U)
"""Program: repsetcoll_ins_doc.py
Description: Unit testing of RepSetColl.ins_doc in mongo_class.py.
Usage:
test/unit/mongo_class/repsetcoll_ins_doc.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mongo_class
import version
__version__ = version.__version__
class InsDoc(object):
    """Class: InsDoc

    Description: Stub standing in for RepSetColl.db_coll; records the
        document handed to insert_one instead of touching a database.

    Methods:
        __init__
        insert_one

    """

    def __init__(self):
        """Function: __init__

        Description: Initialize the stub with no document captured yet.

        Arguments:

        """
        # Last document passed to insert_one (None until the first call).
        self.doc = None

    def insert_one(self, doc):
        """Function: insert_one

        Description: Stub holder for RepSetColl.db_coll.insert_one method.

        Arguments:
            (input) doc -> Document

        """
        self.doc = doc
        return True
class UnitTest(unittest.TestCase):
    """Class: UnitTest

    Description: Unit test of mongo_class.RepSetColl.ins_doc, using the
        InsDoc stub so no real MongoDB connection is required.

    Methods:
        setUp
        test_ins_doc

    """
    def setUp(self):
        """Function: setUp

        Description: Initialization for unit testing -- builds the
            connection parameters handed to RepSetColl.

        Arguments:

        """
        self.name = "Mongo_Server"
        self.user = "mongo_user"
        self.japd = "mongo_pd"
        self.host = "host_server"
        self.port = 27017
        # dbs/coll/db_auth are initialized for completeness; test_ins_doc
        # does not pass them to RepSetColl.
        self.dbs = "test"
        self.coll = None
        self.db_auth = None
        self.repset = "mongo_repset"
        # NOTE(review): one-element set literal, not a dict. InsDoc only
        # stores whatever it receives, so the test passes either way --
        # confirm a dict was not intended.
        self.doc = {"Document"}
    def test_ins_doc(self):
        """Function: test_ins_doc

        Description: Test ins_doc method.

        Arguments:

        """
        mongo = mongo_class.RepSetColl(
            self.name, self.user, self.japd, self.host, self.port,
            repset=self.repset)
        # Inject the stub collection so ins_doc writes into InsDoc
        # instead of a live database.
        mongo.db_coll = InsDoc()
        # assertFalse accepts any falsy result; presumably ins_doc
        # returns None -- confirm against mongo_class.
        self.assertFalse(mongo.ins_doc(self.doc))
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"deepcoder42@gmail.com"
] | deepcoder42@gmail.com |
648e57155207f80d8186777a0f154c09b450a3b0 | 0cf704d61352737ba7a7698043375446ee1f7b03 | /travis/waitdb | 1574aee0701962e71aab9108ede02fe2ebadeb5d | [
"BSD-2-Clause-Views"
] | permissive | experimentAccount0/c2cgeoportal | 4be327e7be6c39b631e4500118507d9a8823122e | 5f7f781cf18fe396d0e940bc3dc6a5f928e030cd | refs/heads/master | 2021-01-15T21:33:58.169936 | 2017-08-07T07:51:41 | 2017-08-07T07:51:41 | 99,875,202 | 0 | 0 | null | 2017-08-10T03:04:56 | 2017-08-10T03:04:55 | null | UTF-8 | Python | false | false | 401 | #!/usr/bin/python
import sys
import time
import sqlalchemy
sleep_time = 1
# wait for the DB to be UP
while True:
print("Waiting for the DB to be reachable")
try:
sqlalchemy.create_engine(sys.argv[1]).connect()
exit(0)
except Exception as e:
print(str(e))
print("Waiting for the DB to be reachable")
time.sleep(sleep_time)
sleep_time *= 2
| [
"stephane.brunner@camptocamp.com"
] | stephane.brunner@camptocamp.com | |
4d971e6b6f550716ebcddbf6d61b9b7b7ab4e0aa | 546b8c3e1b876aab272e587765951e8acd7b3122 | /irlc/ex00/listcomp.py | b68fff096aa522d77f88184abdb258b84db06642 | [] | no_license | natashanorsker/RL_snakes | 2b8a9da5dd1e794e832830ab64e57ab7d4b0d6c3 | be8c75d1aa7a5ba7a6af50a0a990a97b0242c49d | refs/heads/main | 2023-04-21T14:08:30.840757 | 2021-05-11T17:33:35 | 2021-05-11T17:33:35 | 358,572,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | """
This file may not be shared/redistributed without permission. Please read copyright notice in the git repo. If this file contains other copyright notices disregard this text.
"""
# --- List comprehensions --------------------------------------------------
nums = [1, 2, 3, 4, 5, 6]

# Transform every element: add one to each number.
plusOneNums = [n + 1 for n in nums]

# Filter: keep only the odd numbers.
oddNums = [n for n in nums if n % 2 == 1]
print(oddNums)

# Filter and transform in a single comprehension.
oddNumsPlusOne = [n + 1 for n in nums if n % 2 == 1]
print(oddNumsPlusOne)

# --- Dictionary comprehensions --------------------------------------------
# We can build a new dictionary where both the keys and the values change.
toy_cost = {'toy car': 10, 'skipping rope': 6, 'toy train': 20}
print(toy_cost)

# Same keys, doubled values.
double_cost = {toy: 2 * cost for toy, cost in toy_cost.items()}
print(double_cost)

# New keys and halved (integer) values.
bad_toys = {"broken " + toy: cost // 2 for toy, cost in toy_cost.items()}
print(bad_toys)

# Keep only the entries whose value passes the test.
expensive_toys = {toy: cost for toy, cost in toy_cost.items() if cost >= 10}
print(expensive_toys)
| [
"natashanorsker@gmail.com"
] | natashanorsker@gmail.com |
7673db1213b13fbf378ed98baa8e76b8dc967d4b | 74be814f7cd10d3c91a53460bd6698aa8bc95704 | /AcWing算法基础课/记忆化搜索/901. 滑雪.py | aa5469dd0aafe27d88af53c536c5fa9b8188db37 | [] | no_license | weiyuyan/LeetCode | 7202f7422bc3bef6bd35ea299550b51905401656 | 19db0e78826d3e3d27d2574abd9d461eb41458d1 | refs/heads/master | 2020-12-03T17:10:53.738507 | 2020-05-27T08:28:36 | 2020-05-27T08:28:36 | 231,402,839 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,204 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:ShidongDu time:2020/4/1
'''
给定一个R行C列的矩阵,表示一个矩形网格滑雪场。
矩阵中第 i 行第 j 列的点表示滑雪场的第 i 行第 j 列区域的高度。
一个人从滑雪场中的某个区域内出发,每次可以向上下左右任意一个方向滑动一个单位距离。
当然,一个人能够滑动到某相邻区域的前提是该区域的高度低于自己目前所在区域的高度。
下面给出一个矩阵作为例子:
1 2 3 4 5
16 17 18 19 6
15 24 25 20 7
14 23 22 21 8
13 12 11 10 9
在给定矩阵中,一条可行的滑行轨迹为24-17-2-1。
在给定矩阵中,最长的滑行轨迹为25-24-23-…-3-2-1,沿途共经过25个区域。
现在给定你一个二维矩阵表示滑雪场各区域的高度,请你找出在该滑雪场中能够完成的最长滑雪轨迹,并输出其长度(可经过最大区域数)。
输入格式
第一行包含两个整数R和C。
接下来R行,每行包含C个整数,表示完整的二维矩阵。
输出格式
输出一个整数,表示可完成的最长滑雪长度。
数据范围
1≤R,C≤300,
0≤矩阵中整数≤10000
输入样例:
5 5
1 2 3 4 5
16 17 18 19 6
15 24 25 20 7
14 23 22 21 8
13 12 11 10 9
输出样例:
25
'''
# 状态表示:①集合:f[i][j]所有从[i][j]开始滑的路径的集合 ②属性:max
# 状态计算:按照第一步往哪个方向滑,分成4类:向上、向下、向左、向右
# f[i][j]的最大值 == max(f[i][j]向上,f[i][j]向下,f[i][j]向左,f[i][j]向右)(前提是a[i][j]得大于它们)
# from typing import List
# class Solution:
# def iceSkating(self, R: int, C:int, a:List[List[int]]):
# dp = [[1 for _ in range(C+2)] for _ in range(R+2)]
# for i in range(1, R+1):
# for j in range(1, C+1):
# if a[i][j] > a[i][j+1]:
# dp[i][j] = max(dp[i][j], dp[i][j+1]+1)
# if a[i][j] > a[i][j-1]:
# dp[i][j] = max(dp[i][j], dp[i][j-1]+1)
# if a[i][j] > a[i+1][j]:
# dp[i][j] = max(dp[i][j], dp[i+1][j]+1)
# if a[i][j] > a[i-1][j]:
# dp[i][j] = max(dp[i][j], dp[i-1][j]+1)
# res = dp[1][1]
# for i in range(1, R+1):
# for j in range(1, C+1):
# if res<dp[i][j]:
# res=dp[i][j]
# return res
#
# if __name__ == '__main__':
# R, C = list(map(int, input().split()))
# a = [[100000 for _ in range(C+2)] for _ in range(R+2)]
# for r in range(1, R+1):
# a[r] = [100000] + list(map(int, input().split())) + [100000]
# solution = Solution()
# res = solution.iceSkating(R, C, a)
# print(res)
#
# 1. Read the grid: r rows of c heights, stored 1-indexed with a padding
#    row/column 0 so neighbour checks below never see raw index 0.
r, c = map(int, input().split())
arr = [[0] * (c + 1)]
for _ in range(r):
    heights = list(map(int, input().split()))
    arr.append([0] + heights)
# 2. Memo table: dp[i][j] = longest run starting at (i, j); -1 = unknown.
dp = [[-1] * (c + 1) for _ in range(r + 1)]
# 3. Direction offsets for the four neighbours: up, right, down, left.
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
def dfs(i, j):
    """Longest strictly-downhill path starting at cell (i, j), memoised in dp."""
    if dp[i][j] != -1:
        return dp[i][j]
    best = 1  # the path that stops right here
    for step in range(4):
        ni, nj = i + dx[step], j + dy[step]
        # Stay on the board and only slide to strictly lower cells, so
        # the recursion can never revisit (i, j).
        if 1 <= ni <= r and 1 <= nj <= c and arr[ni][nj] < arr[i][j]:
            best = max(best, dfs(ni, nj) + 1)
    dp[i][j] = best
    return best
# def dfs(i,j):
# if dp[i][j]!=-1:
# return dp[i][j]
# dp[i][j] = 1 # 初始化dp[i][j]等于1,表示路劲只包含[i,j]元素,长度为1
# for d in range(4):
# a = i+dx[d]
# b = j+dy[d]
# if a>=1 and a<=r and b>=1 and b<=c and arr[a][b]<arr[i][j]: # !出错,最后要比较arr数组中移动一个位置前后的高度大小
# dp[i][j] = max(dp[i][j], dfs(a,b)+1)
# return dp[i][j]
# The longest route can start at any cell, so take the best over all starts
# (default=0 keeps the original "empty grid prints 0" behaviour).
res = max(
    (dfs(i, j) for i in range(1, r + 1) for j in range(1, c + 1)),
    default=0,
)
print(res)
# res = 0
# for i in range(1, r+1):
# for j in range(1, c+1):
# res = max(res, dfs(i,j))
# print(res) | [
"244128764@qq.com"
] | 244128764@qq.com |
5bb0f1b8c1083657bb7ed66ce5158451716ad0f7 | 4ea675fdab4b7aab8b84fd8b6127ff452e4e4a8f | /dc_mosaic.py | 6ec9f6214c790cb63e89745bd668433d2386bb32 | [
"Apache-2.0"
] | permissive | v0lat1le/data_cube_utilities | f1819ed5686a92c0ca20a40972afb161600d2462 | 5015da91c4380925ce7920e18a7d78268a989048 | refs/heads/master | 2020-05-18T15:10:35.361663 | 2017-03-08T00:18:58 | 2017-03-08T00:18:58 | 84,257,036 | 0 | 0 | null | 2017-03-07T23:36:53 | 2017-03-07T23:36:53 | null | UTF-8 | Python | false | false | 8,063 | py |
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gdal, osr
import collections
import gc
import numpy as np
import xarray as xr
from datetime import datetime
import collections
from collections import OrderedDict
import datacube
from . import dc_utilities as utilities
# Author: KMF
# Creation date: 2016-06-14
# Modified by: AHDS
# Last modified date:
def create_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
"""
Description:
Creates a most recent - oldest mosaic of the input dataset. If no clean mask is given,
the 'cf_mask' variable must be included in the input dataset, as it will be used
to create a clean mask
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain
coordinates: time, latitude, longitude
variables: variables to be mosaicked
If user does not provide a clean_mask, dataset_in must also include the cf_mask
variable
Optional Inputs:
clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
if user does not provide a clean mask, one will be created using cfmask
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - mosaicked data with
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
##dataset_in = dataset_in.drop('cf_mask')
#masks data with clean_mask. all values that are clean_mask==False are set to nodata.
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
time_slices = reversed(range(len(clean_mask))) if kwargs and kwargs['reverse_time'] else range(len(clean_mask))
for index in time_slices:
dataset_slice = dataset_in.isel(time=index).astype("int16").drop('time')
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_in.data_vars):
dataset_out[key].values[dataset_out[key].values==-9999] = dataset_slice[key].values[dataset_out[key].values==-9999]
return dataset_out
def create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
"""
Description:
Method for calculating the median pixel value for a given dataset.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
#dataset_in = dataset_in.drop('cf_mask')
#required for np.nan
dataset_in = dataset_in.astype("float64")
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
dataset_out = dataset_in.isel(time=0).drop('time').copy(deep=True)
dataset_out.attrs = OrderedDict()
# Loop over every key.
for key in list(dataset_in.data_vars):
dataset_in[key].values[dataset_in[key].values==no_data] = np.nan
dataset_out[key].values = np.nanmedian(dataset_in[key].values, axis=0)
dataset_out[key].values[dataset_out[key].values==np.nan] = no_data
return dataset_out.astype('int16')
def create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
"""
Description:
Method for calculating the pixel value for the max ndvi value.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
#dataset_in = dataset_in.drop('cf_mask')
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for timeslice in range(clean_mask.shape[0]):
dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice,::]] = -1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values]
return dataset_out
def create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
"""
Description:
Method for calculating the pixel value for the min ndvi value.
-----
Input:
dataset_in (xarray dataset) - the set of data with clouds and no data removed.
Optional Inputs:
no_data (int/float) - no data value.
"""
# Create clean_mask from cfmask if none given
if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
#dataset_in = dataset_in.drop('cf_mask')
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
for timeslice in range(clean_mask.shape[0]):
dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice,::]] = 1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
#clear out the params as they can't be written to nc.
dataset_out.attrs = OrderedDict()
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values]
return dataset_out
| [
"alfredo.h.delos_santos@ama-inc.com"
] | alfredo.h.delos_santos@ama-inc.com |
2b3195ba262ba0da86a95bc47374e0749c55102d | 083ca3df7dba08779976d02d848315f85c45bf75 | /BinaryTreePostorderTraversal3.py | 58cab01e0197a9503d8d6e5bbc2ec068195ac364 | [] | no_license | jiangshen95/UbuntuLeetCode | 6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94 | fa02b469344cf7c82510249fba9aa59ae0cb4cc0 | refs/heads/master | 2021-05-07T02:04:47.215580 | 2020-06-11T02:33:35 | 2020-06-11T02:33:35 | 110,397,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
postorder = []
if not root:
return postorder
stack = [root]
while stack:
cur = stack.pop()
postorder = [cur.val] + postorder
if cur.left:
stack.append(cur.left)
if cur.right:
stack.append(cur.right)
return postorder
if __name__=='__main__':
a = TreeNode(1)
b = TreeNode(2)
c = TreeNode(3)
d = TreeNode(4)
e = TreeNode(5)
a.left = b;
a.right = c;
b.left = d;
d.right = e;
solution = Solution()
print(solution.postorderTraversal(a))
| [
"jiangshen95@163.com"
] | jiangshen95@163.com |
664a8438d8355afbb35e1f4a504b4cf54c2ed6bd | 266947fd84eed629ed0c21f6d91134239512afd9 | /BeginnerContest_A/078.py | d4f876810de243df887f94455a3fc46497abefcc | [] | no_license | SkiMsyk/AtCoder | c86adeec4fa470ec14c1be7400c9fc8b3fb301cd | 8102b99cf0fb6d7fa304edb942d21cf7016cba7d | refs/heads/master | 2022-09-03T01:23:10.748038 | 2022-08-15T01:19:55 | 2022-08-15T01:19:55 | 239,656,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | X, Y = input().split()
d = {'A':1, 'B':2, 'C':3,
'D':4, 'E':5, 'F':6}
if d[X] < d[Y]:
print('<')
elif d[X] == d[Y]:
print('=')
else:
print('>')
| [
"sakaimasayuki@sakaimasayukinoMacBook-puro.local"
] | sakaimasayuki@sakaimasayukinoMacBook-puro.local |
061c123ad1b7b5a44b4b25024ccb37b6e4d35897 | 1f7c4413d1e03bb431fd3efd34127693b192c7f2 | /tests/actions/save_group.py | 45cdeccef99723d54170a5f520498d8b644866e8 | [
"MIT"
] | permissive | blueshed/blueshed-micro | 8c6f8f572561f09b18f48a7b8a0466d9531ad16b | d52b28aaabf0ac6d43594155561d6cb3fce64a80 | refs/heads/master | 2020-12-25T17:13:36.458520 | 2016-11-18T17:57:27 | 2016-11-18T17:57:27 | 58,062,960 | 0 | 0 | null | 2016-10-31T11:28:17 | 2016-05-04T15:26:17 | Python | UTF-8 | Python | false | false | 724 | py | from blueshed.micro.orm.orm_utils import serialize
from tests.actions import model
def save_group(context: 'micro_context',
name: str,
id: int=None):
'''
Adds a group to the database if
it is not already there, otherwise
it updates it.
'''
with context.session as session:
if id:
group = session.query(model.Group).get(id)
group.name = name
signal = "group-changed"
else:
group = model.Group(name=name)
signal = "group-added"
session.add(group)
session.commit()
result = serialize(group)
context.broadcast(signal, result)
return result
| [
"pete@blueshed.co.uk"
] | pete@blueshed.co.uk |
294e69324ddea04aaeb771cab56347d49297d63a | eb12c383dca56dcd4a1e362bc02588bbd6e2dda6 | /racecar-ws/build/racecar/racecar/catkin_generated/pkg.develspace.context.pc.py | 9069f579dfa2ee8ea219e891d4f79361ff3f0e4e | [] | no_license | 174high/race-car | 2e5b403900496753bb71d5297cc3d1177620622a | 9fa772a84fee1489e9b52670f07e30d54db35fe8 | refs/heads/master | 2020-03-20T08:32:48.984720 | 2018-06-14T05:48:56 | 2018-06-14T05:48:56 | 137,311,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "razor_imu_9dof;tf;tf2_ros;urg_node;joy;rosbag;rostopic;rviz;gmapping;hector_mapping;robot_pose_ekf".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "racecar"
PROJECT_SPACE_DIR = "/home/ubuntu/racecar-ws/devel"
PROJECT_VERSION = "0.0.1"
| [
"you@example.com"
] | you@example.com |
c00b80fb096539a331e5b7ea6a66d592b34a175c | e64e88c5561698555381d9ed48d17dadbbcd753b | /crimefiles/migrations/0058_auto_20200711_1856.py | c3e7241fa313325d5d4020271b65ce47d68acb8c | [] | no_license | shreyasharma98/MS336_TechDhronas | 393979e67c3d5bbf5ff7a4924c71a637cc1337ea | cb2580cb64de8a52b9d1ddc4225ab077415ade7e | refs/heads/master | 2022-11-30T21:37:16.275763 | 2020-08-17T17:07:21 | 2020-08-17T17:07:21 | 284,041,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # Generated by Django 2.0 on 2020-07-11 13:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crimefiles', '0057_auto_20200711_1814'),
]
operations = [
migrations.AlterField(
model_name='caseclose',
name='complaintid',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
),
migrations.AlterField(
model_name='casestatus',
name='complaintid',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
),
migrations.AlterField(
model_name='copstatus',
name='complaintid',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
),
migrations.AlterField(
model_name='declinereason',
name='complaintid',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
),
migrations.AlterField(
model_name='fir',
name='complaintid',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
),
]
| [
"65544777+aakriti1435@users.noreply.github.com"
] | 65544777+aakriti1435@users.noreply.github.com |
9e045519004f73f6c2d4f4a1252bf2571792b29d | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/topics/DataFormats/79/community.py | 6f0da38f033e3bd7c46dae372d33bce22a7c7e20 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 772 | py | _______ c__
____ c.. _______ C..
_______ r__
CSV_URL 'https://bites-data.s3.us-east-2.amazonaws.com/community.csv'
___ get_csv
"""Use requests to download the csv and return the
decoded content"""
w__ r__.S.. __ s
download s.g.. ?
decoded_content download.c__.d.. utf-8
cr c__.reader(decoded_content.s.. , d.._',')
next(cr)
my_list l..(cr)
r.. my_list
___ create_user_bar_chart(content
"""Receives csv file (decoded) content and print a table of timezones
and their corresponding member counts in pluses to standard output
"""
counter C..(user[2] ___ user __ content)
___ tz __ s..(counter
print _*{tz: <20} | {"+"*counter[tz]}')
create_user_bar_chart(get_csv
#get_csv() | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
18bf9c0ccb23bcdc9920c3056bd3f9bf0ba9e519 | c8fbb8abd6f00a53607773aba706f2893861c50c | /5-html.py | 51cbaa634f58a2ea8395201091e1a0678808ca5b | [] | no_license | abelsonlive/intro-to-webscraping | f160e80678ac0a007b1f633bb3d8658fcfc35d77 | 27aaea56990dd9ccc45c4ca4ba93d49d3d73cec0 | refs/heads/master | 2016-09-05T08:51:45.877565 | 2015-02-11T01:32:55 | 2015-02-11T01:32:55 | 30,586,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import requests
from bs4 import BeautifulSoup
url = 'http://google.com/search'
search_parameters = {
'q': 'hack the gibson'
}
response = requests.get(url, params=search_parameters)
soup = BeautifulSoup(response.content)
for search_result in soup.find_all('h3', {'class':'r'}):
anchor_tag = search_result.find('a')
raw_link = anchor_tag.attrs.get('href')
clean_link = raw_link.split('q=')[-1]
print clean_link
| [
"brianabelson@gmail.com"
] | brianabelson@gmail.com |
3a696fce02ad18bb6f0d805193e9a19fec2c84c5 | 7f4b1d5e9963d63dd45b31c6cad8ced70d823217 | /interview-prep/geeks_for_geeks/greedy/huffman_decoding.py | cab62acfcb088effd9506ec8cb43182cc73ee544 | [] | no_license | mdhatmaker/Misc-python | b8be239619788ed343eb55b24734782e227594dc | 92751ea44f4c1d0d4ba60f5a1bb9c0708123077b | refs/heads/master | 2023-08-24T05:23:44.938059 | 2023-08-09T08:30:12 | 2023-08-09T08:30:12 | 194,360,769 | 3 | 4 | null | 2022-12-27T15:19:06 | 2019-06-29T03:39:13 | Python | UTF-8 | Python | false | false | 4,322 | py | import sys
import heapq
# https://practice.geeksforgeeks.org/problems/huffman-decoding-1/1
# https://www.geeksforgeeks.org/huffman-coding-greedy-algo-3/
# Given an encoded string, your task is to print the decoded String.
###############################################################################
def decode(arr):
return -1
def decode_file(root, s): # struct MinHeapNode* root, string s)
ans = ""
curr = root # struct MinHeapNode* curr = root;
for i in range(len(s)):
if s[i] == '0': curr = curr.left
else:
curr = curr.right
if (not curr.left) and (not curr.right):
ans += curr.data
curr = root
return ans + '\0'
"""
Steps to build Huffman Tree
Input is an array of unique characters along with their frequency
of occurrences and output is Huffman Tree.
1. Create a leaf node for each unique character and build a min
heap of all leaf nodes. (Min Heap is used as a priority queue.
The value of frequency field is used to compare two nodes in min
heap. Initially, the least frequent character is at root)
2. Extract two nodes with the minimum frequency from the min heap.
3. Create a new internal node with a frequency equal to the sum of
the two nodes frequencies. Make the first extracted node as its
left child and the other extracted node as its right child. Add
this node to the min heap.
4. Repeat steps #2 and #3 until the heap contains only one node.
The remaining node is the root node and the tree is complete.
"""
class Node:
def __init__(self, data, freq):
self.data = data
self.freq = freq
self.left = None
self.right = None
def __repr__(self):
return f'{self.data}:{self.freq}'
def __lt__(self, other):
if other and self.freq < other.freq: return True
else:
return False
def __gt__(self, other):
if other and self.freq > other.freq: return True
else:
return False
def __eq__(self, other):
if other and self.freq == other.freq: return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def build_huffman_tree(arr):
minHeap = []
for i in range(0, len(arr), 2):
item = (int(arr[i+1]), arr[i])
heapq.heappush(minHeap, item)
#while minHeap:
# print(heapq.heappop(minHeap), end=' ')
#print()
#struct MinHeapNode *left, *right, *top;
# Step 1: Create a min heap of capacity equal to size. Initially, there are
# nodes equal to size.
#struct MinHeap* minHeap = createAndBuildMinHeap(data, freq, size);
# Iterate while size of heap doesn't become 1
while not len(minHeap) == 1: # isSizeOne(minHeap):
# Step 2: Extract the two minimum freq items from in heap
left = heapq.heappop(minHeap) # extractMin(minHeap);
right = heapq.heappop(minHeap) # extractMin(minHeap);
# Step 3: Create a new internal node with frequency equal to the sum of the
# two nodes frequencies. Make the two extracted node as left and right children
# of this new node. Add this node to the min heap.
# '$' is a special value for internal nodes, not used
item = (left[0]+right[0], '$') # (left.freq + right.freq, '$')
heapq.heappush(minHeap, item)
#top = newNode('$', left->freq + right->freq);
#top->left = left;
#top->right = right;
#insertMinHeap(minHeap, top);
# Step 4: The remaining node is the root node and the tree is complete.
return heapq.heappop(minHeap) # extractMin(minHeap);
###############################################################################
if __name__ == "__main__":
test_inputs = []
#test_inputs.append( ("abc", "abc") )
#test_inputs.append( ("geeksforgeeks", "geeksforgeeks") )
test_inputs.append( ("a 5 b 9 c 12 d 13 e 16 f 45", "") )
""" Run process on sample inputs
"""
for inputs, results in test_inputs:
arr = [s for s in inputs.split()]
print(f'{inputs}')
rv = build_huffman_tree(arr)
print(f"{rv} expected: {results}")
#minHeap = rv
#while minHeap:
# print(heapq.heappop(minHeap), end=' ')
#print()
| [
"hatmanmd@yahoo.com"
] | hatmanmd@yahoo.com |
be7a3abe18aa4a85e2d13bdc988a23498b3a315b | a31edaf4843ff669d459d3150a7eebcd24f7e579 | /BSI/crypto/rand.py | 0689fc72aa12a374349c2dcb4e0b772d57a1c50b | [] | no_license | Ginkooo/PolibudaCode | 1d57b68cd7f62feb933bc90342f6128ff991f35d | 583c2b07b6947df782a412f26b224efc5c9e4180 | refs/heads/master | 2021-01-11T01:17:09.642285 | 2018-12-13T23:50:00 | 2018-12-13T23:50:00 | 70,732,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | import subprocess
import binascii
from argparse import ArgumentParser
ap = ArgumentParser(description='Encrypt file using system entrophy')
ap.add_argument('algorithm', help='Algorithm to use (For full list of ciphertypes please see: man enc')
ap.add_argument('input_file', help='File to cipher')
ap.add_argument('output_file', help='Output file')
args = ap.parse_args()
rand_key = open('/dev/urandom', 'rb').read(14)
rand_iv = open('/dev/urandom', 'rb').read(14)
rand_key = binascii.hexlify(rand_key).decode('ascii')
rand_iv = binascii.hexlify(rand_iv).decode('ascii')
algo = args.algorithm
ifile = args.input_file
ofile = args.output_file
cmd = 'openssl enc -{} -e -in {} -out {} -K {} -iv {}'.format(algo, ifile, ofile, rand_key, rand_iv)
print(cmd)
subprocess.check_call(cmd, shell=True)
print('File enciphered with key:')
print(rand_key)
print('iv was:')
print(rand_iv)
| [
"piotr_czajka@outlook.com"
] | piotr_czajka@outlook.com |
085522d19e1404567e8c4300ce0fa4ea1ef34023 | 008ea0c503829f33840495373ad3d60794575af3 | /PYDayByDay/common/list.py | 6262e957b92bfdffbe3d3158524d64346d6e2d3c | [] | no_license | JyHu/PYStudy | 6515bea47ca6f80e336f3b6a7a14b1159fde872f | ec0855c414237bdd7d0cb28f79a81c02ccd52d45 | refs/heads/master | 2016-08-12T19:44:06.723361 | 2016-04-11T10:38:59 | 2016-04-11T10:38:59 | 45,384,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | #coding = utf-8
__author__ = 'JinyouHU'
word = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
a = word[2]
print 'a is :' +a
b = word[1:3]
print 'b is :'
print b
c = word[:2]
print 'c is :'
print c
d = word[0:]
print 'd is :'
print d
e = word[:2] + word[2:]
print 'e is :'
print e
f = word[-1]
print 'f is :'
print f
g = word[-4:2]
print 'g is :'
print g
h = word[-2:]
print 'h is :'
print h
i = word[:-2]
print 'i is :'
print i
l = len(word)
print 'length of word is :' + str(l)
print 'add new element...'
word.append('h')
print word
'''
List(列表) 是 Python 中使用最频繁的数据类型。
列表可以完成大多数集合类的数据结构实现。它支持字符,数字,字符串甚至可以包含列表(所谓嵌套)。
列表用[ ]标识。是python最通用的复合数据类型。看这段代码就明白。
列表中的值得分割也可以用到变量[头下标:尾下标],就可以截取相应的列表,从左到右索引默认0开始的,从右到左索引默认-1开始,下标可以为空表示取到头或尾。
'''
####################################################
'''
Python元组
元组是另一个数据类型,类似于List(列表)。
元组用"()"标识。内部元素用逗号隔开。但是元素不能二次赋值,相当于只读列表。
'''
tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )
tinytuple = (123, 'john')
print list # 输出完整列表
print list[0] # 输出列表的第一个元素
print list[1:3] # 输出第二个至第三个的元素
print list[2:] # 输出从第三个开始至列表末尾的所有元素 | [
"auu.aug@gmail.com"
] | auu.aug@gmail.com |
0e6712631230a8a0169cf4d69e1955916c0c03ca | d6509602e7ac7c57949e7837b74317f46d1fa4c3 | /pystruct/learners/frankwolfe_ssvm.py | 0f6d48f068e39da3370d334633b4f14bf275ee62 | [
"BSD-2-Clause"
] | permissive | DerThorsten/pystruct | 4bf7050d603c67ef5476befa66d98b96f9259545 | efe231d7eddb705b1c4c6458e8fd60d79bf3eeb9 | refs/heads/master | 2021-01-15T16:52:55.460885 | 2013-08-21T21:47:36 | 2013-08-21T21:47:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,857 | py | ######################
# Authors:
# Xianghang Liu <xianghangliu@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3-clause
#
# Implements structured SVM as described in Joachims et. al.
# Cutting-Plane Training of Structural SVMs
from time import time
import numpy as np
from pystruct.learners.ssvm import BaseSSVM
from pystruct.utils import find_constraint
class FrankWolfeSSVM(BaseSSVM):
"""Structured SVM solver using Block-coordinate Frank-Wolfe.
This implementation is somewhat experimental. Use with care.
This implementation follows the paper:
Lacoste-Julien, Jaggi, Schmidt, Pletscher JMLR 2013
Block-Coordinage Frank-Wolfe Optimization for Structural SVMs
With batch_mode=False, this implements the online (block-coordinate)
version of the algorithm (BCFW)
BCFW is an attractive alternative to subgradient methods, as no
learning rate is needed and a duality gap guarantee is given.
Parameters
----------
model : StructuredModel
Object containing the model structure. Has to implement
`loss`, `inference` and `loss_augmented_inference`.
max_iter : int, default=1000
Maximum number of passes over dataset to find constraints.
C : float, default=1
Regularization parameter. Corresponds to 1 / (lambda * n_samples).
verbose : int
Verbosity.
n_jobs : int, default=1
Number of parallel processes. Currently only n_jobs=1 is supported.
show_loss_every : int, default=0
How often the training set loss should be computed.
Zero corresponds to never.
tol : float, default=1e-3
Convergence tolerance on the duality gap.
logger : logger object, default=None
Pystruct logger for storing the model or extracting additional
information.
batch_mode : boolean, default=False
Whether to use batch updates. Will slow down learning enormously.
line_search : boolean, default=True
Whether to compute the optimum step size in each step.
The line-search is done in closed form and cheap.
There is usually no reason to turn this off.
check_dual_every : int, default=10
How often the stopping criterion should be checked. Computing
the stopping criterion is as costly as doing one pass over the dataset,
so check_dual_every=1 will make learning twice as slow.
do_averaging : bool, default=True
Whether to use weight averaging as described in the reference paper.
Currently this is only supported in the block-coordinate version.
Attributes
----------
w : nd-array, shape=(model.size_psi,)
The learned weights of the SVM.
``loss_curve_`` : list of float
List of loss values if show_loss_every > 0.
``objective_curve_`` : list of float
Cutting plane objective after each pass through the dataset.
``primal_objective_curve_`` : list of float
Primal objective after each pass through the dataset.
``timestamps_`` : list of int
Total training time stored before each iteration.
"""
def __init__(self, model, max_iter=1000, C=1.0, verbose=0, n_jobs=1,
show_loss_every=0, logger=None, batch_mode=False,
line_search=True, check_dual_every=10, tol=.001,
do_averaging=True):
if n_jobs != 1:
raise ValueError("FrankWolfeSSVM does not support multiprocessing"
" yet. Ignoring n_jobs != 1.")
BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,
n_jobs=n_jobs, show_loss_every=show_loss_every,
logger=logger)
self.tol = tol
self.batch_mode = batch_mode
self.line_search = line_search
self.check_dual_every = check_dual_every
self.do_averaging = do_averaging
def _calc_dual_gap(self, X, Y, l):
n_samples = len(X)
psi_gt = self.model.batch_psi(X, Y, Y) # FIXME don't calculate this again
Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w,
relaxed=True)
dpsi = psi_gt - self.model.batch_psi(X, Y_hat)
ls = np.sum(self.model.batch_loss(Y, Y_hat))
ws = dpsi * self.C
l = l * n_samples * self.C
dual_val = -0.5 * np.sum(self.w ** 2) + l
w_diff = self.w - ws
dual_gap = w_diff.T.dot(self.w) - l + ls * self.C
primal_val = dual_val + dual_gap
self.primal_objective_curve_.append(primal_val)
self.objective_curve_.append(dual_val)
self.timestamps_.append(time() - self.timestamps_[0])
return dual_val, dual_gap, primal_val
def _frank_wolfe_batch(self, X, Y):
"""Batch Frank-Wolfe learning.
This is basically included for reference / comparision only,
as the block-coordinate version is much faster.
Compare Algorithm 2 in the reference paper.
"""
l = 0.0
n_samples = float(len(X))
psi_gt = self.model.batch_psi(X, Y, Y)
for k in xrange(self.max_iter):
Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w,
relaxed=True)
dpsi = psi_gt - self.model.batch_psi(X, Y_hat)
ls = np.mean(self.model.batch_loss(Y, Y_hat))
ws = dpsi * self.C
w_diff = self.w - ws
dual_gap = 1.0 / (self.C * n_samples) * w_diff.T.dot(self.w) - l + ls
# line search for gamma
if self.line_search:
eps = 1e-15
gamma = dual_gap / (np.sum(w_diff ** 2) / (self.C * n_samples) + eps)
gamma = max(0.0, min(1.0, gamma))
else:
gamma = 2.0 / (k + 2.0)
dual_val = -0.5 * np.sum(self.w ** 2) + l * (n_samples * self.C)
dual_gap_display = dual_gap * n_samples * self.C
primal_val = dual_val + dual_gap_display
self.primal_objective_curve_.append(primal_val)
self.objective_curve_.append(dual_val)
self.timestamps_.append(time() - self.timestamps_[0])
if self.verbose > 0:
print("k = %d, dual: %f, dual_gap: %f, primal: %f, gamma: %f"
% (k, dual_val, dual_gap_display, primal_val, gamma))
# update w and l
self.w = (1.0 - gamma) * self.w + gamma * ws
l = (1.0 - gamma) * l + gamma * ls
if dual_gap < self.tol:
return
def _frank_wolfe_bc(self, X, Y):
"""Block-Coordinate Frank-Wolfe learning.
Compare Algorithm 3 in the reference paper.
"""
n_samples = len(X)
w = self.w.copy()
w_mat = np.zeros((n_samples, self.model.size_psi))
l_mat = np.zeros(n_samples)
l_avg = 0.0
l = 0.0
k = 0
for p in xrange(self.max_iter):
if self.verbose > 0:
print("Iteration %d" % p)
for i in range(n_samples):
x, y = X[i], Y[i]
y_hat, delta_psi, slack, loss = find_constraint(self.model, x, y, w)
# ws and ls
ws = delta_psi * self.C
ls = loss / n_samples
# line search
if self.line_search:
eps = 1e-15
w_diff = w_mat[i] - ws
gamma = (w_diff.T.dot(w) - (self.C * n_samples)*(l_mat[i] - ls)) / (np.sum(w_diff ** 2) + eps)
gamma = max(0.0, min(1.0, gamma))
else:
gamma = 2.0 * n_samples / (k + 2.0 * n_samples)
w -= w_mat[i]
w_mat[i] = (1.0 - gamma) * w_mat[i] + gamma * ws
w += w_mat[i]
l -= l_mat[i]
l_mat[i] = (1.0 - gamma) * l_mat[i] + gamma * ls
l += l_mat[i]
if self.do_averaging:
rho = 2.0 / (k + 2.0)
self.w = (1.0 - rho) * self.w + rho * w
l_avg = (1.0 - rho) * l_avg + rho * l
else:
self.w = w
k += 1
if (self.check_dual_every != 0) and (p % self.check_dual_every == 0):
dual_val, dual_gap, primal_val = self._calc_dual_gap(X, Y, l)
if self.verbose > 0:
print("dual: %f, dual_gap: %f, primal: %f"
% (dual_val, dual_gap, primal_val))
if dual_gap < self.tol:
return
def fit(self, X, Y, constraints=None, initialize=True):
"""Learn parameters using (block-coordinate) Frank-Wolfe learning.
Parameters
----------
X : iterable
Traing instances. Contains the structured input objects.
No requirement on the particular form of entries of X is made.
Y : iterable
Training labels. Contains the strctured labels for inputs in X.
Needs to have the same length as X.
contraints : ignored
initialize : boolean, default=True
Whether to initialize the model for the data.
Leave this true except if you really know what you are doing.
"""
if initialize:
self.model.initialize(X, Y)
self.objective_curve_, self.primal_objective_curve_ = [], []
self.timestamps_ = [time()]
self.w = getattr(self, "w", np.zeros(self.model.size_psi))
try:
if self.batch_mode:
self._frank_wolfe_batch(X, Y)
else:
self._frank_wolfe_bc(X, Y)
except KeyboardInterrupt:
pass
return self
| [
"amueller@ais.uni-bonn.de"
] | amueller@ais.uni-bonn.de |
0d981572ba1a4d26a11b7a95c3f10693250d3020 | ac216a2cc36f91625e440247986ead2cd8cce350 | /packages/infra_libs/infra_libs/ts_mon/common/test/targets_test.py | 43e9246c2e49ae906e0fb13174e107561e482faa | [
"BSD-3-Clause"
] | permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 4,291 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from infra_libs.ts_mon.common import targets
from infra_libs.ts_mon.protos import metrics_pb2
class TargetTest(unittest.TestCase):
def setUp(self):
self.task0 = targets.TaskTarget('serv', 'job', 'reg', 'host', 0)
self.task1 = targets.TaskTarget('serv', 'job', 'reg', 'host', 0)
self.task2 = targets.TaskTarget('serv', 'job', 'reg', 'host', 1)
self.device0 = targets.DeviceTarget('reg', 'role', 'net', 'host0')
self.device1 = targets.DeviceTarget('reg', 'role', 'net', 'host0')
self.device2 = targets.DeviceTarget('reg', 'role', 'net', 'host1')
def test_eq(self):
self.assertTrue(self.task0 == self.task1)
self.assertTrue(self.device0 == self.device1)
self.assertFalse(self.task0 == self.task2)
self.assertFalse(self.device0 == self.device2)
self.assertFalse(self.task0 == self.device0)
def test_hash(self):
d = {}
d[self.task0] = 1
d[self.task1] = 2
d[self.task2] = 3
d[self.device0] = 4
d[self.device1] = 5
d[self.device2] = 6
self.assertEqual(2, d[self.task0])
self.assertEqual(2, d[self.task1])
self.assertEqual(3, d[self.task2])
self.assertEqual(5, d[self.device0])
self.assertEqual(5, d[self.device1])
self.assertEqual(6, d[self.device2])
class DeviceTargetTest(unittest.TestCase):
def test_populate_target_pb(self):
pb = metrics_pb2.MetricsCollection()
target = targets.DeviceTarget('reg', 'role', 'net', 'host')
target.populate_target_pb(pb)
self.assertEqual(pb.network_device.metro, 'reg')
self.assertEqual(pb.network_device.role, 'role')
self.assertEqual(pb.network_device.hostgroup, 'net')
self.assertEqual(pb.network_device.hostname, 'host')
self.assertEqual(pb.network_device.realm, 'ACQ_CHROME')
self.assertEqual(pb.network_device.alertable, True)
def test_update_to_dict(self):
target = targets.DeviceTarget('reg', 'role', 'net', 'host')
self.assertEqual({
'region': 'reg',
'role': 'role',
'network': 'net',
'hostname': 'host'}, target.to_dict())
target.update({'region': 'other', 'hostname': 'guest'})
self.assertEqual({
'region': 'other',
'role': 'role',
'network': 'net',
'hostname': 'guest'}, target.to_dict())
def test_update_private_field(self):
target = targets.DeviceTarget('reg', 'role', 'net', 'host')
with self.assertRaises(AttributeError):
target.update({'realm': 'boo'})
def test_update_nonexistent_field(self):
target = targets.DeviceTarget('reg', 'role', 'net', 'host')
# Simulate a bug: exporting a non-existent field.
target._fields += ('bad',)
with self.assertRaises(AttributeError):
target.update({'bad': 'boo'})
class TaskTargetTest(unittest.TestCase):
def test_populate_target_pb(self):
pb = metrics_pb2.MetricsCollection()
target = targets.TaskTarget('serv', 'job', 'reg', 'host')
target.populate_target_pb(pb)
self.assertEqual(pb.task.service_name, 'serv')
self.assertEqual(pb.task.job_name, 'job')
self.assertEqual(pb.task.data_center, 'reg')
self.assertEqual(pb.task.host_name, 'host')
self.assertEqual(pb.task.task_num, 0)
def test_update_to_dict(self):
target = targets.TaskTarget('serv', 'job', 'reg', 'host', 5)
self.assertEqual({
'service_name': 'serv',
'job_name': 'job',
'region': 'reg',
'hostname': 'host',
'task_num': 5}, target.to_dict())
target.update({'region': 'other', 'hostname': 'guest'})
self.assertEqual({
'service_name': 'serv',
'job_name': 'job',
'region': 'other',
'hostname': 'guest',
'task_num': 5}, target.to_dict())
def test_update_private_field(self):
target = targets.TaskTarget('serv', 'job', 'reg', 'host')
with self.assertRaises(AttributeError):
target.update({'realm': 'boo'})
def test_update_nonexistent_field(self):
target = targets.TaskTarget('serv', 'job', 'reg', 'host')
# Simulate a bug: exporting a non-existent field.
target._fields += ('bad',)
with self.assertRaises(AttributeError):
target.update({'bad': 'boo'})
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
aa78a93cfcec310c321ca4d89d37d1d191768dec | 4a3dcb0cd8b271ca4e54eac077c286bfec399ff0 | /graphstats/embed/ase.py | d691563dac3fb5de1feec978498fea21de797f39 | [
"Apache-2.0"
] | permissive | tpsatish95/graspy | b5e42faccbb2cc0bdd607f8229e1cf83c9060530 | 3360e217a3f4200381a805c2b95b5de282d146ac | refs/heads/master | 2020-03-30T04:14:07.606769 | 2018-09-26T10:41:13 | 2018-09-26T10:41:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py |
# ase.py
# Created by Ben Pedigo on 2018-09-15.
# Email: bpedigo@jhu.edu
# Adapted from Disa Mhembere
from embed import BaseEmbed
from utils import import_graph, check_square
from svd import SelectSVD
from sklearn.decomposition import TruncatedSVD
import numpy as np
class ASEEmbedder(BaseEmbed):
"""
Class for computing the adjacency spectral embedding of a graph
"""
def __init__(self, n_components=2, eig_scale=0.5):
"""
Adjacency spectral embeding of a graph
Parameters
----------
n_components: int, optional (defaults None)
Number of embedding dimensions. If unspecified, uses graphstats.dimselect
"""
super.__init__(n_components=n_components, eig_scale=eig_scale)
def _reduce_dim(self, A):
if self.n_components == None:
tsvd = SelectSVD() #TODO other parameters here?
else:
tsvd = TruncatedSVD(n_components = min(self.n_components, A.shape[0] - 1))
tsvd.fit(A)
eig_vectors = tsvd.components_.T
eig_values = tsvd.singular_values_
#X_hat = eig_vectors[:, :A.shape[1]].copy() what was the point of this in original code
embedding = eig_vectors.dot(np.diag(eig_values**self.eig_scale))
return embedding
def fit(self, graph):
A = import_graph(graph)
check_square(A)
self.embedding = self._reduce_dim(A)
return self
| [
"benjamindpedigo@gmail.com"
] | benjamindpedigo@gmail.com |
f92bef4524810441c31b2e1f8fe242bfa96e8225 | 83d9b630bdc5535d67e35d69768b4d41fc4c9653 | /assignment1/assignment1/cs231n/gradient_check.py | 31257e96b4cf83c96ae458e56c1b435f19990bfc | [] | no_license | chenshaobin/assignment_CS231n | 2c8820f748fca6fb41cdb272a81c940f8c0a0e52 | 132c670d22dd37e6b4c1bd9da944c1f62a639d64 | refs/heads/main | 2022-12-30T21:05:12.500255 | 2020-10-18T04:49:40 | 2020-10-18T04:49:40 | 301,309,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | from __future__ import print_function
from builtins import range
from past.builtins import xrange
import numpy as np
from random import randrange
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
"""
a naive implementation of numerical gradient of f at x
- f should be a function that takes a single argument
- x is the point (numpy array) to evaluate the gradient at
"""
# 损失函数对各参数的导数
fx = f(x) # evaluate function value at original point
grad = np.zeros_like(x)
# iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
# evaluate function at x+h
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h # increment by h
fxph = f(x) # evalute f(x + h)
x[ix] = oldval - h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # restore
# compute the partial derivative with centered formula
grad[ix] = (fxph - fxmh) / (2 * h) # the slope
if verbose:
print(ix, grad[ix])
it.iternext() # step to next dimension
return grad
def eval_numerical_gradient_array(f, x, df, h=1e-5):
"""
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
"""
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):
"""
Compute numeric gradients for a function that operates on input
and output blobs.
We assume that f accepts several input blobs as arguments, followed by a
blob where outputs will be written. For example, f might be called like:
f(x, w, out)
where x and w are input Blobs, and the result of f will be written to out.
Inputs:
- f: function
- inputs: tuple of input blobs
- output: output blob
- h: step size
"""
numeric_diffs = []
for input_blob in inputs:
diff = np.zeros_like(input_blob.diffs)
it = np.nditer(input_blob.vals, flags=['multi_index'],
op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
orig = input_blob.vals[idx]
input_blob.vals[idx] = orig + h
f(*(inputs + (output,)))
pos = np.copy(output.vals)
input_blob.vals[idx] = orig - h
f(*(inputs + (output,)))
neg = np.copy(output.vals)
input_blob.vals[idx] = orig
diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)
it.iternext()
numeric_diffs.append(diff)
return numeric_diffs
def eval_numerical_gradient_net(net, inputs, output, h=1e-5):
return eval_numerical_gradient_blobs(lambda *args: net.forward(),
inputs, output, h=h)
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
"""
sample a few random elements and only return numerical
in this dimensions.
"""
for i in range(num_checks):
ix = tuple([randrange(m) for m in x.shape])
# print('ix:', ix)
oldval = x[ix]
x[ix] = oldval + h # increment by h
fxph = f(x) # evaluate f(x + h)
x[ix] = oldval - h # increment by h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # reset
grad_numerical = (fxph - fxmh) / (2 * h)
grad_analytic = analytic_grad[ix]
rel_error = (abs(grad_numerical - grad_analytic) /
(abs(grad_numerical) + abs(grad_analytic)))
print('numerical: %f analytic: %f, relative error: %e'
%(grad_numerical, grad_analytic, rel_error))
| [
"13531194616@163.com"
] | 13531194616@163.com |
407444905ce70a6a84bff0d87d2fa26891342069 | 4a9995871447a406a7e6307a030503700cd41226 | /SRC/tool/project/ui_designer/dirCompare.py | 5236a57b2e5db9d7e49289cba144b877513cef52 | [] | no_license | juntaoh1234/12122003 | 96a107ce22d930e8d9517810736d8f6ce92dc7ad | 4bee39286c3708d7a0df3001e0daa9da51478170 | refs/heads/master | 2020-10-01T18:20:01.572599 | 2019-12-12T12:04:08 | 2019-12-12T12:04:08 | 227,596,967 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,043 | py | # -*- coding: utf-8 -*-
"""
Module implementing DirCompare.
"""
import filecmp
import os
from PyQt5.QtCore import pyqtSlot
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QDialog, QTreeWidgetItem, QMessageBox
from SRC import settings
from SRC.common.fileHelper import copyFile, pathJoin
from SRC.tool.common.widgetHelper import openDirectoryDialog, qMessageBoxQuestion
from SRC.tool.project.ui_designer.Ui_dirCompare import Ui_Dialog
class DirCompare(QDialog, Ui_Dialog):
"""
Class documentation goes here.
"""
def __init__(self, parent=None,projectDir=None):
"""
Constructor
@param parent reference to the parent widget
@type QWidget
"""
super(DirCompare, self).__init__(parent)
self.setupUi(self)
self._translate = QtCore.QCoreApplication.translate
self.pushButton_replace.setEnabled(False)
self.treeWidget_local.nodesDict = {} # 本地树节点字典 “相对路径”:“节点对象”
self.treeWidget_load.nodesDict = {} # 下载树节点字典
self.COMMON, self.DIFF_FILE, self.ONLY = range(3) # 类型
self.toggle = True
self.perColors = [(85, 0, 127)] # 目录颜色
self.initLocalDirectory(projectDir)
    # nodesDict layout: {'relative path': [tree item, type flag]}
def initLocalDirectory(self,projectDir):
if not projectDir:
projectDir = self.getRootDir(settings.SRC_DIR)
localTestCaseDir = pathJoin(projectDir, settings.TESTCASE['testCaseDir'])[:-1]
self.setControlData(localTestCaseDir, self.treeWidget_local, self.lineEdit_local)
def getRootDir(self, relativePath):
'''
项目根目录
:return:
'''
base_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = str(base_dir)
base_dir = base_dir.replace('\\', '/')
return base_dir.split(relativePath)[0]
def setControlData(self, dirPath, treeWidget, lineEdit):
if dirPath:
lineEdit.setText(dirPath)
self.createTree(treeWidget, dirPath.replace('/', '\\'))
@pyqtSlot()
def on_pushButton_open1_clicked(self):
fileDir = openDirectoryDialog(self, '打开本地目录')
self.setControlData(fileDir, self.treeWidget_local, self.lineEdit_local)
@pyqtSlot()
def on_pushButton_open2_clicked(self):
fileDir = openDirectoryDialog(self, '打开下载目录')
self.setControlData(fileDir, self.treeWidget_load, self.lineEdit_load)
@pyqtSlot()
def on_pushButton_cmp_clicked(self):
f1 = self.lineEdit_local.text().strip()
f2 = self.lineEdit_load.text().strip()
if not f1 or not f2:
QMessageBox.warning(self, '错误', '路径不能为空')
return
if not os.path.exists(f1):
QMessageBox.warning(self, '错误', '本地文件夹不存在!')
return
if not os.path.exists(f2):
QMessageBox.warning(self, '错误', '下载文件夹不存在!')
return
self.refresh()
result = filecmp.dircmp(f1, f2)
result.report_full_closure()
try:
self.diguiCmpResult({'root': result, }) # 递归对比结果
self.pushButton_replace.setEnabled(True)
showResult = '对比完成,没有新文件!'
for node in self.treeWidget_load.nodesDict.values():
if node[1] != 0:
showResult = '<font color="red">对比完成,有新文件,是否处理?</font>'
break
self.label_result.setText(self._translate("Dialog", showResult))
except Exception as e:
print(e)
@pyqtSlot()
def on_pushButton_replace_clicked(self):
res = qMessageBoxQuestion(self, '替换文件', '要进行替换操作吗?')
if not res:
return
try:
for node in self.treeWidget_load.nodesDict.values():
sourcePath = os.path.join(self.treeWidget_load.rootPath, node[0].relativePath[1:]).replace('/', '\\')
if node[0].select and os.path.isfile(sourcePath):
destPath = os.path.join(self.treeWidget_local.rootPath, node[0].relativePath[1:]).replace('/', '\\')
dir = os.path.dirname(destPath)
if not os.path.exists(dir):
os.makedirs(dir)
isBak = True if self.checkBox_bak.checkState() == 2 else False
copyFile(sourcePath, destPath, isBak)
self.on_pushButton_cmp_clicked()
self.label_result.setText(self._translate("Dialog", '替换完成!'))
except Exception as e:
print(e)
    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_load_itemDoubleClicked(self, item, column):
        # Double-click toggles the selection of an item (and its subtree) in
        # the download tree; green = selected, white = deselected.
        rootNode = self.treeWidget_load
        # relativePath starts with '\\'; [1:] makes it joinable.
        path = os.path.join(rootNode.rootPath, item.relativePath[1:]).replace('/', '\\')
        self.setActiveItem(path, rootNode, (85, 255, 0), (255, 255, 255))
    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_local_itemDoubleClicked(self, item, column):
        # Intentionally a no-op: items in the local tree are not selectable.
        pass
@pyqtSlot()
def on_pushButton_toggle_clicked(self):
self.toggle = not self.toggle
if self.toggle:
self.treeWidget_load.expandAll()
self.treeWidget_local.expandAll()
else:
self.treeWidget_load.collapseAll()
self.treeWidget_local.collapseAll()
    def createTree(self, rootNode, rootPath):
        """Rebuild *rootNode* from the directory at *rootPath*.

        Resets the widget's node dictionary and selection state, then
        recursively adds every entry under rootPath.
        """
        try:
            filesList = os.listdir(rootPath)
            rootNode.clear()
            rootNode.nodesDict.clear()
            rootNode.select = False
            rootNode.rootPath = rootPath  # absolute root path of this tree
            rootNode.relativePath = '\\'  # relative path of the root directory
            self.diguiList(filesList, rootNode, rootPath, rootNode)
            rootNode.expandAll()
        except Exception as e:
            print(e)
def diguiList(self, list, parentNode, dirPath, rootNode=None):
for file in list:
path = os.path.join(dirPath, file)
if os.path.exists(path):
node = self.addItemToView(parentNode, file, rootNode) # 添加节点
if os.path.isdir(path):
self.setFrColorForTreeItem(node, self.perColors[0]) # 设置目录的颜色
filesList = os.listdir(path)
self.diguiList(filesList, node, path, rootNode)
elif os.path.isfile(path):
pass
    def addItemToView(self, parentNode, text, rootNode, bgColor=(255, 255, 255), frColor=(0, 0, 0)):
        """Create a tree item named *text* under *parentNode*, register it in
        *rootNode*.nodesDict keyed by its relative path, and return it."""
        item = QTreeWidgetItem(parentNode)  # create the node
        self.setBgColorForTreeItem(item, bgColor)
        self.setFrColorForTreeItem(item, frColor)
        self.setTextForTreeItem(item, text)
        item.select = False
        item.relativePath = os.path.join(parentNode.relativePath, text)  # relative path
        rootNode.nodesDict[item.relativePath] = [item, self.COMMON]  # register node in the dict
        return item
def setBgColorForTreeItem(self, node, color):
brush = QtGui.QBrush(QtGui.QColor(color[0], color[1], color[2])) # 创建颜色刷
brush.setStyle(QtCore.Qt.Dense4Pattern)
node.setBackground(0, brush)
def setFrColorForTreeItem(self, node, color):
brush = QtGui.QBrush(QtGui.QColor(color[0], color[1], color[2]))
brush.setStyle(QtCore.Qt.NoBrush)
node.setForeground(0, brush)
    def setTextForTreeItem(self, node, text, window="Dialog"):
        # Route the text through Qt's translate hook so it can be localized.
        node.setText(0, self._translate(window, text))
def refresh(self):
self.createTree(self.treeWidget_local, self.lineEdit_local.text().replace('/', '\\'))
self.createTree(self.treeWidget_load, self.lineEdit_load.text().replace('/', '\\'))
    def diguiCmpResult(self, subDirs):
        """Recursively walk a dict of filecmp.dircmp results and recolor the
        affected nodes in both trees (DIFF_FILE for changed files, ONLY for
        entries present on one side only)."""
        if len(subDirs) == 0:
            return
        for cmpDir in subDirs.values():
            if len(cmpDir.diff_files) > 0:
                # Files that exist on both sides but differ in content.
                for file in cmpDir.diff_files:
                    pathLeft = os.path.join(cmpDir.left, file).replace('/', '\\')
                    pathRight = os.path.join(cmpDir.right, file).replace('/', '\\')
                    self.updateNode(pathLeft, self.treeWidget_local, self.DIFF_FILE, (255, 0, 0), (255, 255, 255))
                    self.updateNode(pathRight, self.treeWidget_load, self.DIFF_FILE, (255, 0, 0), (85, 255, 0))
            if len(cmpDir.left_only) > 0:
                # Files (or directories) that exist only in the local tree.
                for d in cmpDir.left_only:
                    path = os.path.join(cmpDir.left, d).replace('/', '\\')
                    self.updateNode(path, self.treeWidget_local, self.ONLY, (255, 85, 0), (255, 255, 255))
            if len(cmpDir.right_only) > 0:
                # Files (or directories) that exist only in the download tree.
                for d in cmpDir.right_only:
                    path = os.path.join(cmpDir.right, d).replace('/', '\\')
                    self.updateNode(path, self.treeWidget_load, self.ONLY, (255, 85, 255), (85, 255, 0))
            # Recurse into the common subdirectories.
            self.diguiCmpResult(cmpDir.subdirs)
    def updateNode(self, path, rootNode, status, frColor, bgColor):
        """Mark the node for *path* with *status*, recolor it, select it, and
        apply the same status/colors to every descendant of a directory."""
        # nodesDict is keyed by the path relative to the tree's rootPath.
        node = rootNode.nodesDict.get(path.split(rootNode.rootPath)[1])
        self.setFrColorForTreeItem(node[0], frColor)
        self.setBgColorForTreeItem(node[0], bgColor)
        node[1] = status
        node[0].select = True
        if os.path.isdir(path):
            filesList = os.listdir(path)
            for file in filesList:
                self.updateNode(os.path.join(path, file).replace('/', '\\'), rootNode, status, frColor, bgColor)
def setActiveItem(self, path, rootNode, selectColor, notSelectColor):
node = rootNode.nodesDict.get(path.split(rootNode.rootPath)[1])
node[0].select = not node[0].select
if node[0].select:
self.setBgColorForTreeItem(node[0], selectColor)
else:
self.setBgColorForTreeItem(node[0], notSelectColor)
if os.path.isdir(path):
filesList = os.listdir(path)
for file in filesList:
self.setActiveItem(os.path.join(path, file).replace('/', '\\'), rootNode, selectColor, notSelectColor)
if __name__ == "__main__":
    # Manual test entry point: show the directory-compare dialog standalone.
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    dlg = DirCompare()
    dlg.show()
    sys.exit(app.exec_())
| [
"1341890679@qq.com"
] | 1341890679@qq.com |
2310f6ba6d69d7d143a9f93b55954ca5c691f398 | 5d06a33d3685a6f255194b13fd2e615e38d68850 | /tests/opytimark/utils/test_constants.py | ee4d47bdb169e5811cb942a7bbb70115af9b72a6 | [
"Apache-2.0"
] | permissive | sarikoudis/opytimark | 617a59eafaabab5e67bd4040473a99f963df7788 | cad25623f23ce4b509d59381cf7bd79e41a966b6 | refs/heads/master | 2023-07-24T04:19:55.869169 | 2021-09-03T13:09:45 | 2021-09-03T13:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | import sys
from opytimark.utils import constants
def test_constants():
    """Smoke-check the module-level constants exported by opytimark.utils.constants."""
    assert constants.DATA_FOLDER == 'data/'
    assert constants.EPSILON == 1e-32
    assert constants.FLOAT_MAX == sys.float_info.max
| [
"gth.rosa@uol.com.br"
] | gth.rosa@uol.com.br |
dfdbbbdf80ff3a131f9a789153624a55f21f9c20 | aa4b80cf7e7ac0028d0c7f67ade982d9b740a38b | /python/list/list_max.py | 0272716fe25776fbf18ced846eea450e85342060 | [] | no_license | ratularora/python_code | 9ac82492b8dc2e0bc2d96ba6df6fdc9f8752d322 | ddce847ba338a41b0b2fea8a36d49a61aa0a5b13 | refs/heads/master | 2021-01-19T04:34:22.038909 | 2017-09-27T08:14:45 | 2017-09-27T08:14:45 | 84,435,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | list1, list2 = [123, 565654, 'A','Z','gdgf'], [456, 700, 200]
# Python 2 print statements: show the largest element of each list.
# NOTE(review): list1 mixes ints and strings; under Python 2 max() compares
# across types (by type name), so the result here is a string -- confirm intended.
print "Max value element : ", max(list1)
print "Max value element : ", max(list2)
| [
"arora.ratul@gmail.com"
] | arora.ratul@gmail.com |
ff455dd0b1d99aba94e9c35e313ed4aa46e522f1 | fb65b7c000642dca68c93ee85a87795b3f30fe21 | /Core_Python/pgm11.py | 652850cfd01fd7bd255185ef1e708fbf72d76138 | [] | no_license | toncysara17/luminarpythonprograms | f41b446251feba641e117d87ce235dc556086f8f | 17bc37c3f83c0e9792aaa8bccd901371a6413f14 | refs/heads/master | 2023-04-17T18:51:31.493118 | 2021-04-20T05:25:02 | 2021-04-20T05:25:02 | 358,550,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | #pgm to swap two variables
# Swap two variables and show the values before and after.
num1 = 5
num2 = 10

print("value before swapping")
print("Number1 is", num1)
print("Number2 is", num2)

# Pythonic swap via tuple unpacking -- no temporary variable needed.
num1, num2 = num2, num1

print("Value after swapping")
print("Number1 is", num1)
print("Number2 is", num2)
"toncysara12@gmail.com"
] | toncysara12@gmail.com |
4f338929596b6be67843874be8412f875486b877 | 0e5291f09c5117504447cc8df683ca1506b70560 | /test/test_writable_tenant_group.py | 735b459c6d0339a0fcabd4fa29ee9bc034db512a | [
"MIT"
] | permissive | nrfta/python-netbox-client | abd0192b79aab912325485bf4e17777a21953c9b | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | refs/heads/master | 2022-11-13T16:29:02.264187 | 2020-07-05T18:06:42 | 2020-07-05T18:06:42 | 277,121,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import netbox_client
from netbox_client.models.writable_tenant_group import WritableTenantGroup # noqa: E501
from netbox_client.rest import ApiException
class TestWritableTenantGroup(unittest.TestCase):
    """Unit-test stubs for the WritableTenantGroup model."""

    def setUp(self):
        # No fixtures are required by these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testWritableTenantGroup(self):
        """Test WritableTenantGroup.

        FIXME: construct the object with mandatory attributes using example
        values, e.g.
        model = netbox_client.models.writable_tenant_group.WritableTenantGroup()
        """
        pass
if __name__ == '__main__':
unittest.main()
| [
"67791576+underline-bot@users.noreply.github.com"
] | 67791576+underline-bot@users.noreply.github.com |
fb54501652083103c6154e7fb4d55f828df3fc3a | b0d5e423f09181a322a0166b06bf7fe45a3befc0 | /MetioTube/asgi.py | e2cd92f47887e46c1b286179c5713a337df86ce6 | [
"MIT"
] | permissive | Sheko1/MetioTube | f5da4184bb1590565ba34cef2fff02b379ab3e56 | c1c36d00ea46fc37cc7f3c0c9c0cae6e89b2113c | refs/heads/main | 2023-07-04T12:54:57.500778 | 2021-08-14T19:41:56 | 2021-08-14T19:41:56 | 383,907,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for MetioTube project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the app object is created;
# setdefault lets an externally-exported value win.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MetioTube.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) look up.
application = get_asgi_application()
| [
"martinkypar@gmail.com"
] | martinkypar@gmail.com |
2d98fddc9cad55ca558182e05f59ea35a4c2354e | 7379db50aa0f0efe8d2e43e1d4b2b593fcd940e2 | /data_generators/chaotic_rnn.py | 28e9218c45f622eec7eff4bf650b8d9d96f12f17 | [] | no_license | snel-repo/demos | ea92f3f92c583a1cd6b634f6a52098a3d124bb95 | aeaaf7baaf24da1c19fae9788087db8beec5bb11 | refs/heads/master | 2022-05-23T15:17:46.689178 | 2020-04-29T23:14:19 | 2020-04-29T23:14:19 | 260,057,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,438 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
import tensorflow as tf # used for flags here
from utils import write_datasets
from synthetic_data_utils import add_alignment_projections, generate_data
from synthetic_data_utils import generate_rnn, get_train_n_valid_inds
from synthetic_data_utils import nparray_and_transpose
from synthetic_data_utils import spikify_data, gaussify_data, split_list_by_inds
#import matplotlib
#import matplotlib.pyplot as plt
import scipy.signal
#matplotlib.rcParams['image.interpolation'] = 'nearest'
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "thits_data",
"Name of data file for input case.")
flags.DEFINE_string("noise_type", "poisson", "Noise type for data.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 100, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_integer("S", 50, "Number of sampled units from RNN")
flags.DEFINE_integer("npcs", 10, "Number of PCS for multi-session case.")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nreplications", 40,
"Number of noise replications of the same underlying rates.")
flags.DEFINE_float("g", 1.5, "Complexity of dynamics")
flags.DEFINE_float("x0_std", 1.0,
"Volume from which to pull initial conditions (affects diversity of dynamics.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("input_magnitude", 20.0,
"For the input case, what is the value of the input?")
flags.DEFINE_integer("ninputs", 0, "number of inputs")
flags.DEFINE_list("input_magnitude_list", "10,15,20,25,30", "Magnitudes for multiple inputs")
flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second")
flags.DEFINE_boolean("lorenz", False, "use lorenz system as generated inputs")
FLAGS = flags.FLAGS
# Note that with N small, (as it is 25 above), the finite size effects
# will have pretty dramatic effects on the dynamics of the random RNN.
# If you want more complex dynamics, you'll have to run the script a
# lot, or increase N (or g).
# Getting hard vs. easy data can be a little stochastic, so we set the seed.
# Pull out some commonly used parameters.
# These are user parameters (configuration)
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
S = FLAGS.S
input_magnitude = FLAGS.input_magnitude
input_magnitude_list = [float(i) for i in (FLAGS.input_magnitude_list)]
ninputs = FLAGS.ninputs
nreplications = FLAGS.nreplications
E = nreplications * C # total number of trials
# S is the number of measurements in each datasets, w/ each
# dataset having a different set of observations.
ndatasets = N/S # ok if rounded down
train_percentage = FLAGS.train_percentage
ntime_steps = int(T / FLAGS.dt)
# End of user parameters
lorenz=FLAGS.lorenz
if ninputs >= 1:
rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate, ninputs)
else:
rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate)
# Check to make sure the RNN is the one we used in the paper.
if N == 50:
assert abs(rnn['W'][0,0] - 0.06239899) < 1e-8, 'Error in random seed?'
rem_check = nreplications * train_percentage
assert abs(rem_check - int(rem_check)) < 1e-8, \
'Train percentage * nreplications should be integral number.'
#if lorenz:
# lorenz_input = generate_lorenz(ntime_steps, rng)
# rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate, lorenz_input)
# Initial condition generation, and condition label generation. This
# happens outside of the dataset loop, so that all datasets have the
# same conditions, which is similar to a neurophys setup.
condition_number = 0
x0s = []
condition_labels = []
print(FLAGS.x0_std)
for c in range(C):
x0 = FLAGS.x0_std * rng.randn(N, 1)
x0s.append(np.tile(x0, nreplications)) # replicate x0 nreplications times
# replicate the condition label nreplications times
for ns in range(nreplications):
condition_labels.append(condition_number)
condition_number += 1
x0s = np.concatenate(x0s, axis=1)
#print(x0s.shape)
#print(x0s[1,1:20])
# Containers for storing data across data.
datasets = {}
for n in range(ndatasets):
print(n+1, " of ", ndatasets)
# First generate all firing rates. in the next loop, generate all
# replications this allows the random state for rate generation to be
# independent of n_replications.
dataset_name = 'dataset_N' + str(N) + '_S' + str(S)
if S < N:
dataset_name += '_n' + str(n+1)
# Sample neuron subsets. The assumption is the PC axes of the RNN
# are not unit aligned, so sampling units is adequate to sample all
# the high-variance PCs.
P_sxn = np.eye(S,N)
for m in range(n):
P_sxn = np.roll(P_sxn, S, axis=1)
if input_magnitude > 0.0:
# time of "hits" randomly chosen between [1/4 and 3/4] of total time
if ninputs>1:
for n in range(ninputs):
if n == 0:
input_times = rng.choice(int(ntime_steps/2), size=[E]) + int(ntime_steps/4)
else:
input_times = np.vstack((input_times, (rng.choice(int(ntime_steps/2), size=[E]) + int(ntime_steps/4))))
else:
input_times = rng.choice(int(ntime_steps/2), size=[E]) + int(ntime_steps/4)
else:
input_times = None
print(ninputs)
if ninputs > 1:
rates, x0s, inputs = \
generate_data(rnn, T=T, E=E, x0s=x0s, P_sxn=P_sxn,
input_magnitude=input_magnitude_list,
input_times=input_times, ninputs=ninputs, rng=rng)
else:
rates, x0s, inputs = \
generate_data(rnn, T=T, E=E, x0s=x0s, P_sxn=P_sxn,
input_magnitude=input_magnitude,
input_times=input_times, ninputs=ninputs, rng=rng)
if FLAGS.noise_type == "poisson":
noisy_data = spikify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
elif FLAGS.noise_type == "gaussian":
noisy_data = gaussify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
else:
raise ValueError("Only noise types supported are poisson or gaussian")
# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nreplications)
# Split the data, inputs, labels and times into train vs. validation.
rates_train, rates_valid = \
split_list_by_inds(rates, train_inds, valid_inds)
#rates_train_no_input, rates_valid_no_input = \
# split_list_by_inds(rates_noinput, train_inds, valid_inds)
noisy_data_train, noisy_data_valid = \
split_list_by_inds(noisy_data, train_inds, valid_inds)
input_train, inputs_valid = \
split_list_by_inds(inputs, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = \
split_list_by_inds(condition_labels, train_inds, valid_inds)
if ninputs>1:
input_times_train, input_times_valid = \
split_list_by_inds(input_times, train_inds, valid_inds, ninputs)
input_magnitude = input_magnitude_list
else:
input_times_train, input_times_valid = \
split_list_by_inds(input_times, train_inds, valid_inds)
#lorenz_train = np.expand_dims(lorenz_train, axis=1)
#lorenz_valid = np.expand_dims(lorenz_valid, axis=1)
#print((np.array(input_train)).shape)
#print((np.array(lorenz_train)).shape)
# Turn rates, noisy_data, and input into numpy arrays.
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
#rates_train_no_input = nparray_and_transpose(rates_train_no_input)
#rates_valid_no_input = nparray_and_transpose(rates_valid_no_input)
noisy_data_train = nparray_and_transpose(noisy_data_train)
noisy_data_valid = nparray_and_transpose(noisy_data_valid)
input_train = nparray_and_transpose(input_train)
inputs_valid = nparray_and_transpose(inputs_valid)
# Note that we put these 'truth' rates and input into this
# structure, the only data that is used in LFADS are the noisy
# data e.g. spike trains. The rest is either for printing or posterity.
data = {'train_truth': rates_train,
'valid_truth': rates_valid,
#'train_truth_no_input': rates_train_no_input,
#'valid_truth_no_input': rates_valid_no_input,
'input_train_truth' : input_train,
'input_valid_truth' : inputs_valid,
'train_data' : noisy_data_train,
'valid_data' : noisy_data_valid,
'train_percentage' : train_percentage,
'nreplications' : nreplications,
'dt' : rnn['dt'],
'input_magnitude' : input_magnitude,
'input_times_train' : input_times_train,
'input_times_valid' : input_times_valid,
'P_sxn' : P_sxn,
'condition_labels_train' : condition_labels_train,
'condition_labels_valid' : condition_labels_valid,
'conversion_factor': 1.0 / rnn['conversion_factor']}
datasets[dataset_name] = data
if S < N:
# Note that this isn't necessary for this synthetic example, but
# it's useful to see how the input factor matrices were initialized
# for actual neurophysiology data.
datasets = add_alignment_projections(datasets, npcs=FLAGS.npcs)
# Write out the datasets.
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
| [
"joelye9@gmail.com"
] | joelye9@gmail.com |
2a4782180f8375d960ca9e96805817fe1a9d44db | 35a253595e158085dbb40d33d44dde026269c8a7 | /198 House Robber.py | 562340100cf91741af888da5d9b11ebcbb944cf3 | [
"MIT"
] | permissive | ee08b397/LeetCode-4 | 7a8174275fbe7e0e667575aedd1ff1a8647776c3 | 3b26870e946b510797b6b284822a1011ce048fbe | refs/heads/master | 2020-12-24T15:13:22.899164 | 2015-09-22T02:41:13 | 2015-09-22T02:41:13 | 43,003,940 | 1 | 0 | null | 2015-09-23T13:52:25 | 2015-09-23T13:52:25 | null | UTF-8 | Python | false | false | 891 | py | """
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed,
the only constraint stopping you from robbing each of them is that adjacent houses have security system connected and it
will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of
money you can rob tonight without alerting the police.
"""
__author__ = 'Daniel'
class Solution:
    def rob(self, nums):
        """
        Maximum total that can be robbed from non-adjacent houses.

        DP with a rolling pair instead of the O(n) table:
            f_i = max(f_{i-1}, f_{i-2} + A[i])
        Uses range() instead of Python-2-only xrange(); O(1) extra space.
        Returns 0 for an empty street.
        """
        prev2 = 0  # f_{i-2}
        prev1 = 0  # f_{i-1}
        for value in nums:
            prev2, prev1 = prev1, max(prev1, prev2 + value)
        return prev1
| [
"zhangdanyangg@gmail.com"
] | zhangdanyangg@gmail.com |
0b8470e562b21979ccc4ab4da93335262b2d9c86 | 74e15a8246fff5fd65a4169a0908c2639912992a | /pykeg/plugin/datastore_test.py | ddd244646f429ac166ff2b9aa5dd7b32765e5fa0 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Kegbot/kegbot-server | 5abf5fedf102aa9000e9dbe0ae90f9d4a70b93f3 | e0f953137bc261519444cb769455317074c7253e | refs/heads/main | 2023-02-16T08:57:50.931256 | 2022-08-18T17:21:13 | 2022-08-18T17:21:13 | 1,886,336 | 77 | 68 | MIT | 2023-02-15T19:01:41 | 2011-06-12T22:25:29 | JavaScript | UTF-8 | Python | false | false | 1,397 | py | from django.test import TransactionTestCase
from pykeg.core import models
from pykeg.plugin import datastore
class DatastoreTestCase(TransactionTestCase):
    """Runs the same scenario against both plugin datastore backends:
    basic set/get, clearing a key by setting it to None, and round-tripping
    a nested (JSON-like) value."""

    def test_model_datastore(self):
        # Backed by the PluginData model: each entry becomes a database row.
        ds = datastore.ModelDatastore(plugin_name="test")
        self.assertEqual(0, models.PluginData.objects.all().count())
        ds.set("foo", "bar")
        q = models.PluginData.objects.all()
        self.assertEqual(1, q.count())
        self.assertEqual("test", q[0].plugin_name)
        self.assertEqual("foo", q[0].key)
        self.assertEqual("bar", q[0].value)
        # Setting to 'None' clears value
        ds.set("foo", None)
        self.assertEqual(0, models.PluginData.objects.all().count())
        # Complex types survive.
        ds.set("obj", {"asdf": 123, "foo": None})
        self.assertEqual({"asdf": 123, "foo": None}, ds.get("obj"))

    def test_in_memory_datastore(self):
        # Backed by a plain dict; keys are namespaced as "<plugin>:<key>".
        ds = datastore.InMemoryDatastore(plugin_name="test")
        self.assertEqual(0, len(ds.data))
        ds.set("foo", "bar")
        self.assertEqual(1, len(ds.data))
        self.assertEqual("bar", ds.data["test:foo"])
        # Setting to 'None' clears value
        ds.set("foo", None)
        self.assertEqual(0, len(ds.data))
        # Complex types survive.
        ds.set("obj", {"asdf": 123, "foo": None})
        self.assertEqual({"asdf": 123, "foo": None}, ds.get("obj"))
| [
"opensource@hoho.com"
] | opensource@hoho.com |
bd87f7ffff21edcd8780035ad4b9bd302bfb6a72 | 0df898bf192b6ad388af160ecbf6609445c34f96 | /middleware/backend/app/magnet/research/schemas.py | 14784fcd60e81faf427db23b928fed336d7760bc | [] | no_license | sasano8/magnet | a5247e6eb0a7153d6bbca54296f61194925ab3dc | 65191c877f41c632d29133ebe4132a0bd459f752 | refs/heads/master | 2023-01-07T10:11:38.599085 | 2020-11-13T02:42:41 | 2020-11-13T02:42:41 | 298,334,432 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from typing import List, Optional
from magnet import config, BaseModel
class CaseNode(BaseModel):
Config = config.ORM
id: int
name: str
class Target(BaseModel):
Config = config.ORM
id: int
name: str
node_id: int
| [
"y-sasahara@ys-method.com"
] | y-sasahara@ys-method.com |
1d030a1a87fa78e0fb8f511029f3f2a4218a0f6b | 551b75f52d28c0b5c8944d808a361470e2602654 | /examples/IoTDA/V5/TagManagement.py | cb3d92b2c88cd64d06bf9647b4b17b19d38a2d7f | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 2,632 | py | # coding: utf-8
from huaweicloudsdkcore.http.http_config import HttpConfig
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkiotda.v5 import *
def getResourcesByTags(client):
    """Demo: list IoTDA resources of type "device" carrying the sample tag.

    Prints the response on success; on a client-side request error, prints
    the status code, request id and error details instead of raising.
    """
    try:
        resource_type = "device"
        tags = [
            {
                "tag_key": "testTagName",
                "tag_value": "testTagValue"
            }
        ]
        body = QueryResourceByTagsDTO(resource_type=resource_type, tags=tags)
        request = ListResourcesByTagsRequest(body=body)
        response = client.list_resources_by_tags(request)
        print(response)
    except exceptions.ClientRequestException as e:
        print(e.status_code)
        print(e.request_id)
        print(e.error_code)
        print(e.error_msg)
def bindTagsToResource(client):
    """Demo: attach the sample tag to one device resource.

    Prints the response on success; on a client-side request error, prints
    the error details instead of raising.
    """
    try:
        resource_id = "5e25d39a3b7c24fa3638804b_nb_0403_1"
        resource_type = "device"
        tags = [
            {
                "tag_key": "testTagName",
                "tag_value": "testTagValue"
            }
        ]
        body = BindTagsDTO(resource_id=resource_id, resource_type=resource_type, tags=tags)
        request = TagDeviceRequest(body=body)
        response = client.tag_device(request)
        print(response)
    except exceptions.ClientRequestException as e:
        print(e.status_code)
        print(e.request_id)
        print(e.error_code)
        print(e.error_msg)
def unbindTagsToResource(client):
    """Demo: remove the sample tag (by key) from one device resource.

    Prints the response on success; on a client-side request error, prints
    the error details instead of raising.
    """
    try:
        resource_id = "5e25d39a3b7c24fa3638804b_nb_0403_1"
        resource_type = "device"
        tag_keys = ["testTagName"]
        body = UnbindTagsDTO(resource_id=resource_id, resource_type=resource_type, tag_keys=tag_keys)
        request = UntagDeviceRequest(body=body)
        response = client.untag_device(request)
        print(response)
    except exceptions.ClientRequestException as e:
        print(e.status_code)
        print(e.request_id)
        print(e.error_code)
        print(e.error_msg)
if __name__ == '__main__':
    # Fill in real credentials/endpoint before running this demo.
    ak = "{your ak string}"
    sk = "{your sk string}"
    endpoint = "{your endpoint}"
    project_id = "{your project id}"
    config = HttpConfig.get_default_config()
    # NOTE(review): disables TLS certificate verification -- acceptable for a
    # local demo, unsafe for production use.
    config.ignore_ssl_verification = True
    credentials = BasicCredentials(ak, sk, project_id)
    iotda_client = IoTDAClient().new_builder(IoTDAClient) \
        .with_http_config(config) \
        .with_credentials(credentials) \
        .with_endpoint(endpoint) \
        .build()
    getResourcesByTags(iotda_client)
    bindTagsToResource(iotda_client)
    unbindTagsToResource(iotda_client)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
38cef241ffcbaddf58cb3d75921e6a5ce7fd5e7b | 89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04 | /tools/cygprofile/check_orderfile.py | ea48127db476c6b8a4b449d1e0b3a84efcb28195 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] | permissive | bino7/chromium | 8d26f84a1b6e38a73d1b97fea6057c634eff68cb | 4666a6bb6fdcb1114afecf77bdaa239d9787b752 | refs/heads/master | 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 | BSD-3-Clause | 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null | UTF-8 | Python | false | false | 4,024 | py | #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Check that symbols are ordered into a binary as they appear in the orderfile.
"""
import logging
import optparse
import sys
import cyglog_to_orderfile
import cygprofile_utils
import patch_orderfile
import symbol_extractor
_MAX_WARNINGS_TO_PRINT = 200
def _IsSameMethod(name1, name2):
  """Returns true if name1 or name2 are split method forms of the other,
  i.e. both reduce to the same name once suffixes are stripped."""
  canonical1 = patch_orderfile.RemoveSuffixes(name1)
  canonical2 = patch_orderfile.RemoveSuffixes(name2)
  return canonical1 == canonical2
def _CountMisorderedSymbols(symbols, symbol_infos):
  """Count the number of misordered symbols, and log them.
  Args:
    symbols: ordered sequence of symbols from the orderfile
    symbol_infos: ordered list of SymbolInfo from the binary
  Returns:
    (misordered_pairs_count, matched_symbols_count, unmatched_symbols_count)
  """
  name_to_symbol_info = symbol_extractor.CreateNameToSymbolInfo(symbol_infos)
  matched_symbol_infos = []
  missing_count = 0
  misordered_count = 0
  # Find the SymbolInfo matching the orderfile symbols in the binary.
  for symbol in symbols:
    if symbol in name_to_symbol_info:
      matched_symbol_infos.append(name_to_symbol_info[symbol])
    else:
      missing_count += 1
      # Cap warning spam. NOTE(review): '<' prints at most
      # _MAX_WARNINGS_TO_PRINT - 1 warnings, one fewer than the log
      # message below implies.
      if missing_count < _MAX_WARNINGS_TO_PRINT:
        logging.warning('Symbol "%s" is in the orderfile, not in the binary' %
                        symbol)
  logging.info('%d matched symbols, %d un-matched (Only the first %d unmatched'
               ' symbols are shown)' % (
                   len(matched_symbol_infos), missing_count,
                   _MAX_WARNINGS_TO_PRINT))
  # In the order of the orderfile, find all the symbols that are at an offset
  # smaller than their immediate predecessor, and record the pair.
  # The sentinel's offset of -1 guarantees the first matched symbol is never
  # counted as misordered.
  previous_symbol_info = symbol_extractor.SymbolInfo(
      name='', offset=-1, size=0, section='')
  for symbol_info in matched_symbol_infos:
    if symbol_info.offset < previous_symbol_info.offset and not (
        _IsSameMethod(symbol_info.name, previous_symbol_info.name)):
      logging.warning('Misordered pair: %s - %s' % (
          str(previous_symbol_info), str(symbol_info)))
      misordered_count += 1
    previous_symbol_info = symbol_info
  return (misordered_count, len(matched_symbol_infos), missing_count)
def main():
  """Checks the binary's symbol order against the orderfile.

  Returns a falsy value on success, truthy on failure (misordered pairs
  above the threshold, or no symbol matched at all).
  """
  parser = optparse.OptionParser(usage=
      'usage: %prog [options] <binary> <orderfile>')
  parser.add_option('--target-arch', action='store', dest='arch',
                    choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
                    help='The target architecture for the binary.')
  # BUG FIX: without type='int' optparse stores a command-line value as a
  # string, so the 'misordered_pairs_count > options.threshold' comparison
  # below was meaningless (Python 2) or a TypeError (Python 3).
  parser.add_option('--threshold', action='store', dest='threshold', default=1,
                    type='int',
                    help='The maximum allowed number of out-of-order symbols.')
  options, argv = parser.parse_args(sys.argv)
  if not options.arch:
    options.arch = cygprofile_utils.DetectArchitecture()
  if len(argv) != 3:
    parser.print_help()
    return 1
  (binary_filename, orderfile_filename) = argv[1:]
  symbol_extractor.SetArchitecture(options.arch)
  obj_dir = cygprofile_utils.GetObjDir(binary_filename)
  symbol_to_sections_map = \
      cyglog_to_orderfile.GetSymbolToSectionsMapFromObjectFiles(obj_dir)
  section_to_symbols_map = cygprofile_utils.InvertMapping(
      symbol_to_sections_map)
  symbols = patch_orderfile.GetSymbolsFromOrderfile(orderfile_filename,
                                                    section_to_symbols_map)
  symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
  # Missing symbols is not an error since some of them can be eliminated
  # through inlining.
  (misordered_pairs_count, matched_symbols, _) = _CountMisorderedSymbols(
      symbols, symbol_infos)
  return (misordered_pairs_count > options.threshold) or (matched_symbols == 0)
if __name__ == '__main__':
  # Exit status is non-zero when the orderfile check fails.
  logging.basicConfig(level=logging.INFO)
  sys.exit(main())
| [
"bino.zh@gmail.com"
] | bino.zh@gmail.com |
a2bed2194305ab6bc2efcb6e7da0d2fcc9b5db94 | f063232b59eb7535e4212ec2b6b477c472fdb56e | /intersection-of-two-linked-lists.py | b97bd5fdd82b5d68474a1634ab27021e37453d30 | [] | no_license | xzjh/OJ_LeetCode | a01d43f6925bb8888bb79ca8a03a75dd8a6eac07 | fa2cfe2ec7774ab4a356520668d5dbee9d63077c | refs/heads/master | 2021-01-20T11:13:36.291125 | 2015-10-01T09:04:47 | 2015-10-01T09:04:47 | 25,239,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param two ListNodes
    # @return the intersected ListNode
    def getIntersectionNode(self, headA, headB):
        """Return the first node shared by both lists, or None.

        Two-pointer walk: each cursor traverses its own list and then the
        other one, so both travel lenA + lenB steps and either meet at the
        intersection node or reach the end (None) together.
        """
        if not headA or not headB:
            return None
        cursorA, cursorB = headA, headB
        while cursorA != cursorB:
            cursorA = headB if cursorA is None else cursorA.next
            cursorB = headA if cursorB is None else cursorB.next
        return cursorA
| [
"jsxzjh@gmail.com"
] | jsxzjh@gmail.com |
ea606e1ffd245c9b3b6dbda9d9727b9c71c0c48f | 7fd898850480206395eba9878ef5316d5bd4dbcf | /Trakttv.bundle/Contents/Code/plex/media_server.py | 6ab79fe9cc16257e514934593086d7ceee34966c | [] | no_license | Qwaint/Plex-Trakt-Scrobbler | cdfbef4566b8db3a05e72a46ae92c655a8f697e5 | 383ffa338ad64e481bd14c71950af42f2f9edd83 | refs/heads/master | 2020-12-11T04:10:13.332420 | 2014-02-01T13:31:58 | 2014-02-01T13:31:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,836 | py | from core.helpers import add_attribute
from core.network import request
# Regular Expressions for GUID parsing
MOVIE_REGEXP = Regex('com.plexapp.agents.*://(?P<imdb_id>tt[-a-z0-9\.]+)')
MOVIEDB_REGEXP = Regex('com.plexapp.agents.themoviedb://(?P<tmdb_id>[0-9]+)')
STANDALONE_REGEXP = Regex('com.plexapp.agents.standalone://(?P<tmdb_id>[0-9]+)')
TVSHOW_REGEXP = Regex(
'com.plexapp.agents.(thetvdb|thetvdbdvdorder|abstvdb|xbmcnfotv|mcm)://'
'(MCM_TV_A_)?' # For Media Center Master
'(?P<tvdb_id>[-a-z0-9\.]+)/'
'(?P<season>[-a-z0-9\.]+)/'
'(?P<episode>[-a-z0-9\.]+)'
)
TVSHOW1_REGEXP = Regex(
'com.plexapp.agents.(thetvdb|thetvdbdvdorder|abstvdb|xbmcnfotv|mcm)://'
'(MCM_TV_A_)?' # For Media Center Master
'(?P<tvdb_id>[-a-z0-9\.]+)'
)
MOVIE_PATTERNS = [
MOVIE_REGEXP,
MOVIEDB_REGEXP,
STANDALONE_REGEXP
]
PMS_URL = 'http://localhost:32400%s' # TODO remove this, replace with PMS.base_url
class PMS(object):
base_url = 'http://localhost:32400'
@classmethod
def request(cls, path='/', response_type='xml', raise_exceptions=False, retry=True, timeout=3, **kwargs):
if not path.startswith('/'):
path = '/' + path
response = request(
cls.base_url + path,
response_type,
raise_exceptions=raise_exceptions,
retry=retry,
timeout=timeout,
**kwargs
)
return response.data if response else None
@classmethod
def metadata(cls, item_id):
# Prepare a dict that contains all the metadata required for trakt.
result = cls.request('library/metadata/%s' % item_id)
if not result:
return None
for section in result.xpath('//Video'):
metadata = {}
# Add attributes if they exist
add_attribute(metadata, section, 'duration', float, lambda x: int(x / 60000))
add_attribute(metadata, section, 'year', int)
add_attribute(metadata, section, 'lastViewedAt', int, target_key='last_played')
add_attribute(metadata, section, 'viewCount', int, target_key='plays')
add_attribute(metadata, section, 'type')
if metadata['type'] == 'movie':
metadata['title'] = section.get('title')
elif metadata['type'] == 'episode':
metadata['title'] = section.get('grandparentTitle')
metadata['episode_title'] = section.get('title')
# Add guid match data
cls.add_guid(metadata, section)
return metadata
Log.Warn('Unable to find metadata for item %s' % item_id)
return None
@staticmethod
def add_guid(metadata, section):
guid = section.get('guid')
if not guid:
return
if section.get('type') == 'movie':
# Cycle through patterns and try get a result
for pattern in MOVIE_PATTERNS:
match = pattern.search(guid)
# If we have a match, update the metadata
if match:
metadata.update(match.groupdict())
return
Log('The movie %s doesn\'t have any imdb or tmdb id, it will be ignored.' % section.get('title'))
elif section.get('type') == 'episode':
match = TVSHOW_REGEXP.search(guid)
# If we have a match, update the metadata
if match:
metadata.update(match.groupdict())
else:
Log('The episode %s doesn\'t have any tmdb id, it will not be scrobbled.' % section.get('title'))
else:
Log('The content type %s is not supported, the item %s will not be scrobbled.' % (
section.get('type'), section.get('title')
))
@classmethod
def client(cls, client_id):
if not client_id:
Log.Warn('Invalid client_id provided')
return None
result = cls.request('clients')
if not result:
return None
found_clients = []
for section in result.xpath('//Server'):
found_clients.append(section.get('machineIdentifier'))
if section.get('machineIdentifier') == client_id:
return section
Log.Info("Unable to find client '%s', available clients: %s" % (client_id, found_clients))
return None
@classmethod
def set_logging_state(cls, state):
# TODO PUT METHOD
result = cls.request(':/prefs?logDebug=%s' % int(state), 'text', method='PUT')
if result is None:
return False
Log.Debug('Response: %s' % result)
return True
@classmethod
def get_logging_state(cls):
result = cls.request(':/prefs')
if result is None:
return False
for setting in result.xpath('//Setting'):
if setting.get('id') == 'logDebug' and setting.get('value'):
value = setting.get('value').lower()
return True if value == 'true' else False
Log.Warn('Unable to determine logging state, assuming disabled')
return False
@classmethod
def get_server_info(cls):
return cls.request()
@classmethod
def get_server_version(cls, default=None):
server_info = cls.get_server_info()
if server_info is None:
return default
return server_info.attrib.get('version') or default
@classmethod
def get_sessions(cls):
return cls.request('status/sessions')
@classmethod
def get_video_session(cls, session_key):
sessions = cls.get_sessions()
if sessions is None:
Log.Warn('Status request failed, unable to connect to server')
return None
for section in sessions.xpath('//MediaContainer/Video'):
if section.get('sessionKey') == session_key and '/library/metadata' in section.get('key'):
return section
Log.Warn('Session not found')
return None
@classmethod
def get_metadata(cls, key):
return cls.request('library/metadata/%s' % key)
@classmethod
def get_metadata_guid(cls, key):
metadata = cls.get_metadata(key)
if metadata is None:
return None
return metadata.xpath('//Directory')[0].get('guid')
@classmethod
def get_metadata_leaves(cls, key):
return cls.request('library/metadata/%s/allLeaves' % key)
@classmethod
def get_sections(cls):
return cls.request('library/sections')
@classmethod
def get_section(cls, name):
return cls.request('library/sections/%s/all' % name)
@classmethod
def get_section_directories(cls, section_name):
section = cls.get_section(section_name)
if section is None:
return []
return section.xpath('//Directory')
@classmethod
def get_section_videos(cls, section_name):
section = cls.get_section(section_name)
if section is None:
return []
return section.xpath('//Video')
@classmethod
def scrobble(cls, video):
if video.get('viewCount') > 0:
Log.Debug('video has already been marked as seen')
return False
result = cls.request(
':/scrobble?identifier=com.plexapp.plugins.library&key=%s' % (
video.get('ratingKey')
),
response_type='text'
)
return result is not None
@classmethod
def rate(cls, video, rating):
result = cls.request(
':/rate?key=%s&identifier=com.plexapp.plugins.library&rating=%s' % (
video.get('ratingKey'), rating
),
response_type='text'
)
return result is not None
| [
"gardiner91@gmail.com"
] | gardiner91@gmail.com |
208f8c6609bfaa29d9f350584a72d47b067aac36 | 011157c49983db38489f26f51db7fe22f8519afc | /problems/812.py | 15ae89e58fec6eb2f61e83be7ee0790ec867c328 | [] | no_license | chasecolford/Leetcode | c0054774d99e7294419039f580c1590495f950b3 | dded74e0c6e7a6c8c8df58bed3640864d0ae3b91 | refs/heads/master | 2023-08-04T11:33:18.003570 | 2021-09-10T21:06:55 | 2021-09-10T21:06:55 | 283,154,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | class Solution:
def largestTriangleArea(self, points: List[List[int]]) -> float:
return max(0.5 * abs(i[0] * j[1] + j[0] * k[1] + k[0] * i[1]- j[0] * i[1] - k[0] * j[1] - i[0] * k[1])
for i, j, k in itertools.combinations(points, 3)) | [
"56804717+ChaseSinify@users.noreply.github.com"
] | 56804717+ChaseSinify@users.noreply.github.com |
b05cf9848fef04d671c3f3771010b9614cef8003 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/squareroot/8dcd69f5-11fa-4c5f-8331-cd8f0db1aa54__Sarah01.py | b15d6db979b93090c7a22e978745c9ece3dc2371 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | def Adder(N1,N2):
MyResult = N1 + N2
return MyResult
def Subtractor(N1,N2):
MyResult = N1 - N2
return MyResult
def Main():
X = input("Enter a value for X ---")
Y = input("Enter a value for Y ---")
if (X >= Y):
print "Subtraction happened"
MyResult = Subtractor(X,Y)
else:
print "Addition happened"
MyResult = Adder(X,Y)
Result1 = math.sqrt(MyResult)
print "the square root of ", MyResult, " is ", Result1
return
def Frog():
print "Yay!"
return
| [
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
8e584b42af7aa4d3ca68a587b7979edf6ce05e75 | 0ac6eeac34c65a200d66be256593f3e064ab1a1a | /TagReadWrite/Utils.py | bb6601e96c8acd37b34652192770527b134782da | [] | no_license | flokli/CrossMgr | cc6c2476def8868a9fce14a6f2a08dd5eea79612 | 21d542edacfd89f645a3ebb426fb16635c1f452e | refs/heads/master | 2020-09-26T08:34:21.857072 | 2019-11-30T20:28:53 | 2019-11-30T20:28:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,988 | py | import datetime
import wx
import os
import re
import sys
import math
import socket
import subprocess
timeoutSecs = None
DEFAULT_HOST = None
def GetDefaultHost():
global DEFAULT_HOST
DEFAULT_HOST = socket.gethostbyname(socket.gethostname())
if DEFAULT_HOST == '127.0.0.1':
reSplit = re.compile('[: \t]+')
try:
co = subprocess.Popen(['ifconfig'], stdout = subprocess.PIPE)
ifconfig = co.stdout.read()
for line in ifconfig.split('\n'):
line = line.strip()
try:
if line.startswith('inet addr:'):
fields = reSplit.split( line )
addr = fields[2]
if addr != '127.0.0.1':
DEFAULT_HOST = addr
break
except:
pass
except:
pass
return DEFAULT_HOST
GetDefaultHost()
'''
wx.ICON_EXCLAMATION Shows an exclamation mark icon.
wx.ICON_HAND Shows an error icon.
wx.ICON_ERROR Shows an error icon - the same as wxICON_HAND.
wx.ICON_QUESTION Shows a question mark icon.
wx.ICON_INFORMATION Shows an information (i) icon.
'''
def MessageOK( parent, message, title = '', iconMask = wx.ICON_INFORMATION, pos = wx.DefaultPosition ):
dlg = wx.MessageDialog(parent, message, title, wx.OK | iconMask, pos)
dlg.ShowModal()
dlg.Destroy()
return True
def MessageOKCancel( parent, message, title = '', iconMask = wx.ICON_QUESTION):
dlg = wx.MessageDialog(parent, message, title, wx.OK | wx.CANCEL | iconMask )
response = dlg.ShowModal()
dlg.Destroy()
return True if response == wx.ID_OK else False
def SetValue( st, value ):
if st.GetValue() != value:
st.SetValue( value )
return True
return False
def SetLabel( st, label ):
if st.GetLabel() != label:
st.SetLabel( label )
return True
return False
def formatTime( secs, highPrecision = False ):
if secs is None:
secs = 0
if secs < 0:
sign = '-'
secs = -secs
else:
sign = ''
f, ss = math.modf(secs)
secs = int(ss)
hours = int(secs // (60*60))
minutes = int( (secs // 60) % 60 )
secs = secs % 60
if highPrecision:
decimal = '.%02d' % int( f * 100 )
else:
decimal = ''
if hours > 0:
return "%s%d:%02d:%02d%s" % (sign, hours, minutes, secs, decimal)
else:
return "%s%02d:%02d%s" % (sign, minutes, secs, decimal)
def formatTimeGap( secs, highPrecision = False ):
if secs is None:
secs = 0
if secs < 0:
sign = '-'
secs = -secs
else:
sign = ''
f, ss = math.modf(secs)
secs = int(ss)
hours = int(secs // (60*60))
minutes = int( (secs // 60) % 60 )
secs = secs % 60
if highPrecision:
decimal = '.%02d' % int( f * 100 )
else:
decimal = ''
if hours > 0:
return "%s%dh%d'%02d%s\"" % (sign, hours, minutes, secs, decimal)
else:
return "%s%d'%02d%s\"" % (sign, minutes, secs, decimal)
def formatTimeCompressed( secs, highPrecision = False ):
f = formatTime( secs, highPrecision )
if f[0] == '0':
return f[1:]
return f
def formatDate( date ):
y, m, d = date.split('-')
d = datetime.date( int(y,10), int(m,10), int(d,10) )
return d.strftime( '%B %d, %Y' )
def StrToSeconds( str = '' ):
secs = 0.0
for f in str.split(':'):
try:
n = float(f)
except ValueError:
n = 0.0
secs = secs * 60.0 + n
return secs
def SecondsToStr( secs = 0 ):
secs = int(secs)
return '%02d:%02d:%02d' % (secs // (60*60), (secs // 60)%60, secs % 60)
def SecondsToMMSS( secs = 0 ):
secs = int(secs)
return '%02d:%02d' % ((secs // 60)%60, secs % 60)
def ordinal( value ):
"""
Converts zero or a *postive* integer (or their string
representations) to an ordinal value.
>>> for i in range(1,13):
... ordinal(i)
...
'1st'
'2nd'
'3rd'
'4th'
'5th'
'6th'
'7th'
'8th'
'9th'
'10th'
'11th'
'12th'
>>> for i in (100, '111', '112',1011):
... ordinal(i)
...
'100th'
'111th'
'112th'
'1011th'
"""
try:
value = int(value)
except ValueError:
return value
if (value % 100)//10 != 1:
return "%d%s" % (value, ['th','st','nd','rd','th','th','th','th','th','th'][value%10])
return "%d%s" % (value, "th")
def getHomeDir():
sp = wx.StandardPaths.Get()
homedir = sp.GetUserDataDir()
try:
if os.path.basename(homedir) == '.CrossMgr':
homedir = os.path.join( os.path.dirname(homedir), '.CrossMgrApp' )
except:
pass
if not os.path.exists(homedir):
os.makedirs( homedir )
return homedir
def getDocumentsDir():
sp = wx.StandardPaths.Get()
return sp.GetDocumentsDir()
#------------------------------------------------------------------------
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
if os.path.basename(dirName) == 'library.zip':
dirName = os.path.dirname(dirName)
if 'CrossMgr?' in os.path.basename(dirName):
dirName = os.path.dirname(dirName)
if os.path.isdir( os.path.join(dirName, 'CrossMgrImages') ):
pass
elif os.path.isdir( '/usr/local/CrossMgrImages' ):
dirName = '/usr/local'
imageFolder = os.path.join(dirName, 'CrossMgrImages')
htmlFolder = os.path.join(dirName, 'CrossMgrHtml')
helpFolder = os.path.join(dirName, 'CrossMgrHtmlDoc')
def getDirName(): return dirName
def getImageFolder(): return imageFolder
def getHtmlFolder(): return htmlFolder
def getHelpFolder(): return helpFolder
#------------------------------------------------------------------------
def disable_stdout_buffering():
fileno = sys.stdout.fileno()
temp_fd = os.dup(fileno)
sys.stdout.close()
os.dup2(temp_fd, fileno)
os.close(temp_fd)
sys.stdout = os.fdopen(fileno, "w", 0)
def readDelimitedData( s, delim ):
buffer = s.recv( 4096 )
while 1:
nl = buffer.find( delim )
if nl >= 0:
yield buffer[:nl]
buffer = buffer[nl+len(delim):]
else:
more = s.recv( 4096 )
if more:
buffer = buffer + more
else:
break
yield buffer
#------------------------------------------------------------------------------------------------
reIP = re.compile( '^[0-9.]+$' )
def GetAllIps():
addrInfo = socket.getaddrinfo( socket.gethostname(), None )
ips = []
for a in addrInfo:
try:
ip = a[4][0]
except:
continue
if reIP.search(ip):
ips.append( ip )
return ips
| [
"edward.sitarski@gmail.com"
] | edward.sitarski@gmail.com |
c4dae4c9dd4656c58295539cf766d16b915310a7 | b122b723c2fbadef6f19e8c9ec4e485d48c03dec | /Python/Binary Tree Level Order Traversal II.py | 80a09e56530fae38885f1f95c1f7d46bc5644087 | [] | no_license | zhanglintc/leetcode | 5ba3977172679fde8cdcd3f4940057d55d8112eb | 8edbd2fbad8b10a497c7a10e8cd09cc91eeba079 | refs/heads/master | 2020-12-13T08:49:57.244106 | 2018-09-06T13:52:43 | 2018-09-06T13:52:43 | 18,562,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | # Binary Tree Level Order Traversal II
# for leetcode problems
# 2014.09.04 by zhanglin
# Problem Link:
# https://leetcode.com/problems/binary-tree-level-order-traversal-ii/
# Problem:
# Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
# For example:
# Given binary tree {3,9,20,#,#,15,7},
# 3
# / \
# 9 20
# / \
# 15 7
# return its bottom-up level order traversal as:
# [
# [15,7],
# [9,20],
# [3]
# ]
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a list of lists of integers
def levelOrderBottom(self, root):
dikt = {}
self.levelOrderBottom_helper(root, 1, dikt)
lst = []
for i in dikt:
lst.append(dikt[i])
return lst[::-1] # the only different from "Binary Tree Level Order Traversal"
def levelOrderBottom_helper(self, root, dept, dikt):
if root == None:
return root
if dept not in dikt:
dikt[dept] = []
dikt[dept].append(root.val)
self.levelOrderBottom_helper(root.left, dept + 1, dikt)
self.levelOrderBottom_helper(root.right, dept + 1, dikt)
| [
"zhanglintc623@gmail.com"
] | zhanglintc623@gmail.com |
db56147bf913f8b9dbc17b88ca38061e95d481cc | 628ec414b7807fc50de67345361e41cc68ba3720 | /mayan/apps/sources/serializers.py | 5da2f6af0ab247cf37c2dbf2e97eaeccfabfbd1c | [
"Apache-2.0"
] | permissive | TestingCodeReview/Mayan-EDMS | aafe144424ffa8128a4ff7cee24d91bf1e1f2750 | d493ec34b2f93244e32e1a2a4e6cda4501d3cf4e | refs/heads/master | 2020-05-27T23:34:44.118503 | 2019-04-05T02:04:18 | 2019-04-05T02:04:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | from __future__ import unicode_literals
import logging
from rest_framework import serializers
from rest_framework.reverse import reverse
from .models import StagingFolderSource, WebFormSource
logger = logging.getLogger(__name__)
class StagingFolderFileSerializer(serializers.Serializer):
filename = serializers.CharField(max_length=255)
image_url = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
def get_image_url(self, obj):
return reverse(
'stagingfolderfile-image-view',
args=(obj.staging_folder.pk, obj.encoded_filename,),
request=self.context.get('request')
)
def get_url(self, obj):
return reverse(
'stagingfolderfile-detail',
args=(obj.staging_folder.pk, obj.encoded_filename,),
request=self.context.get('request')
)
class StagingFolderSerializer(serializers.HyperlinkedModelSerializer):
files = serializers.SerializerMethodField()
class Meta:
fields = ('files',)
model = StagingFolderSource
def get_files(self, obj):
try:
return [
StagingFolderFileSerializer(entry, context=self.context).data for entry in obj.get_files()
]
except Exception as exception:
logger.error('unhandled exception: %s', exception)
return []
class WebFormSourceSerializer(serializers.Serializer):
class Meta:
model = WebFormSource
class NewDocumentSerializer(serializers.Serializer):
source = serializers.IntegerField()
document_type = serializers.IntegerField(required=False)
description = serializers.CharField(required=False)
expand = serializers.BooleanField(default=False)
file = serializers.FileField()
filename = serializers.CharField(required=False)
use_file_name = serializers.BooleanField(default=False)
| [
"roberto.rosario.gonzalez@gmail.com"
] | roberto.rosario.gonzalez@gmail.com |
1828f84475f59d71a7e93bde5e4b60ce50d63686 | 6bec763c8553ad9e85bef147014b2ddcc934dde0 | /access_control/models/permission_create.py | 8a436eaf9ab1837d186a20136e7c458b25939706 | [
"BSD-3-Clause"
] | permissive | girleffect/core-management-layer | a0257d73c562ef89d38762aa6a4de892c4fc995c | 22eda532984616cf92b07bfdd9a1fffaee6c813c | refs/heads/develop | 2021-07-18T09:40:44.172628 | 2019-01-31T13:04:27 | 2019-01-31T13:04:27 | 112,724,847 | 0 | 1 | BSD-3-Clause | 2019-01-31T11:09:29 | 2017-12-01T10:04:04 | Python | UTF-8 | Python | false | false | 4,234 | py | # coding: utf-8
"""
Access Control API
# The Access Control API ## Overview The Access Control API is an API exposed to other core components. It uses an API Key in an HTTP header to perform authentication and authorisation. Most of the API calls facilitates CRUD of the entities defined in the Access Control component. Others calls allows the retrieval of information in a form that is convenient for other components (most notably the Management Layer) to consume. # noqa: E501
OpenAPI spec version:
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PermissionCreate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'description': 'str'
}
attribute_map = {
'name': 'name',
'description': 'description'
}
def __init__(self, name=None, description=None): # noqa: E501
"""PermissionCreate - a model defined in OpenAPI""" # noqa: E501
self._name = None
self._description = None
self.discriminator = None
self.name = name
if description is not None:
self.description = description
@property
def name(self):
"""Gets the name of this PermissionCreate. # noqa: E501
:return: The name of this PermissionCreate. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PermissionCreate.
:param name: The name of this PermissionCreate. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 50:
raise ValueError("Invalid value for `name`, length must be less than or equal to `50`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this PermissionCreate. # noqa: E501
:return: The description of this PermissionCreate. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this PermissionCreate.
:param description: The description of this PermissionCreate. # noqa: E501
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PermissionCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"cobus.carstens@gmail.com"
] | cobus.carstens@gmail.com |
b49f37735e7b3e05cbe5e2ddc69c4518b3e7cdba | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/service-integration/tests/test_osparc_config.py | 77348df499a6c482c24c537408717bc0bf04e8c1 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 3,130 | py | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import json
from pathlib import Path
from pprint import pformat
from typing import Any
import pytest
import yaml
from models_library.service_settings_labels import SimcoreServiceSettingLabelEntry
from service_integration.osparc_config import MetaConfig, RuntimeConfig, SettingsItem
@pytest.fixture
def labels(tests_data_dir: Path, labels_fixture_name: str) -> dict[str, str]:
data = yaml.safe_load((tests_data_dir / "docker-compose-meta.yml").read_text())
service_name = {
"legacy": "dy-static-file-server",
"service-sidecared": "dy-static-file-server-dynamic-sidecar",
"compose-sidecared": "dy-static-file-server-dynamic-sidecar-compose-spec",
"rocket": "rocket",
}
labels_annotations = data["services"][service_name[labels_fixture_name]]["build"][
"labels"
]
# patch -> replaces some environs
if compose_spec := labels_annotations.get("simcore.service.compose-spec"):
if compose_spec == "${DOCKER_COMPOSE_SPECIFICATION}":
labels_annotations["simcore.service.compose-spec"] = json.dumps(
yaml.safe_load((tests_data_dir / "compose-spec.yml").read_text())
)
return labels_annotations
@pytest.mark.parametrize(
"labels_fixture_name",
["legacy", "service-sidecared", "compose-sidecared", "rocket"],
)
def test_load_from_labels(
labels: dict[str, str], labels_fixture_name: str, tmp_path: Path
):
meta_cfg = MetaConfig.from_labels_annotations(labels)
runtime_cfg = RuntimeConfig.from_labels_annotations(labels)
print(meta_cfg.json(exclude_unset=True, indent=2))
print(runtime_cfg.json(exclude_unset=True, indent=2))
# create yamls from config
for model in (runtime_cfg, meta_cfg):
config_path = (
tmp_path / f"{model.__class__.__name__.lower()}-{labels_fixture_name}.yml"
)
with open(config_path, "wt") as fh:
data = json.loads(
model.json(exclude_unset=True, by_alias=True, exclude_none=True)
)
yaml.safe_dump(data, fh, sort_keys=False)
# reload from yaml and compare
new_model = model.__class__.from_yaml(config_path)
assert new_model == model
@pytest.mark.parametrize(
"example_data", SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"]
)
def test_settings_item_in_sync_with_service_settings_label(
example_data: dict[str, Any]
):
print(pformat(example_data))
# First we parse with SimcoreServiceSettingLabelEntry since it also supports backwards compatibility
# and will upgrade old version
example_model = SimcoreServiceSettingLabelEntry.parse_obj(example_data)
# SettingsItem is exclusively for NEW labels, so it should not support backwards compatibility
new_model = SettingsItem(
name=example_model.name,
type=example_model.setting_type,
value=example_model.value,
)
# check back
SimcoreServiceSettingLabelEntry.parse_obj(new_model.dict(by_alias=True))
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
68be7d203cae104cbd639acae6fa2fd0e9babfc9 | d602881821bf49fe9ac246b9cc58e46440314725 | /src/utils.py | e1081d7a31a5597ee1b0573aba279a050dc1183c | [
"MIT"
] | permissive | nilax97/HTML2LaTeX-Convertor | 46775ab8b870d7ab609a92fa071efa1e54db22d2 | a7bdc8f53773f920dd6291575c77ecffee910fdf | refs/heads/master | 2022-06-12T04:04:00.584387 | 2020-05-05T08:03:38 | 2020-05-05T08:03:38 | 247,183,707 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | def node_from_tag(text):
clean = text.split("<")[1].split(">")[0]
if(clean[-1]=='/'):
clean = clean[:-2]
elif(clean[0]=='/'):
clean = clean[1:]
clean = clean.split()
tag = clean[0].upper()
attr = []
values = []
i = 0
while(True and len(clean)>1):
i = i + 1
if(i==len(clean)):
break
if "=" in clean[i]:
temp = clean[i].split("=")
attr.append(temp[0].strip())
if(temp[1]!=""):
values.append(temp[1].replace("\"","").replace("\'","").strip())
else:
values.append(clean[i+1].replace("\"","").replace("\'","").strip())
i = i+1
else:
attr.append(clean[i])
temp = clean[i+1].split("=")
if(temp[1]!=""):
values.append(temp[1].replace("\"","").replace("\'","").strip())
i = i+1
else:
values.append(clean[i+2].replace("\"","").replace("\'","").strip())
i = i+2
return tag,attr,values | [
"agarwal.nilaksh@gmail.com"
] | agarwal.nilaksh@gmail.com |
3dac9cd531fdff6070c6f84ff2603f3c5ed04258 | 01bb8cdc7b8a0baa6e345e5bdc1a663b2a44a3f7 | /Chapter_7_User_Input_And_While_Loop/Practice2/9.counting.py | 3262a0a4021f7ba91a7f0a92bad8ee7c7ce740fa | [] | no_license | rishabhchopra1096/Python_Crash_Course_Code | 1e8076b38f89565fad9d9b68de879a972b0c96c8 | c1e1b0c90371d5913c201f46a0c8ceaec19b5af0 | refs/heads/master | 2021-04-25T21:25:37.351986 | 2017-11-03T08:11:13 | 2017-11-03T08:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | c_n = 0
while c_n < 10:
c_n += 1
if c_n % 2 == 0:
continue
else:
print(c_n)
# Starting from 0 , we add 1 and then
# We check weather the number is even or not.
# 1 is noteven , so it is printed.
# Loop starts again , 1 is added to 1 = 2
# If the number is even , we go back to the first line of the loop.
# The loop is entered as 2 < 10.
# We add 1 to 2 , which becomes 3.
| [
"noreply@github.com"
] | rishabhchopra1096.noreply@github.com |
76831e371c436f3e90d22d6a2e80b3045e8d2c8f | 16fe74651e6692ea3d8d0302b40ac42f3d58e0ca | /minimum_height_trees.py | 56664999a98a2eefaa414b7e83a4f5222312baa2 | [
"MIT"
] | permissive | Ahmed--Mohsen/leetcode | 7574f71b10dfb9582f62e856bbc2559d3b21b2a1 | ad8967a5d85ac54f53b3fcce04df1b4bdec5fd9e | refs/heads/master | 2021-01-18T14:34:06.987665 | 2015-12-23T21:17:27 | 2015-12-23T21:17:27 | 33,744,104 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,530 | py | # -*- coding: utf-8 -*-
"""
For a undirected graph with tree characteristics, we can choose any node as the root. The result graph is then a rooted tree. Among all possible rooted trees, those with minimum height are called minimum height trees (MHTs). Given such a graph, write a function to find all the MHTs and return a list of their root labels.
Format
The graph contains n nodes which are labeled from 0 to n - 1. You will be given the number n and a list of undirected edges (each edge is a pair of labels).
You can assume that no duplicate edges will appear in edges. Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.
Example 1:
Given n = 4, edges = [[1, 0], [1, 2], [1, 3]]
0
|
1
/ \
2 3
return [1]
Example 2:
Given n = 6, edges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]
0 1 2
\ | /
3
|
4
|
5
return [3, 4]
Hint:
How many MHTs can a graph have at most?
Note:
(1) According to the definition of tree on Wikipedia: “a tree is an undirected graph in which any two vertices are connected by exactly one path. In other words, any connected graph without simple cycles is a tree.”
(2) The height of a rooted tree is the number of edges on the longest downward path between the root and a leaf.
"""
class Solution(object):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
def findMinHeightTrees(self, n, edges):
# the idea is to move from the leave nodes and move
# in-ward till we end up with either one or two roots
# same idea as topological sort
# base case
if n == 1: return [0]
# keep track of the the undirected edges
adj = [set() for i in range(n)]
for i, j in edges:
adj[i].add(j)
adj[j].add(i)
# leaves are those nodes that have in-degree of length 1
leaves = [i for i in range(n) if len(adj[i]) == 1]
# do BFS topological sorting
while n > 2:
n -= len(leaves)
# next level to the current leaves
next_leaves = []
# visit all neighbors to each leave
for i in leaves:
# no need to visit all i neighbors, we are only insterested
# in the shortest path so any neighbor is valid
j = adj[i].pop()
adj[j].remove(i)
# new leave found
if len(adj[j]) == 1:
next_leaves.append(j)
# set next level to be visited
leaves = next_leaves
return leaves
s = Solution()
print s.findMinHeightTrees(4, [[1,0],[1,2],[1,3]]) | [
"ahmed7890@gmail.com"
] | ahmed7890@gmail.com |
f4905f97c634eff4c0a17d3953c22ba496f165dd | af177f43b9e879b849cae739073bb63d2fae96f5 | /Core/migrations/0013_alter_book_isbn_number.py | a6698229dc641a5774d3eddfd67cbac6e5601dcb | [] | no_license | conradylx/STX_Library | 9d5ac5399f7d9402c00908f13712d228cfd9b412 | 7edffe5d7c433bbe4e163d664706bba5f15918b8 | refs/heads/master | 2023-07-12T01:13:01.189052 | 2021-08-17T12:36:53 | 2021-08-17T12:36:53 | 390,417,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 3.2.5 on 2021-08-01 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Core', '0012_auto_20210731_1434'),
]
operations = [
migrations.AlterField(
model_name='book',
name='isbn_number',
field=models.CharField(max_length=40, verbose_name='ISBN'),
),
]
| [
"50596942+conradylx@users.noreply.github.com"
] | 50596942+conradylx@users.noreply.github.com |
972772172fb486be96c1e5a2785a3676c73ab5c0 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /11_Time_Series_Forecasting_with_Python/A_02/save_model.py | eec597448c0a8c70db1c341abf5a7e609ebe8f37 | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # save and load an ARIMA model that causes an error
from pandas import read_csv
# NOTE(review): statsmodels.tsa.arima_model (ARIMA/ARIMAResults) is the
# legacy API removed in statsmodels >= 0.13 -- confirm the pinned version.
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARIMAResults
# load data
series = read_csv('daily-total-female-births.csv', header=0, index_col=0, parse_dates=True, squeeze=True)
# prepare data
X = series.values
X = X.astype('float32')
# fit model: ARIMA(p=1, d=1, q=1) on the raw series
model = ARIMA(X, order=(1, 1, 1))
model_fit = model.fit()
# save model (pickled to disk)
model_fit.save('model.pkl')
# load model back to demonstrate the save/load round trip
loaded = ARIMAResults.load('model.pkl')
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
8a874264aad962b142b5d59bcc9e2b52791eec44 | 50f4d2bb1b1222bcb2eb0122c48a0dd254deddfc | /Algorithm Concept/Quicksort.py | 4e4c886e297de61f0a86b4de7d19a28a4d1c6f92 | [] | no_license | yejinee/Algorithm | 9ae1c40382e9dcd868a28d42fe1cc543b790c7f5 | 81d409c4d0ea76cf152a5f334e53a870bc0656a7 | refs/heads/master | 2023-04-13T01:29:44.579635 | 2023-04-05T06:23:26 | 2023-04-05T06:23:26 | 235,014,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | """
Q) NO.2751 수 정렬하기2 - 퀵정렬로 해결
[Heap Sort]
-Time complexity
(1) Worst case= O(n^2)
(2) Average case = O(nlogn)
(1) pivot을 정하고 고정시키는 방식으로 정렬
(2) pivot을 기준으로 left, right부분으로 나눠서 이 과정을 반복
- Partition process
: list에서 제일 첫번째 원소를 pivot으로 정하고 pivot을 고정시키는 과정
<순서>
1. i은 첫번째 원소, j는 마지막 원소 부터 시작
2.
(1) i의 경우 : 하나씩 index을 늘려가면서 pivot보다 큰 값 나오면 stop
(2) j의 경우 : 하나씩 index를 줄여가면서 pivot보다 작은 값나오면 stop
3.
IF i<j : i가 가리키는 값과 j가 가리키는 값을 서로 바꿈
ELSE: pivot값과 j가 가리키는 값을 서로 바꾸고 pivot을 고정시킨다.
"""
def partition(Arr, l, h):
    """Partition Arr[l:h+1] around the pivot Arr[l] (two-pointer scan).

    Mutates Arr in place, moves the pivot to its final sorted position,
    and returns (Arr, final_pivot_index).
    (Fix: removed the unused local ``count``.)
    """
    pivot = Arr[l]
    i = l
    j = h
    while i < j:
        # advance i to the first element greater than the pivot
        while Arr[i] <= pivot and i < h:
            i += 1
        # retreat j to the first element smaller than the pivot
        while Arr[j] >= pivot and j > l:
            j -= 1
        if i < j:
            Arr[i], Arr[j] = Arr[j], Arr[i]
    # fix the pivot into its final position
    Arr[l], Arr[j] = Arr[j], Arr[l]
    return Arr, j
def quicksort(Arr, l, h):
    """In-place quicksort of Arr[l:h+1] via recursive partitioning."""
    if l >= h:
        return
    Arr, pivot_pos = partition(Arr, l, h)
    quicksort(Arr, l, pivot_pos - 1)
    quicksort(Arr, pivot_pos + 1, h)
# smoke test: sort the sample list and show the result
A=[15,22,13,27,12,10,20,25]
quicksort(A,0,len(A)-1)
print(A)
| [
"kimyj9609@gmail.com"
] | kimyj9609@gmail.com |
f2bffb6fc6832a4dc5369a88546849b84f879c32 | d89a482aaf3001bbc4515f39af9ba474e1ae6062 | /stir/stir_model.py | 3ee1dc44261f1b9c08c97aff51f8673474e43f6c | [] | no_license | hongtao510/u_tool | 2925e3694aba81714cf83018c3f8520a7b503228 | 98c962cfb1f53c4971fb2b9ae22c882c0fae6497 | refs/heads/master | 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,609 | py | import numpy as np
import logging
import sys
import math
from django.utils import simplejson
def toJSON(stir_object):
    """Serialize a StirModel's attribute dict to a JSON string."""
    return simplejson.dumps(vars(stir_object))
def fromJSON(json_string):
    """Rebuild a StirModel from a JSON string produced by toJSON().

    Bug fix: the original called the undefined name ``stir`` (NameError);
    the class defined in this module is ``StirModel``.
    """
    stir_vars = simplejson.loads(json_string)
    # set_variables=True with vars_dict restores the saved attributes;
    # run_methods=False keeps the restored outputs instead of recomputing
    new_stir = StirModel(True, False, vars_dict=stir_vars)
    return new_stir
class StirModel(object):
    def __init__(self,set_variables=True, run_methods=True,
                 chemical_name=None,application_rate=None,column_height=None,spray_drift_fraction=None,direct_spray_duration=None,
                 molecular_weight=None,vapor_pressure=None,avian_oral_ld50=None, body_weight_assessed_bird=None, body_weight_tested_bird=None,
                 mineau_scaling_factor=None,mammal_inhalation_lc50=None,duration_mammal_inhalation_study=None,body_weight_assessed_mammal=None,
                 body_weight_tested_mammal=None,mammal_oral_ld50=None,
                 vars_dict=None):
        """Build a STIR model run.

        Defaults are installed first.  When set_variables is True the
        inputs come either wholesale from vars_dict (a previously
        serialized run, see fromJSON) or from the individual keyword
        arguments.  When run_methods is True the full equation chain
        (eq. 1-12 plus the eight results) executes immediately.
        """
        self.set_default_variables()
        if set_variables:
            # ``!= None`` kept for byte-compatibility; idiomatic form is
            # ``is not None``
            if vars_dict != None:
                # restore a saved run wholesale
                self.__dict__.update(vars_dict)
            else:
                self.set_variables(chemical_name,application_rate,column_height,spray_drift_fraction,direct_spray_duration,
                    molecular_weight,vapor_pressure,avian_oral_ld50, body_weight_assessed_bird, body_weight_tested_bird, mineau_scaling_factor,
                    mammal_inhalation_lc50,duration_mammal_inhalation_study,body_weight_assessed_mammal, body_weight_tested_mammal,
                    mammal_oral_ld50)
        if run_methods:
            self.run_methods()
def set_default_variables(self):
#inputs
self.chemical_name = ''
self.application_rate = 1
self.column_height = 1
self.spray_drift_fraction = 1
self.direct_spray_duration = 1
self.molecular_weight = 1
self.vapor_pressure = 1
self.avian_oral_ld50 = 1
self.body_weight_assessed_bird = 1
self.body_weight_tested_bird = 1
self.mineau_scaling_factor = 1
self.mammal_inhalation_lc50 = 1
self.duration_mammal_inhalation_study = 1
self.body_weight_assessed_mammal = 1
self.body_weight_tested_mammal = 1
self.mammal_oral_ld50 = 1
#outputs
self.sat_air_conc = -1
self.inh_rate_avian = -1
self.vid_avian = -1
self.inh_rate_mammal = -1
self.vid_mammal = -1
self.ar2 = ''
self.air_conc = -1
self.sid_avian = -1
self.sid_mammal = -1
self.cf = ''
self.mammal_inhalation_ld50 = -1
self.adjusted_mammal_inhalation_ld50 = -1
self.estimated_avian_inhalation_ld50 = -1
self.adjusted_avian_inhalation_ld50 = -1
self.ratio_vid_avian = -1
self.ratio_sid_avian = -1
self.ratio_vid_mammal = -1
self.ratio_sid_mammal = -1
self.loc_vid_avian = ''
self.loc_sid_avian = ''
self.loc_vid_mammal = ''
self.loc_sid_mammal = ''
def __str__(self):
#inputs
string_rep = ''
string_rep = string_rep + self.chemical_name + "\n"
string_rep = string_rep + "application_rate = %.2e \n" % self.application_rate
string_rep = string_rep + "column_height = %.2e \n" % self.column_height
string_rep = string_rep + "spray_drift_fraction = %.2e \n" % self.spray_drift_fraction
string_rep = string_rep + "direct_spray_duration = %.2e \n" % self.direct_spray_duration
string_rep = string_rep + "molecular_weight = %.2e \n" % self.molecular_weight
string_rep = string_rep + "vapor_pressure = %.2e \n" % self.vapor_pressure
string_rep = string_rep + "avian_oral_ld50 = %.2e \n" % self.avian_oral_ld50
string_rep = string_rep + "body_weight_assessed_bird = %.2e \n" % self.body_weight_assessed_bird
string_rep = string_rep + "body_weight_tested_bird = %.2e \n" % self.body_weight_tested_bird
string_rep = string_rep + "mineau_scaling_factor = %.2e \n" % self.mineau_scaling_factor
string_rep = string_rep + "mammal_inhalation_lc50 = %.2e \n" % self.mammal_inhalation_lc50
string_rep = string_rep + "duration_mammal_inhalation_study = %.2e \n" % self.duration_mammal_inhalation_study
string_rep = string_rep + "body_weight_assessed_mammal = %.2e \n" % self.body_weight_assessed_mammal
string_rep = string_rep + "body_weight_tested_mammal = %.2e \n" % self.body_weight_tested_mammal
string_rep = string_rep + "mammal_oral_ld50 = %.2e \n" % self.mammal_oral_ld50
#outputs
string_rep = string_rep + "sat_air_conc = %.2e \n" % self.sat_air_conc
string_rep = string_rep + "inh_rate_avian = %.2e \n" % self.inh_rate_avian
string_rep = string_rep + "vid_avian = %.2e \n" % self.vid_avian
string_rep = string_rep + "inh_rate_mammal = %.2e \n" % self.inh_rate_mammal
string_rep = string_rep + "vid_mammal = %.2e \n" % self.vid_mammal
string_rep = string_rep + "ar2 = %.2e \n" % self.ar2
string_rep = string_rep + "air_conc = %.2e \n" % self.air_conc
string_rep = string_rep + "sid_avian = %.2e \n" % self.sid_avian
string_rep = string_rep + "sid_mammal = %.2e \n" % self.sid_mammal
string_rep = string_rep + "cf = %.2e \n" % self.cf
string_rep = string_rep + "mammal_inhalation_ld50 = %.2e \n" % self.self.mammal_inhalation_ld50
string_rep = string_rep + "adjusted_mammal_inhalation_ld50 = %.2e \n" % self.adjusted_mammal_inhalation_ld50
string_rep = string_rep + "estimated_avian_inhalation_ld50 = %.2e \n" % self.estimated_avian_inhalation_ld50
string_rep = string_rep + "adjusted_avian_inhalation_ld50 = %.2e \n" % self.adjusted_avian_inhalation_ld50
string_rep = string_rep + "ratio_vid_avian = %.2e \n" % self.ratio_vid_avian
string_rep = string_rep + "ratio_sid_avian = %.2e \n" % self.ratio_sid_avian
string_rep = string_rep + "ratio_vid_mammal = %.2e \n" % self.ratio_vid_mammal
string_rep = string_rep + "ratio_sid_mammal = %.2e \n" % self.ratio_sid_mammal
string_rep = string_rep + "loc_vid_avian =" + self.loc_vid_avian + "\n"
string_rep = string_rep + "loc_sid_avian =" + self.loc_sid_avian + "\n"
string_rep = string_rep + "loc_vid_mammal =" + self.loc_vid_mammal + "\n"
string_rep = string_rep + "loc_sid_mammal =" + self.loc_sid_mammal + "\n"
return string_rep
def set_variables(self,chemical_name,application_rate,column_height,spray_drift_fraction,direct_spray_duration,
molecular_weight,vapor_pressure,avian_oral_ld50,body_weight_assessed_bird,body_weight_tested_bird,mineau_scaling_factor,
mammal_inhalation_lc50,duration_mammal_inhalation_study,body_weight_assessed_mammal,body_weight_tested_mammal,
mammal_oral_ld50):
self.chemical_name = chemical_name
self.application_rate = application_rate
self.column_height = column_height
self.spray_drift_fraction = spray_drift_fraction
self.direct_spray_duration = direct_spray_duration
self.molecular_weight = molecular_weight
self.vapor_pressure = vapor_pressure
self.avian_oral_ld50 = avian_oral_ld50
self.body_weight_assessed_bird = body_weight_assessed_bird
self.body_weight_tested_bird = body_weight_tested_bird
self.mineau_scaling_factor = mineau_scaling_factor
self.mammal_inhalation_lc50 = mammal_inhalation_lc50
self.duration_mammal_inhalation_study = duration_mammal_inhalation_study
self.body_weight_assessed_mammal = body_weight_assessed_mammal
self.body_weight_tested_mammal = body_weight_tested_mammal
self.mammal_oral_ld50 = mammal_oral_ld50
def set_unit_testing_variables(self):
self.chemical_name_expected = None
self.sat_air_conc_expected = None
self.inh_rate_avian_expected = None
self.vid_avian_expected = None
self.inh_rate_mammal_expected = None
self.vid_mammal_expected = None
self.ar2_expected = None
self.air_conc_expected = None
self.sid_avian_expected = None
self.sid_mammal_expected = None
self.cf_expected = None
self.mammal_inhalation_ld50_expected = None
self.adjusted_mammal_inhalation_ld50_expected = None
self.estimated_avian_inhalation_ld50_expected = None
self.adjusted_avian_inhalation_ld50_expected = None
self.ratio_vid_avian_expected = None
self.ratio_sid_avian_expected = None
self.ratio_vid_mammal_expected = None
self.ratio_sid_mammal_expected = None
self.loc_vid_avian_expected = None
self.loc_sid_avian_expected = None
self.loc_vid_mammal_expected = None
self.loc_sid_mammal_expected = None
def run_methods(self):
try:
self.CalcSatAirConc() #eq. 1
self.CalcInhRateAvian() #eq. 2
self.CalcVidAvian() #eq. 3
self.CalcInhRateMammal() #eq. 4
self.CalcVidMammal() #eq. 5
self.CalcConcAir() #eq. 6
self.CalcSidAvian() #eq. 7
self.CalcSidMammal() #eq. 8
self.CalcConvertMammalInhalationLC50toLD50() #eq. 9
self.CalcAdjustedMammalInhalationLD50() #eq. 10
self.CalcEstimatedAvianInhalationLD50() #eq. 11
self.CalcAdjustedAvianInhalationLD50() #eq. 12
self.ReturnRatioVidAvian() #results #1
self.ReturnLocVidAvian() #results #2
self.ReturnRatioSidAvian() #results #3
self.ReturnLocSidAvian() #results #4
self.ReturnRatioVidMammal() #results #5
self.ReturnLocVidMammal() #results #6
self.ReturnRatioSidMammal() #results #7
self.ReturnLocSidMammal() #results #8
except TypeError:
print "Type Error: Your variables are not set correctly."
#eq. 1 saturated air concentration in mg/m^3
def CalcSatAirConc(self):
if self.sat_air_conc == -1:
self.vapor_pressure = float(self.vapor_pressure)
self.molecular_weight = float(self.molecular_weight)
air_vol = 24.45
pressure = 760.0
conv = 1000000.0
self.sat_air_conc = (self.vapor_pressure * self.molecular_weight * conv)/(pressure * air_vol)
return self.sat_air_conc
#eq. 2 Avian inhalation rate
def CalcInhRateAvian(self):
if self.inh_rate_avian == -1:
self.body_weight_assessed_bird = float(self.body_weight_assessed_bird)
magic1 = 284.
magic2 = 0.77
conversion = 60.
activity_factor = 3.
self.inh_rate_avian = magic1 * (self.body_weight_assessed_bird**magic2) * conversion * activity_factor
return self.inh_rate_avian
#eq. 3 Maximum avian vapor inhalation dose
def CalcVidAvian(self):
if self.vid_avian == -1:
self.sat_air_conc = float(self.sat_air_conc)
self.inh_rate_avian = float(self.inh_rate_avian)
self.body_weight_assessed_bird = float(self.body_weight_assessed_bird)
duration_hours = 1.
conversion_factor = 1000000. # cm3/m3
self.vid_avian = (self.sat_air_conc * self.inh_rate_avian * duration_hours)/(conversion_factor * self.body_weight_assessed_bird) # 1 (hr) is duration of exposure
return self.vid_avian
#eq. 4 Mammalian inhalation rate
def CalcInhRateMammal(self):
if self.inh_rate_mammal == -1:
self.body_weight_assessed_mammal = float(self.body_weight_assessed_mammal)
magic1 = 379.0
magic2 = 0.8
minutes_conversion = 60.
activity_factor = 3
self.inh_rate_mammal = magic1 * (self.body_weight_assessed_mammal**magic2) * minutes_conversion * activity_factor
return self.inh_rate_mammal
#eq. 5 Maximum mammalian vapor inhalation dose
def CalcVidMammal(self):
if self.vid_mammal == -1:
self.sat_air_conc = float(self.sat_air_conc) # eq. 1
self.inh_rate_mammal = float(self.inh_rate_mammal) # eq. 4
self.body_weight_assessed_mammal = float(self.body_weight_assessed_mammal)
duration_hours = 1.
conversion_factor = 1000000.
self.vid_mammal = (self.sat_air_conc * self.inh_rate_mammal * duration_hours)/(conversion_factor * self.body_weight_assessed_mammal) # 1 hr = duration of exposure
return self.vid_mammal
#eq. 6 Air column concentration after spray
def CalcConcAir(self):
if self.air_conc == -1:
self.application_rate = float(self.application_rate)
self.column_height = float(self.column_height)
conversion_factor = 100. #cm/m
# conversion of application rate from lbs/acre to mg/cm2
cf_g_lbs = 453.59237
cf_mg_g = 1000.
cf_cm2_acre = 40468564.2
self.ar2 = (self.application_rate*cf_g_lbs*cf_mg_g)/cf_cm2_acre
self.air_conc = self.ar2/(self.column_height * conversion_factor)
return self.air_conc
#eq. 7 Avian spray droplet inhalation dose
def CalcSidAvian(self):
if self.sid_avian == -1:
self.air_conc = float(self.air_conc)
self.inh_rate_avian = float(self.inh_rate_avian)
self.direct_spray_duration = float(self.direct_spray_duration)
self.spray_drift_fraction = float(self.spray_drift_fraction)
self.body_weight_assessed_bird = float(self.body_weight_assessed_bird)
self.sid_avian = (self.air_conc * self.inh_rate_avian * self.direct_spray_duration * self.spray_drift_fraction)/(60.0 * self.body_weight_assessed_bird)
return self.sid_avian
#eq. 8 Mammalian spray droplet inhalation dose
def CalcSidMammal(self):
if self.sid_mammal == -1:
self.air_conc = float(self.air_conc)
self.inh_rate_mammal = float(self.inh_rate_mammal)
self.direct_spray_duration = float(self.direct_spray_duration)
self.spray_drift_fraction = float(self.spray_drift_fraction)
self.body_weight_assessed_mammal = float(self.body_weight_assessed_mammal)
self.sid_mammal = (self.air_conc * self.inh_rate_mammal * self.direct_spray_duration * self.spray_drift_fraction)/(60.0 * self.body_weight_assessed_mammal)
return self.sid_mammal
#eq. 9 Conversion of mammalian LC50 to LD50
def CalcConvertMammalInhalationLC50toLD50(self):
if self.mammal_inhalation_ld50 == -1:
self.mammal_inhalation_lc50 = float(self.mammal_inhalation_lc50)
#conversion factor
self.inh_rate_mammal = float(self.inh_rate_mammal)
self.body_weight_tested_mammal = float(self.body_weight_tested_mammal)
self.cf = ((self.inh_rate_mammal * 0.001)/self.body_weight_tested_mammal)
self.duration_mammal_inhalation_study = float(self.duration_mammal_inhalation_study)
activity_factor = 1
absorption = 1
self.mammal_inhalation_ld50 = self.mammal_inhalation_lc50 * absorption * self.cf * self.duration_mammal_inhalation_study * activity_factor
return self.mammal_inhalation_ld50
#eq. 10 Adjusted mammalian inhalation LD50
def CalcAdjustedMammalInhalationLD50(self):
if self.adjusted_mammal_inhalation_ld50 == -1:
self.mammal_inhalation_ld50 = float(self.mammal_inhalation_ld50)
self.body_weight_assessed_mammal = float(self.body_weight_assessed_mammal)
self.body_weight_tested_mammal = float(self.body_weight_tested_mammal)
magicpower = 0.25
self.adjusted_mammal_inhalation_ld50 = self.mammal_inhalation_ld50 * (self.body_weight_tested_mammal/self.body_weight_assessed_mammal)**magicpower
return self.adjusted_mammal_inhalation_ld50
#eq. 11 Estimated avian inhalation LD50
def CalcEstimatedAvianInhalationLD50(self):
if self.estimated_avian_inhalation_ld50 == -1:
self.avian_oral_ld50 = float(self.avian_oral_ld50)
self.mammal_inhalation_ld50 = float(self.mammal_inhalation_ld50)
self.mammal_oral_ld50 = float(self.mammal_oral_ld50)
three_five = 3.5
self.estimated_avian_inhalation_ld50 = (self.avian_oral_ld50 * self.mammal_inhalation_ld50)/(three_five * self.mammal_oral_ld50)
return self.estimated_avian_inhalation_ld50
#eq. 12 Adjusted avian inhalation LD50
def CalcAdjustedAvianInhalationLD50(self):
if self.adjusted_avian_inhalation_ld50 == -1:
self.estimated_avian_inhalation_ld50 = float(self.estimated_avian_inhalation_ld50)
self.body_weight_assessed_bird = float(self.body_weight_assessed_bird)
self.body_weight_tested_bird = float(self.body_weight_tested_bird)
self.mineau_scaling_factor = float(self.mineau_scaling_factor)
self.adjusted_avian_inhalation_ld50 = self.estimated_avian_inhalation_ld50 * (self.body_weight_assessed_bird/self.body_weight_tested_bird)**(self.mineau_scaling_factor - 1)
return self.adjusted_avian_inhalation_ld50
# ----------------------------------------------
# results
# ----------------------------------------------
# results #1: Ratio of avian vapor dose to adjusted inhalation LD50
def ReturnRatioVidAvian(self):
if self.ratio_vid_avian == -1:
self.vid_avian = float(self.vid_avian)
self.adjusted_avian_inhalation_ld50 = float(self.adjusted_avian_inhalation_ld50)
self.ratio_vid_avian = self.vid_avian/self.adjusted_avian_inhalation_ld50
return self.ratio_vid_avian
# results #2: Level of Concern for avian vapor phase risk
def ReturnLocVidAvian(self):
if self.ratio_vid_avian < 0.1:
self.loc_vid_avian = 'Exposure not Likely Significant'
else:
self.loc_vid_avian = 'Proceed to Refinements'
return self.loc_vid_avian
# results #3: Ratio of avian droplet inhalation dose to adjusted inhalation LD50
def ReturnRatioSidAvian(self):
if self.ratio_sid_avian == -1:
self.sid_avian = float(self.sid_avian)
self.adjusted_avian_inhalation_ld50 = float(self.adjusted_avian_inhalation_ld50)
self.ratio_sid_avian = self.sid_avian/self.adjusted_avian_inhalation_ld50
return self.ratio_sid_avian
# results #4: Level of Concern for avian droplet inhalation risk
def ReturnLocSidAvian(self):
if self.ratio_sid_avian < 0.1:
self.loc_sid_avian = 'Exposure not Likely Significant'
else:
self.loc_sid_avian = 'Proceed to Refinements'
return self.loc_sid_avian
# results #5: Ratio of mammalian vapor dose to adjusted inhalation LD50
def ReturnRatioVidMammal(self):
if self.ratio_vid_mammal == -1:
self.vid_mammal = float(self.vid_mammal)
self.adjusted_mammal_inhalation_ld50 = float(self.adjusted_mammal_inhalation_ld50)
self.ratio_vid_mammal = self.vid_mammal/self.adjusted_mammal_inhalation_ld50
return self.ratio_vid_mammal
# results #6: Level of Concern for mammalian vapor phase risk
def ReturnLocVidMammal(self):
if self.ratio_vid_mammal < 0.1:
self.loc_vid_mammal = 'Exposure not Likely Significant'
else:
self.loc_vid_mammal = 'Proceed to Refinements'
return self.loc_vid_mammal
# results #7: Ratio of mammalian droplet inhalation dose to adjusted inhalation LD50
def ReturnRatioSidMammal(self):
if self.ratio_sid_mammal == -1:
self.sid_mammal = float(self.sid_mammal)
self.adjusted_mammal_inhalation_ld50 = float(self.adjusted_mammal_inhalation_ld50)
self.ratio_sid_mammal = self.sid_mammal/self.adjusted_mammal_inhalation_ld50
return self.ratio_sid_mammal
# results #8: Level of Concern for mammaliam droplet inhalation risk
def ReturnLocSidMammal(self):
if self.ratio_sid_mammal < 0.1:
self.loc_sid_mammal = 'Exposure not Likely Significant'
else:
self.loc_sid_mammal = 'Proceed to Refinements'
return self.loc_sid_mammal
def main():
    """Smoke test: build a model, round-trip it through JSON, dump both.

    Bug fix: the original called the undefined name ``stir``; the class
    is ``StirModel``.  Prints use the single-arg call form, identical on
    Python 2 and 3.
    """
    test_stir = StirModel(True, True, 1, 1, 1, 1, 1, 1, 1, 1)
    print(vars(test_stir))
    stir_json = toJSON(test_stir)
    new_stir = fromJSON(stir_json)
    print(vars(new_stir))
# run the smoke test only when executed as a script
if __name__ == '__main__':
    main()
"hongtao510@gmail.com"
] | hongtao510@gmail.com |
241bfa7d4854c270c2e7607c981567bfa15c8063 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_lactated.py | e6bcd5dc984c751dcdeb351346c6cd15ead3cc72 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _LACTATED():
def __init__(self,):
self.name = "LACTATED"
self.definitions = lactate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['lactate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
889aa45605c9e36c5387d79620b32ed507e15049 | 1ab243320bc1f1ee9dde3b0a1e3f1a418e6d5299 | /apps/save/views.py | 00b3d01a4bc5ea6a4679803c23838e86289c276f | [] | no_license | sjweil9/Djangos_Dungeon | a6bdd9fd4b8012ebdfb14caf2de41635a53ab089 | 2d2f1ceceddd6200db3de591c8f93926d973a704 | refs/heads/master | 2021-09-05T07:05:17.403789 | 2018-01-25T02:18:07 | 2018-01-25T02:18:07 | 108,317,359 | 0 | 1 | null | 2018-01-25T02:17:13 | 2017-10-25T19:39:19 | JavaScript | UTF-8 | Python | false | false | 4,624 | py | from django.shortcuts import render, redirect, HttpResponse
from .models import *
from django.contrib import messages
import json, ast
def game(req):
    # Render the game page; no session check here -- the client fetches
    # state via /load, which does enforce auth.
    return render(req, 'save/game.html')
def signin(req):
    """Landing page: logged-in users go to the dashboard, others sign in."""
    if 'id' in req.session:
        return redirect('/dashboard')
    return render(req, 'save/signin.html')
def save(req):
    """POST endpoint: persist the active character's state from req.POST.

    NOTE(review): a non-POST request falls through and returns None,
    which Django rejects -- presumably this view is only hit via AJAX
    POSTs; confirm against the client code.
    """
    if req.method == "POST":
        if 'id' not in req.session:
            return redirect('/')
        if 'char' not in req.session:
            return redirect('/dashboard')
        Character.objects.save_char(req.session['char'], req.POST)
        return HttpResponse()
def load(req):
    """Return the active character's saved JSON, or just its name if never saved."""
    if 'id' not in req.session:
        return redirect('/')
    if 'char' not in req.session:
        return redirect('/dashboard')
    loaded = Character.objects.get(id=req.session['char'])
    if not loaded.character:
        # brand-new character: only the name chosen at creation exists
        data = {'name': req.session['charname']}
        data = json.dumps(data)
        return HttpResponse(data, content_type='application/json')
    # character field already holds a JSON string; pass it through verbatim
    character = loaded.character
    return HttpResponse(character, content_type='application/json')
def register(req):
    """Create an account; flash validation errors and bounce to the landing page."""
    if req.method == "POST":
        errors = User.objects.validate_user(req.POST)
        if len(errors):
            # Python 2 idiom (iteritems); each field error is flashed with its tag
            for tag, error in errors.iteritems():
                messages.error(req, error, extra_tags=tag)
        else:
            user = User.objects.create_user(req.POST)
            if user:
                req.session['id'] = user.id
                return redirect('/intro')
            else:
                # create_user returned falsy -- treated as a duplicate username
                messages.error(req, "That username is already taken!", extra_tags="usernamelen")
        # tell the landing page to show the register form again
        req.session['status'] = "register"
    return redirect('/')
def login(req):
    """Authenticate; set the session id on success, flash the error otherwise."""
    if req.method == "POST":
        res = User.objects.login(req.POST)
        if res['status']:
            req.session['id'] = res['user'].id
            return redirect('/dashboard')
        else:
            messages.error(req, res['error'], extra_tags="login")
    # tell the landing page to show the login form
    req.session['status'] = "login"
    return redirect('/')
def logout(req):
    # wipe the whole session (auth id, active char, status flags)
    req.session.clear()
    return redirect('/')
def intro(req):
    """Intro page shown right after registration; requires a session."""
    if 'id' not in req.session:
        return redirect('/')
    return render(req, 'save/intro.html')
def dashboard(req):
    """Show the user's saved characters plus a global XP leaderboard."""
    if 'id' not in req.session:
        return redirect('/')
    context = {
        'user': User.objects.get(id=req.session['id']),
        'characters': [],
        'totalchars': []
    }
    # this user's characters; never-saved slots (empty character field) are skipped
    characters = list(Character.objects.filter(user=context['user']))
    for character in characters:
        if character.character:
            loaded = json.loads(character.character)
            loaded['id'] = character.id
            context['characters'].append(loaded)
    # every saved character in the system, for the leaderboard
    totalchars = list(Character.objects.all())
    for character in totalchars:
        if character.character:
            loaded = json.loads(character.character)
            loaded['id'] = character.id
            context['totalchars'].append(loaded)
    # rank by experience, highest first
    context['totalchars'] = sorted(context['totalchars'], key=lambda k: k['xp'], reverse=True)
    return render(req, 'save/dashboard.html', context)
def newchar(req):
    """POST: create a character slot (max 3 per user) and enter the game."""
    if 'id' not in req.session:
        return redirect('/')
    if req.method == "POST":
        # new_char returns falsy when the 3-character limit is reached
        new = Character.objects.new_char(req.session['id'])
        if new:
            req.session['char'] = new.id
            req.session['charname'] = req.POST['charname']
            return redirect('/game')
        else:
            messages.error(req, "You may only have 3 characters. Please delete one to make another.", extra_tags="char")
    return redirect('/dashboard')
def start(req, charid):
    """Resume an existing character, but only if it belongs to this user."""
    if 'id' not in req.session:
        return redirect('/')
    if Character.objects.filter(id=charid).exists():
        character = Character.objects.get(id=charid)
        # ownership check: silently fall through to the dashboard otherwise
        if character.user.id == req.session['id']:
            req.session['char'] = character.id
            return redirect('/game')
    return redirect('/dashboard')
def character(req, charid):
    """Public character detail page (no ownership check -- any id works).

    NOTE(review): a character row with an empty ``character`` field would
    make json.loads raise here -- confirm links only point at saved chars.
    """
    if 'id' not in req.session:
        return redirect('/')
    if Character.objects.filter(id=charid).exists():
        loaded = Character.objects.get(id=charid)
        chardata = json.loads(loaded.character)
        return render(req, 'save/char.html', {'character': chardata})
    return redirect('/dashboard')
def delete(req, charid):
    """Delete a character, but only if it belongs to the logged-in user."""
    if 'id' not in req.session:
        return redirect('/')
    if Character.objects.filter(id=charid).exists():
        loaded = Character.objects.get(id=charid)
        if loaded.user.id == req.session['id']:
            Character.objects.delete_char(charid)
    return redirect('/dashboard')
"stephen.weil@gmail.com"
] | stephen.weil@gmail.com |
77782568d1cafff34f9406d2f741714b19433619 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02663/s559574798.py | 36a0de215ceb319a077ac34b3f6f7bad3915d65f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | H1,M1,H2,M2,K = map(int,input().split())
h1 = 60*H1+M1
h2 = 60*H2+M2
d = h2-h1
print(d-K) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
13be5404a48b9269fddcac2f7bcee5c857ff102f | fd0d8b010d45f959f0660afb192c7349e266a329 | /competitive/AtCoder/ABC216/B.py | 079182f20988da04944c1de663e36a16a784ef31 | [
"MIT"
] | permissive | pn11/benkyokai | b650f5957545fdefbea7773aaae3f61f210f69ce | 9ebdc46b529e76b7196add26dbc1e62ad48e72b0 | refs/heads/master | 2023-01-28T01:38:29.566561 | 2021-10-03T04:20:14 | 2021-10-03T04:20:14 | 127,143,471 | 0 | 0 | MIT | 2023-01-07T07:19:05 | 2018-03-28T13:20:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,168 | py | import bisect
from collections import deque
from copy import deepcopy
from fractions import Fraction
from functools import reduce
import heapq as hq
import io
from itertools import combinations, permutations
import math
from math import factorial
import re
import sys
sys.setrecursionlimit(10000)
#from numba import njit
import numpy as np
_INPUT_1 = """\
3
tanaka taro
sato hanako
tanaka taro
"""
_INPUT_2 = """\
3
saito ichiro
saito jiro
saito saburo
"""
_INPUT_3 = """\
4
sypdgidop bkseq
sypdgidopb kseq
ozjekw mcybmtt
qfeysvw dbo
"""
def solve():
    """Read N full names from stdin; print 'Yes' iff any (family, given)
    pair appears more than once, else 'No'."""
    n = int(input())
    distinct = set()
    for _ in range(n):
        family, given = input().split()
        distinct.add((family, given))
    # fewer distinct pairs than input lines means a duplicate existed
    print('Yes' if len(distinct) != n else 'No')
# Local run (not submitted as Main.py): feed each embedded sample input
# through solve(); on the judge just read real stdin once.
if __file__ != './Main.py':
    if '_INPUT_1' in globals():
        sys.stdin = io.StringIO(_INPUT_1)
        solve()
    if '_INPUT_2' in globals():
        sys.stdin = io.StringIO(_INPUT_2)
        solve()
    if '_INPUT_3' in globals():
        sys.stdin = io.StringIO(_INPUT_3)
        solve()
else:
    solve()
| [
"pn11@users.noreply.github.com"
] | pn11@users.noreply.github.com |
84dab7469a92a1687a7871963fbe15489cf73d99 | cefab48dff8fc40786f0a45f3df272646365e9f5 | /python/magnetics/probe_g2.py | d0e6c52d313b1d86545c376d95c08c12931dea54 | [] | no_license | shaunhaskey/pyMARS | d40265bd2d445f0429ae7177f2e75d83f0ba8b30 | e2424088492a8ab2f34acf62db42a77e44d5bc3b | refs/heads/master | 2020-12-25T17:24:28.392539 | 2016-08-01T22:14:27 | 2016-08-01T22:14:27 | 17,684,575 | 0 | 0 | null | 2014-03-13T03:41:59 | 2014-03-12T21:21:08 | Python | UTF-8 | Python | false | false | 4,291 | py | '''
Appears to be older than probe_g.py - this one doesn't contain my Biot-Savart calcuations
Just does probe_g and MARS comparison.
SH:14Sept2012
'''
import numpy as num
import os, time
import PythonMARS_funcs as pyMARS
import results_class as res
import RZfuncs
# Coil setup: N coils, toroidal mode number n, per-coil currents I
N = 6; n = 2; I = num.array([1.,-1.,0.,1,-1.,0.])
#print 'phi_location %.2f deg'%(phi_location)
template_dir = '/u/haskeysr/mars/templates/PROBE_G_TEMPLATE/'
base_run_dir = '/u/haskeysr/PROBE_G_RUNS/'
project_name = 'phi_scan/'
run_dir = base_run_dir + project_name
# NOTE(review): Python 2 script throughout (print statements, file()
# builtin); run directory is created by copying the template tree.
print run_dir
os.system('mkdir '+run_dir)
os.system('cp -r ' + template_dir +'* ' + run_dir)
print 'go to new directory'
os.chdir(run_dir + 'PROBE_G')

# read both input templates into memory for placeholder substitution
probe_g_template = file('probe_g.in', 'r')
probe_g_template_txt = probe_g_template.read()
probe_g_template.close()

diiid = file('diiid.in', 'r')
diiid_txt = diiid.read()
diiid.close()

#a, b = coil_responses6(1,1,1,1,1,1,Navg=120,default=1)
# Magnetic probe geometry tables (one entry per named probe)
probe = [ '67A', '66M', '67B', 'ESL', 'ISL','UISL','LISL','Inner_pol','Inner_rad']
# probe type 1: poloidal field, 2: radial field
probe_type = num.array([ 1,     1,     1,     0,     0,      0,     0,  1,0])

# Poloidal geometry
Rprobe = num.array([ 2.265, 2.413, 2.265, 2.477, 2.431, 2.300, 2.300,1.,1.])
Zprobe = num.array([ 0.755, 0.0,-0.755, 0.000, 0.000, 0.714,-0.714,0.,0.])
tprobe = num.array([ -67.5, -90.0,-112.5, 0.000, 0.000, 22.6, -22.6,-90.,0.])*2*num.pi/360 #DTOR # poloidal inclination
lprobe = num.array([ 0.155, 0.140, 0.155, 1.194, 0.800, 0.680, 0.680, 0.05,0.05]) # Length of probe

# work on a single probe, selected by name
probe_name = 'UISL'
k = probe.index(probe_name)

#Generate interpolation points
Rprobek, Zprobek = pyMARS.pickup_interp_points(Rprobe[k], Zprobe[k], lprobe[k], tprobe[k], probe_type[k], 800)

#Generate the points string and modify the .in file
r_flattened = Rprobek.flatten()
z_flattened = Zprobek.flatten()

# override: a full toroidal sweep at the probe's average (R, Z) location
phi_flattened = num.linspace(0,360,num=800)
r_flattened = phi_flattened * 0 + num.average(Rprobek.flatten())
z_flattened = phi_flattened * 0 + num.average(Zprobek.flatten())

print 'r',r_flattened
print 'z', z_flattened
print 'phi', phi_flattened

points_string = ''
print len(r_flattened)
for i in range(0,len(r_flattened)):
    points_string+='%.3f %.3f %.3f\n'%(r_flattened[i], phi_flattened[i], z_flattened[i])

# substitute the point list into the probe_g input template and rewrite it
changes = {'<<npts>>' : str(len(r_flattened)),
           '<<points>>' : points_string}

for tmp_key in changes.keys():
    probe_g_template_txt = probe_g_template_txt.replace(tmp_key, changes[tmp_key])

probe_g_template = file('probe_g.in', 'w')
probe_g_template.write(probe_g_template_txt)
probe_g_template.close()

# coil currents for the upper/lower I-coil rows in diiid.in
diiid_changes = {'<<upper>>': '1000 -1000 0 1000 -1000 0',
                 '<<lower>>': '1000 -1000 0 1000 -1000 0'}

for tmp_key in diiid_changes:
    diiid_txt = diiid_txt.replace(tmp_key, diiid_changes[tmp_key])

diiid = file('diiid.in', 'w')
diiid.write(diiid_txt)
diiid.close()

#run probe_g
os.system('./probe_g')

#Read the output file
results = num.loadtxt('probe_gb.out', skiprows=8)
# column layout (per probe_g output): phi, R, Z, B_phi, B_R, B_Z
B_R = results[:,4]
B_phi =results[:,3]
B_Z= results[:,5]
phi_out = results[:,0]
R_out = results[:,1]
Z_out = results[:,2]

'''
print 'get the answer from MARS'
I0EXP = RZfuncs.I0EXP_calc(N,n,I)
base_dir = '/u/haskeysr/mars/grid_check10/qmult1.000/exp1.000/marsrun/'
Nchi=513
#plas_run = res.data(base_dir + 'RUNrfa.p', I0EXP = I0EXP, Nchi=Nchi)
vac_run = res.data(base_dir + 'RUNrfa.vac', I0EXP = I0EXP, Nchi=Nchi)

grid_r = vac_run.R*vac_run.R0EXP
grid_z = vac_run.Z*vac_run.R0EXP
'''
# plot the Biot-Savart field components (Gauss) around the torus
import matplotlib.pyplot as pt
fig = pt.figure()
ax = fig.add_subplot(111)
ax.plot(phi_flattened, B_R*10000., 'r-')
ax.plot(phi_flattened, B_Z*10000., 'k-')
fig.canvas.draw()
fig.show()

'''
Brprobek, Bzprobek = pyMARS.pickup_field_interpolation(grid_r, grid_z, vac_run.Br, vac_run.Bz, vac_run.Bphi, num.array(Rprobek), num.array(Zprobek))

import matplotlib.pyplot as pt
fig = pt.figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)

ax.plot(Rprobek, num.abs(Brprobek), 'b-')
ax2.plot(Rprobek, num.abs(B_R*10000), 'b--')
ax.plot(Zprobek, num.abs(Bzprobek), 'k-')
ax2.plot(Zprobek, num.abs(B_Z*10000), 'k--')
fig.canvas.draw()
fig.show()

fig = pt.figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)

ax.plot(Rprobek, R_out, 'b-')
ax2.plot(Zprobek, Z_out, 'b--')
fig.canvas.draw()
fig.show()
'''
| [
"shaunhaskey@gmail.com"
] | shaunhaskey@gmail.com |
d9f4f1cea64200274bfa01806a671624371f6713 | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /simulationsOFC/pareto2/arch5_pod100_old/copyfile.py | bf351a7f1acedff879227fffabadbccab5ef7f28 | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | # -*- coding: utf-8 -*-
"""
Created on Sat May 28 23:27:05 2016
@author: li
"""
import os
# dirname = os.path.dirname(os.path.realpath(__file__))
dirname = ''


def _render_template(src, dst, replacements):
    """Copy *src* to *dst*, substituting whole lines by 0-based index.

    *replacements* maps a line index to replacement text (which must carry
    its own trailing newline); every other line is copied verbatim.  Using
    ``with`` guarantees both files are closed even if a write fails, which
    the original manual open()/close() pairs did not.
    """
    with open(src, "r") as source, open(dst, "w") as destination:
        for index, line in enumerate(source):
            destination.write(replacements.get(index, line))


# Generate the 20 per-job Python scripts: line 22 (0-based) of the template
# is replaced with the job-index assignment.
for i in range(20):
    src = os.path.join(dirname, 'template_runsimu_connections.py')
    dst = os.path.join(dirname, 'pareto' + str(i) + '.py')
    _render_template(src, dst, {22: "i = " + str(i) + " \n"})

# Generate the matching SLURM batch scripts: job name, stdout/stderr paths,
# the pdcp copy line and the python invocation are patched by line number.
for i in range(20):
    src = os.path.join(dirname, 'template_runsimu_connections.sh')
    dst = os.path.join(dirname, 'pareto' + str(i) + '.sh')
    _render_template(src, dst, {
        3: "#SBATCH -J arch5_old_" + str(i) + "\n",
        6: "#SBATCH -o arch5_old_" + str(i) + ".stdout\n",
        7: "#SBATCH -e arch5_old_" + str(i) + ".stderr\n",
        17: "pdcp pareto" + str(i) + ".py $TMPDIR\n",
        21: "python pareto" + str(i) + ".py\n",
    })

# Emit one sbatch submission command per generated script.
with open('run_sbatch.txt', 'w') as f:
    for i in range(20):
        f.write('sbatch pareto' + str(i) + '.sh\n')
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
334d40336a44583c5a3fb7a02ac6cf793065ded1 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons_contrib/oscurart_mesh_cache_tools.py | d03242a458076a27dc5fe53a594e6492e4cc4780 | [
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-lat... | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 17,124 | py | bl_info = {
"name": "Mesh Cache Tools",
"author": "Oscurart",
"version": (1, 0, 1),
"blender": (2, 70, 0),
"location": "Tools > Mesh Cache Tools",
"description": "Tools for Management Mesh Cache Process",
"warning": "",
"wiki_url": "",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Import-Export"}
import bpy
import os
import struct
from bpy_extras.io_utils import ImportHelper
from bpy.types import (
Operator,
Panel,
PropertyGroup,
AddonPreferences,
)
from bpy.props import (
BoolProperty,
IntProperty,
StringProperty,
PointerProperty,
CollectionProperty,
)
from bpy.app.handlers import persistent
class OscurartMeshCacheModifiersSettings(PropertyGroup):
array: BoolProperty(default=True)
bevel: BoolProperty(default=True)
boolean: BoolProperty(default=True)
build: BoolProperty(default=True)
decimate: BoolProperty(default=True)
edge_split: BoolProperty(default=True)
mask: BoolProperty(default=True)
mirror: BoolProperty(default=True)
multires: BoolProperty(default=True)
remesh: BoolProperty(default=True)
screw: BoolProperty(default=True)
skin: BoolProperty(default=True)
solidify: BoolProperty(default=True)
subsurf: BoolProperty(default=True)
triangulate: BoolProperty(default=True)
wireframe: BoolProperty(default=True)
cloth: BoolProperty(default=True)
# ----------------- AUTO LOAD PROXY
# bpy.context.scene.pc_auto_load_proxy.remove(0)
class CreaPropiedades(Operator):
    """Populate the scene's proxy auto-load list with every linked collection."""
    bl_idname = "scene.pc_auto_load_proxy_create"
    bl_label = "Create Auto Load PC Proxy List"

    def execute(self, context):
        proxy_list = bpy.context.scene.pc_auto_load_proxy
        # Only collections that come from an external library are relevant.
        for collection in bpy.data.collections:
            if collection.library is None:
                continue
            entry = proxy_list.add()
            entry.name = collection.name
            entry.use_auto_load = False
        return {'FINISHED'}
class RemuevePropiedades(Operator):
    """Clear the scene's proxy auto-load list."""
    bl_idname = "scene.pc_auto_load_proxy_remove"
    bl_label = "Remove Auto Load PC Proxy List"

    def execute(self, context):
        # The original iterated the collection while calling remove(0) on
        # it, mutating the sequence being iterated.  Removing by count is
        # equivalent (one remove(0) per element) but well defined.
        coll = bpy.context.scene.pc_auto_load_proxy
        for _ in range(len(coll)):
            coll.remove(0)
        return {'FINISHED'}
class OscurartMeshCacheSceneAutoLoad(PropertyGroup):
name: StringProperty(
name="GroupName",
default=""
)
use_auto_load: BoolProperty(
name="Bool",
default=False
)
@persistent
def CargaAutoLoadPC(dummy):
for gr in bpy.context.scene.pc_auto_load_proxy:
if gr.use_auto_load:
for ob in bpy.data.collections[gr.name].objects:
for MOD in ob.modifiers:
if MOD.type == "MESH_CACHE":
MOD.cache_format = "PC2"
MOD.forward_axis = "POS_Y"
MOD.up_axis = "POS_Z"
MOD.flip_axis = set(())
MOD.frame_start = bpy.context.scene.pc_pc2_start
abspath = os.path.abspath(bpy.path.abspath("//" + bpy.context.scene.pc_pc2_folder))
MOD.filepath = "%s/%s.pc2" % (abspath, ob.name)
bpy.app.handlers.load_post.append(CargaAutoLoadPC)
# - PANELS -
class View3DMCPanel():
    # Mixin carrying the shared 3D-View / toolbar placement for this
    # add-on's panels; subclasses combine it with bpy.types.Panel.
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
class OscEPc2ExporterPanel(View3DMCPanel, Panel):
bl_category = "Tools"
bl_label = "Mesh Cache Tools"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
scene = context.scene
row = layout.column(align=1)
row.prop(scene, "pc_pc2_folder", text="Folder")
row.operator("buttons.set_meshcache_folder", icon='FILEBROWSER', text="Select Folder Path")
row = layout.box().column(align=1)
row.label(text="EXPORTER:")
row.operator("group.linked_group_to_local", text="Linked To Local", icon="LINKED")
row.operator("object.remove_subsurf_modifier", text="Remove Gen Modifiers", icon="MOD_SUBSURF")
row.prop(scene.mesh_cache_tools_settings, "array", text="Array")
row.prop(scene.mesh_cache_tools_settings, "bevel", text="Bevel")
row.prop(scene.mesh_cache_tools_settings, "boolean", text="Boolean")
row.prop(scene.mesh_cache_tools_settings, "build", text="Build")
row.prop(scene.mesh_cache_tools_settings, "decimate", text="Decimate")
row.prop(scene.mesh_cache_tools_settings, "edge_split", text="Edge Split")
row.prop(scene.mesh_cache_tools_settings, "mask", text="Mask")
row.prop(scene.mesh_cache_tools_settings, "mirror", text="Mirror")
row.prop(scene.mesh_cache_tools_settings, "multires", text="Multires")
row.prop(scene.mesh_cache_tools_settings, "remesh", text="Remesh")
row.prop(scene.mesh_cache_tools_settings, "screw", text="Screw")
row.prop(scene.mesh_cache_tools_settings, "skin", text="Skin")
row.prop(scene.mesh_cache_tools_settings, "solidify", text="Solidify")
row.prop(scene.mesh_cache_tools_settings, "subsurf", text="Subsurf")
row.prop(scene.mesh_cache_tools_settings, "triangulate", text="Triangulate")
row.prop(scene.mesh_cache_tools_settings, "wireframe", text="Wireframe")
# row = layout.column(align=1)
row.prop(scene, "pc_pc2_start", text="Frame Start")
row.prop(scene, "pc_pc2_end", text="Frame End")
row.prop(scene, "pc_pc2_exclude", text="Exclude Token:")
row.prop_search(scene, "pc_pc2_group", bpy.data, "collections", text="")
row.operator("export_shape.pc2_selection", text="Export!", icon="POSE_DATA")
row.prop(scene, "pc_pc2_world_space", text="World Space")
row = layout.box().column(align=1)
row.label(text="IMPORTER:")
row.operator("import_shape.pc2_selection", text="Import", icon="POSE_DATA")
row.operator("object.modifier_mesh_cache_up", text="MC Top", icon="TRIA_UP")
row = layout.box().column(align=1)
row.label(text="PROXY AUTO LOAD:")
row.operator("scene.pc_auto_load_proxy_create", text="Create List", icon="GROUP")
row.operator("scene.pc_auto_load_proxy_remove", text="Remove List", icon="X")
for i in scene.pc_auto_load_proxy:
if bpy.data.collections[i.name].library is not None:
row = layout.row()
row.prop(bpy.data.collections[i.name], "name", text="")
row.prop(i, "use_auto_load", text="")
def OscSetFolder(self, context, filepath):
fp = filepath if os.path.isdir(filepath) else os.path.dirname(filepath)
try:
os.chdir(os.path.dirname(bpy.data.filepath))
except Exception as e:
self.report({'WARNING'}, "Folder could not be set: {}".format(e))
return {'CANCELLED'}
rfp = os.path.relpath(fp)
for sc in bpy.data.scenes:
sc.pc_pc2_folder = rfp
return {'FINISHED'}
class OscMeshCacheButtonSet(Operator, ImportHelper):
bl_idname = "buttons.set_meshcache_folder"
bl_label = "Set Mesh Cache Folder"
filename_ext = ".txt"
def execute(self, context):
return OscSetFolder(self, context, self.filepath)
def OscFuncExportPc2(self):
    """Bake every mesh in the configured collection to a .pc2 point cache.

    Reads the export settings (frame range, target folder, collection name,
    exclusion tokens, world-space flag) from the current scene and writes
    one "<object name>.pc2" file per mesh object.  *self* is the calling
    operator; it is accepted for the call signature but not used.
    """
    start = bpy.context.scene.pc_pc2_start
    end = bpy.context.scene.pc_pc2_end
    folderpath = bpy.context.scene.pc_pc2_folder
    # NOTE(review): framerange is used as a divisor below, so start == end
    # would divide by zero — confirm the UI prevents an empty range.
    framerange = end - start
    for ob in bpy.data.collections[bpy.context.scene.pc_pc2_group].objects[:]:
        # Skip an object only when its name contains EVERY exclusion token.
        if any(token not in ob.name for token in bpy.context.scene.pc_pc2_exclude.split(",")):
            bpy.context.window_manager.progress_begin(0, 100) # progressbar
            if ob.type == "MESH":
                with open("%s/%s.pc2" % (os.path.normpath(folderpath), ob.name), mode="wb") as file:
                    # header
                    # PC2 header: magic string, version 1, vertex count,
                    # start sample, sample rate, sample count.
                    headerFormat = '<12siiffi'
                    headerStr = struct.pack(headerFormat,
                                b'POINTCACHE2\0', 1, len(ob.data.vertices[:]), 0, 1.0, (end + 1) - start)
                    file.write(headerStr)
                    # bake
                    obmat = ob.matrix_world
                    for i, frame in enumerate(range(start, end + 1)):
                        print("Percentage of %s bake: %s " % (ob.name, i * 100 / framerange))
                        bpy.context.window_manager.progress_update(i * 100 / framerange) # progressbarUpdate
                        bpy.context.scene.frame_set(frame)
                        # NOTE(review): this is the Blender 2.7x signature of
                        # new_from_object(); bl_info targets 2.80 where the
                        # API changed — confirm against the shipping Blender.
                        me = bpy.data.meshes.new_from_object(
                            scene=bpy.context.scene,
                            object=ob,
                            apply_modifiers=True,
                            settings="RENDER",
                            calc_tessface=True,
                            calc_undeformed=False
                            )
                        # rotate
                        if bpy.context.scene.pc_pc2_world_space:
                            me.transform(obmat)
                            me.calc_normals()
                        # create archive
                        # One packed little-endian XYZ float triple per vertex.
                        for vert in me.vertices[:]:
                            file.write(struct.pack("<3f", *vert.co))
                        # drain mesh
                        bpy.data.meshes.remove(me)
            print("%s Bake finished!" % (ob.name))
            bpy.context.window_manager.progress_end() # progressBarClose
    print("Bake Totally Finished!")
class OscPc2ExporterBatch(Operator):
bl_idname = "export_shape.pc2_selection"
bl_label = "Export pc2 for selected Objects"
bl_description = "Export pc2 for selected Objects"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return(bpy.context.scene.pc_pc2_group != "" and bpy.context.scene.pc_pc2_folder != 'Set me Please!')
def execute(self, context):
OscFuncExportPc2(self)
return {'FINISHED'}
class OscRemoveSubsurf(Operator):
bl_idname = "object.remove_subsurf_modifier"
bl_label = "Remove Subdivision Surface Modifier"
bl_description = "Remove Subdivision Surface Modifier"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return(bpy.context.scene.pc_pc2_group != "")
def execute(self, context):
GENERATE = [
'MULTIRES', 'ARRAY', 'BEVEL', 'BOOLEAN', 'BUILD',
'DECIMATE', 'MASK', 'MIRROR', 'REMESH', 'SCREW',
'SKIN', 'SOLIDIFY', 'SUBSURF', 'TRIANGULATE'
]
for OBJ in bpy.data.collections[bpy.context.scene.pc_pc2_group].objects[:]:
for MOD in OBJ.modifiers[:]:
if MOD.type in GENERATE:
if eval("bpy.context.scene.mesh_cache_tools_settings.%s" % (MOD.type.lower())):
OBJ.modifiers.remove(MOD)
return {'FINISHED'}
class OscPc2iMporterBatch(Operator):
bl_idname = "import_shape.pc2_selection"
bl_label = "Import pc2 for selected Objects"
bl_description = "Import pc2 for selected Objects"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return(bpy.context.scene.pc_pc2_folder != 'Set me Please!')
def execute(self, context):
for OBJ in bpy.context.selected_objects[:]:
MOD = OBJ.modifiers.new("MeshCache", 'MESH_CACHE')
MOD.filepath = "//%s%s%s.pc2" % (bpy.context.scene.pc_pc2_folder, os.sep, OBJ.name)
MOD.cache_format = "PC2"
MOD.forward_axis = "POS_Y"
MOD.up_axis = "POS_Z"
MOD.flip_axis = set(())
MOD.frame_start = bpy.context.scene.pc_pc2_start
return {'FINISHED'}
def OscLinkedGroupToLocal():
    """Flatten the active object's linked instance collection into the scene.

    Links every object of the active instancer's collection into the
    current collection, records the MESH members in a new local collection
    named "<active>_CLEAN", then unlinks the instancer itself.  Returns
    True on success, False when there is no instance collection or any
    step raises.
    """
    try:
        ACTOBJ = bpy.context.active_object
        # Bail out when the active object is not a collection instancer.
        if not ACTOBJ.id_data.instance_collection:
            return False
        # Snapshot the mesh members before the scene is mutated below.
        GROBJS = [ob for ob in ACTOBJ.id_data.instance_collection.objects[:] if ob.type == "MESH"]
        for ob in ACTOBJ.id_data.instance_collection.objects[:]:
            bpy.context.collection.objects.link(ob)
        NEWGROUP = bpy.data.collections.new("%s_CLEAN" % (ACTOBJ.name))
        bpy.context.collection.objects.unlink(ACTOBJ)
        NEWOBJ = []
        for ob in GROBJS:
            NEWGROUP.objects.link(ob)
            NEWOBJ.append(ob)
    # NOTE(review): bare except also swallows KeyboardInterrupt and hides
    # real errors; narrowing it would change behavior, so it is only
    # flagged here.
    except:
        return False
    return True
class OscGroupLinkedToLocal(Operator):
bl_idname = "group.linked_group_to_local"
bl_label = "Group Linked To Local"
bl_description = "Group Linked To Local"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
group_check = OscLinkedGroupToLocal()
if not group_check:
self.report({'WARNING'},
"There is no objects to link or the object already linked. Operation Cancelled")
return {'CANCELLED'}
return {'FINISHED'}
class OscMeshCacheUp(Operator):
bl_idname = "object.modifier_mesh_cache_up"
bl_label = "Mesh Cache To Top"
bl_description = "Send Mesh Cache Modifiers top"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
obj = context.object
return (obj and obj.type == "MESH")
def execute(self, context):
actob = bpy.context.view_layer.objects.active
for ob in bpy.context.selected_objects[:]:
bpy.context.view_layer.objects.active = ob
for mod in ob.modifiers[:]:
if mod.type == "MESH_CACHE":
for up in range(ob.modifiers.keys().index(mod.name)):
bpy.ops.object.modifier_move_up(modifier=mod.name)
bpy.context.view_layer.objects.active = actob
return {'FINISHED'}
# Add-ons Preferences Update Panel
# Define Panel classes for updating
panels = (
OscEPc2ExporterPanel,
)
def update_panel(self, context):
    """Re-register the add-on's panels under the user-chosen tab category."""
    message = "Mesh Cache Tools: Updating Panel locations has failed"
    try:
        # First drop every panel that is currently registered...
        currently_registered = [pnl for pnl in panels if "bl_rna" in pnl.__dict__]
        for pnl in currently_registered:
            bpy.utils.unregister_class(pnl)
        # ...then bring them all back under the preferred category.
        category = context.preferences.addons[__name__].preferences.category
        for pnl in panels:
            pnl.bl_category = category
            bpy.utils.register_class(pnl)
    except Exception as e:
        print("\n[{}]\n{}\n\nError:\n{}".format(__name__, message, e))
class OscurartMeshCacheToolsAddonPreferences(AddonPreferences):
# this must match the addon name, use '__package__'
# when defining this in a submodule of a python package.
bl_idname = __name__
category: StringProperty(
name="Category",
description="Choose a name for the category of the panel",
default="Tools",
update=update_panel,
)
def draw(self, context):
layout = self.layout
row = layout.row()
col = row.column()
col.label(text="Category:")
col.prop(self, "category", text="")
classes = (
OscurartMeshCacheModifiersSettings,
OscGroupLinkedToLocal,
OscMeshCacheButtonSet,
OscMeshCacheUp,
OscPc2ExporterBatch,
OscPc2iMporterBatch,
OscRemoveSubsurf,
OscurartMeshCacheToolsAddonPreferences,
RemuevePropiedades,
OscurartMeshCacheSceneAutoLoad,
CreaPropiedades,
OscEPc2ExporterPanel,
)
# Register
def register():
for cls in classes:
bpy.utils.register_class(cls)
from bpy.types import Scene
Scene.mesh_cache_tools_settings = PointerProperty(
type=OscurartMeshCacheModifiersSettings
)
Scene.pc_auto_load_proxy = CollectionProperty(
type=OscurartMeshCacheSceneAutoLoad
)
Scene.pc_pc2_rotx = BoolProperty(default=True, name="Rotx = 90")
Scene.pc_pc2_world_space = BoolProperty(default=True, name="World Space")
Scene.pc_pc2_modifiers = BoolProperty(default=True, name="Apply Modifiers")
Scene.pc_pc2_subsurf = BoolProperty(default=True, name="Turn Off SubSurf")
Scene.pc_pc2_start = IntProperty(default=0, name="Frame Start")
Scene.pc_pc2_end = IntProperty(default=100, name="Frame End")
Scene.pc_pc2_group = StringProperty()
Scene.pc_pc2_folder = StringProperty(default="Set me Please!")
Scene.pc_pc2_exclude = StringProperty(default="*")
update_panel(None, bpy.context)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
from bpy.types import Scene
del Scene.mesh_cache_tools_settings
del Scene.pc_auto_load_proxy
del Scene.pc_pc2_rotx
del Scene.pc_pc2_world_space
del Scene.pc_pc2_modifiers
del Scene.pc_pc2_subsurf
del Scene.pc_pc2_start
del Scene.pc_pc2_end
del Scene.pc_pc2_group
del Scene.pc_pc2_folder
del Scene.pc_pc2_exclude
if __name__ == "__main__":
register()
| [
"admin@irradiate.net"
] | admin@irradiate.net |
b7af641910c25e095ec765b876ffc1ff2b93a6f5 | 028d788c0fa48a8cb0cc6990a471e8cd46f6ec50 | /Python-Advanced/Multidimensional-Lists/Lab/01_sum_matrix_elements.py | 8b2f7f607ffc3ecd86832a616a452d0d289331c1 | [] | no_license | Sheko1/SoftUni | d6b8e79ae545116f4c0e5705ad842f12d77a9c9d | a9fbeec13a30231b6a97c2b22bb35257ac1481c0 | refs/heads/main | 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | def get_matrix():
row, column = [int(n) for n in input().split(", ")]
result = []
for r_1 in range(row):
el = [int(el) for el in input().split(", ")]
result.append(el)
return result
matrix = get_matrix()
matrix_sum = 0
for r in range(len(matrix)):
for c in range(len(matrix[r])):
matrix_sum += matrix[r][c]
print(matrix_sum)
print(matrix)
| [
"martinkypar@gmail.com"
] | martinkypar@gmail.com |
b4e8ec957d1b648d015a016c4e06df18db2ebfb7 | 504d6796ed53540b57532f3c85a148bf6ddce2fc | /button.py | f7dc0e9f31bc5f463498cb67e13c6936fb840d96 | [] | no_license | YGragon/AlienInvasion | 1633d8319ee40400f50f236a904295eeae725886 | 81cf5a7988333e7a26a2934af66d571a26ade3c1 | refs/heads/master | 2021-08-23T15:56:11.077293 | 2017-12-05T14:43:25 | 2017-12-05T14:43:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | import pygame.font
class Button():
    """A clickable, colored rectangle with a centered text label."""
    def __init__(self, ai_settings, screen, msg):
        """Initialize the button's attributes.

        ai_settings is accepted but not read here — presumably kept for
        signature parity with the rest of the game; TODO confirm.
        """
        self.screen = screen
        self.screen_rect = screen.get_rect()
        # Fixed size, colors, and the font used for the label.
        self.width, self.height = 200, 50
        self.button_color = (0, 255, 0)
        self.text_color = (255, 255, 255)
        self.font = pygame.font.SysFont(None, 48)
        # Build the button rect and center it on the screen.
        # (The stray space in "self. height" is valid Python; left as-is.)
        self.rect = pygame.Rect(0, 0, self.width, self. height)
        self.rect.center = self.screen_rect.center
        # The label only needs to be rendered once.
        self.prep_msg(msg)
    def prep_msg(self, msg):
        """Render msg into an image and center it on the button."""
        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center
    def draw_botton(self):
        # Draw the color-filled button, then blit the text label on top.
        # (Method name keeps the original "botton" typo: callers depend on it.)
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
| [
"1105894953@qq.com"
] | 1105894953@qq.com |
6d09ae55a40604788f8a470b1e4e72fbee35e4cb | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/001_classes/examples/Learning Python/030_010_Bound Methods and Other Callable Objects.py | 42e5cbd1bb55eb8bcbbd19028cb76cacc9337cb0 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,994 | py | class Number:
def __init__(self, base):
self.base = base
def double(self):
return self.base * 2
def triple(self):
return self.base * 3
x = Number(2) # Class instance objects
y = Number(3) # State + methods
z = Number(4)
print('#' * 23 + ' Normal immediate calls')
print(x.double()) # Normal immediate calls
print('#' * 23 + ' List of bound methods')
acts = [x.double, y.double, y.triple, z.double] # List of bound methods
for act in acts: # Calls are deferred
print(act()) # Call as though functions
#
bound = x.double
print(bound.__self__, bound.__func__)
# (<__main__.Number object at 0x0278F610>, <function double at 0x027A4ED0>)
print(bound.__self__.base)
#
print('#' * 23 + ' Calls bound.__func__(bound.__self__, ...)')
print(bound()) # Calls bound.__func__(bound.__self__, ...)
#
def square(arg):
    """Plain function: return the square of *arg* (contrast with callables below)."""
    return arg * arg
#
class Sum:
    """Callable instances: each call returns stored value + argument."""

    def __init__(self, val):
        self.val = val              # addend captured at construction time

    def __call__(self, arg):
        return self.val + arg       # e.g. Sum(2)(3) -> 5
#
class Product:
    """Exposes a bound method that multiplies a stored value by its argument."""

    def __init__(self, val):
        self.val = val              # multiplier captured at construction

    def method(self, arg):
        return self.val * arg       # e.g. Product(3).method(5) -> 15
#
sobject = Sum(2)
pobject = Product(3)
actions = [square, sobject, pobject.method] # Function, instance, method
#
print('#' * 23 + ' Function, instance, method. All 3 called same way. Call any 1-arg callable')
for act in actions: # All 3 called same way
print(act(5)) # Call any 1-arg callable
#
#
print('#' * 23 + ' Index, comprehensions, maps')
actions[-1](5) # Index, comprehensions, maps
#
[act(5) for act in actions]
list(map(lambda act: act(5), actions))
class Negate:
    """Stores the arithmetic negation of its constructor argument.

    The class object itself can sit in a table of callables; printing an
    instance shows the negated value via __repr__.
    """

    def __init__(self, val):
        self.val = -val

    def __repr__(self):
        return str(self.val)
print('#' * 23 + ' Call a class too')
actions = [square, sobject, pobject.method, Negate] # Call a class too
for act in actions:
print(act(5))
#
print('#' * 23 + ' Runs __repr__ not __str__!')
print([act(5) for act in actions]) # Runs __repr__ not __str__!
#
#
table = {act(5): act for act in actions} # 2.6/3.0 dict comprehension
print('#' * 23 + ' 2.6/3.0 str.format')
for (key, value) in table.items():
print('{0:2} => {1}'.format(key, value)) # 2.6/3.0 str.format
# -5 => <class '__main__.Negate'>
# 25 => <function square at 0x025D4978>
# 15 => <bound method Product.method of <__main__.Product object at 0x025D0F90>>
# 7 => <__main__.Sum object at 0x025D0F70>
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
4994d7a5ad3a9ce58cc29a553e7a2db8735d7e33 | fe31602a910e70fa77d89fcd4c705cc677b0a898 | /pipeline/type/tmodel.py | eb11ade00b352a0abf20821c7d2f3f85e2555a78 | [] | no_license | WUT-IDEA/Y2019_CZH_GraduationDesignCode | e0748b4412bc6c8d160584dff7faf3d6f3395d90 | 83b807060c68b3edef574532b32e8ae7a759d63f | refs/heads/master | 2020-06-05T04:25:15.351883 | 2019-06-17T09:05:28 | 2019-06-17T09:05:28 | 192,312,562 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
import keras
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))
from keras.layers import Input, Dense, Embedding, LSTM, Bidirectional
from keras.models import Model
import numpy as np
import datetime
class LossHistory(keras.callbacks.Callback):
    """Keras callback that snapshots the model and logs the loss each epoch.

    Parameters
    ----------
    logpath : str
        Text file receiving one "epoch N, loss:X" line per epoch.
    modelpath : str
        Directory for per-epoch model files (model_<epoch>.h5); created
        if missing.
    """
    def __init__(self, logpath, modelpath):
        super().__init__()
        self.logpath = logpath
        self.modelpath = modelpath
        # makedirs(exist_ok=True) also creates missing parents and avoids
        # the race between the original exists() check and mkdir().
        os.makedirs(modelpath, exist_ok=True)

    def set_model(self, model):
        self.model = model
        self.writer = open(self.logpath, "w")

    def on_epoch_end(self, epoch, logs=None):
        # Epochs are reported 1-based in file names and log lines.
        self.model.save("{}/model_{}.h5".format(self.modelpath, epoch + 1))
        self.writer.write("epoch {}, loss:{}\n".format(epoch + 1, logs['loss']))
        # Flush so the log survives a crash mid-training.
        self.writer.flush()
# read file by line
def read_byline(filepath):
    """Load a tab-separated training file into three int32 numpy arrays.

    Each line holds three tab-separated fields: a space-separated question
    token-id sequence, a space-separated type token-id sequence, and a
    label.  Returns (questions, types, labels).
    """
    questions, types, labels = [], [], []
    with open(filepath, 'r') as fh:
        for raw in fh:
            fields = raw.strip().split("\t")
            questions.append(fields[0].split(" "))
            types.append(fields[1].split(" "))
            labels.append(fields[2].strip())
    return (np.asarray(questions, dtype='int32'),
            np.asarray(types, dtype='int32'),
            np.asarray(labels, dtype='int32'))
# Question
embedding_matrix_q = np.loadtxt("../../data/glove_test.txt", dtype=np.float32)
print(embedding_matrix_q.shape)
EMBEDDING_DIM_Q=300
MAX_SEQUENCE_LENGTH_Q=24
# define model 58968
sequence_input_q=Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32')
embedding_layer_q=Embedding(input_dim=62957,
output_dim=EMBEDDING_DIM_Q,
weights=[embedding_matrix_q],
input_length=MAX_SEQUENCE_LENGTH_Q,
trainable=False)
embedded_sequences_q=embedding_layer_q(sequence_input_q)
q_bilstm=Bidirectional(LSTM(100))(embedded_sequences_q)
# subject type
embedding_matrix_t=np.loadtxt("data/glove_type.txt")
print(embedding_matrix_t.shape)
EMBEDDING_DIM_T=300
MAX_SEQUENCE_LENGTH_T=6
# define model
sequence_input_t=Input(shape=(MAX_SEQUENCE_LENGTH_T,), dtype='int32')
embedding_layer_t=Embedding(input_dim=1053,
output_dim=EMBEDDING_DIM_T,
weights=[embedding_matrix_t],
input_length=MAX_SEQUENCE_LENGTH_T,
mask_zero=True,
trainable=False)
embedded_sequences_t=embedding_layer_t(sequence_input_t)
t_lstm=Bidirectional(LSTM(100))(embedded_sequences_t)
from keras.layers import concatenate
concatenatecon_layer=concatenate([q_bilstm, t_lstm],axis=-1)
dense1=Dense(100,activation="sigmoid")(concatenatecon_layer)
output=Dense(1,activation="sigmoid")(dense1)
# output=Dense(1,activation="sigmoid")(concatenatecon_layer)
model=Model(inputs=[sequence_input_q,sequence_input_t],outputs=output)
model.compile(optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"])
print(model.summary())
input_q, input_t, y = read_byline("training_data/train_data.txt")
BATCH_SIZE=100
EPOCHS=60
history = LossHistory("log_t_binary.txt", "t_binary_model")
start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
model.fit(x=[input_q,input_t],
y=y,
batch_size=BATCH_SIZE,
callbacks=[history],
epochs=EPOCHS)
model.save("e_model.h5")
endTime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("startTime:"+start_time)
print("endTime:"+endTime) | [
"zhengyunpei@zhengyunpeideMacBook-Pro.local"
] | zhengyunpei@zhengyunpeideMacBook-Pro.local |
e57efe4423fa8d3f59604669b12ba4f71a8595b6 | 6d154b8fdea96187fe12c6c4324ec4f8980dcdfe | /Shortner/migrations/0004_alter_url_key.py | bc907321bb8551cfa5e5e3a7cf0af62000b338ed | [] | no_license | mohammad-osoolian/UrlShortner | ba245a5aa1595e43044cfb93badbae76d293f616 | 8ffbe0b1a951997a420381f64492bc38deb98c05 | refs/heads/master | 2023-07-18T11:02:55.092219 | 2021-08-29T11:12:55 | 2021-08-29T11:12:55 | 400,747,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 3.2.6 on 2021-08-28 11:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Shortner', '0003_url_created'),
]
operations = [
migrations.AlterField(
model_name='url',
name='key',
field=models.CharField(max_length=255, null=True),
),
]
| [
"="
] | = |
2d3df858a925cc60c824469990783925594963c8 | 1f86353c4740d1b9a118d709e5f94f56570127b0 | /bgp/appbgp/main.py | a56a796710a8d72147061ada7339e2151c530f8f | [] | no_license | gswcfl/BGP-v.1 | 0c6ec0aef69a27ed959bac2984e75682f94cb0c9 | 9bf0eaa87f50b24bb2ee115a489c9482bb529e3c | refs/heads/master | 2021-01-01T19:04:34.795585 | 2017-07-27T06:30:23 | 2017-07-27T06:30:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,759 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import MySQLdb
import os
import commands
import pexpect
import telnetlib
import time
import threading
import sys
sys.path.append(os.path.dirname(os.path.abspath('__file__')))
from huawei_h3c_ssh import huawei_h3c_ssh
from huawei_h3c_telnet import huawei_h3c_telnet
from FiberHome import FiberHome_telnet
#######################################################################################################################
def ping_test(ip_info):
    """Return True when *ip_info* answers ping, False otherwise.

    Bug fix: the original returned the strings "True"/"False", which are
    BOTH truthy, so callers using ``if ping_test(ip):`` could never take
    their failure branch.  Real booleans restore the intended semantics.
    """
    # NOTE(review): ip_info is interpolated into a shell command line and
    # comes from the database — confirm that source is trusted (shell
    # injection risk).
    command = 'ping ' + ip_info + ' -c 2 -i 0.2 -w 0.2'
    # Exit status 0 means at least one echo reply was received.
    (status, output) = commands.getstatusoutput(command)
    return status == 0
#######################################################################################################################
if __name__ == "__main__":
# "10.192.7.130":["北京电信大郊亭163","扩展二套系统"],
# "10.192.23.2":[""],
# "10.192.23.130":[""],
# "10.192.35.90":["北京铁通西客站","扩展一套系统"],
# "10.192.128.20":[""],
# "10.192.128.23":[""],
# "10.192.131.26":[""],
# "10.192.133.30":[""],
# "10.192.217.2":[""],
# "10.192.217.130":["",],
# "10.192.213.2":["广州南方基地302","扩展一套系统"],
# "10.192.213.130":["广州南方基地302","扩展二套系统"],
# "10.192.215.2":["广州南方基地304","扩展一套系统"],
# "10.192.215.130":["广州南方基地304","扩展二套系统"],
# "10.192.96.20":[""],
# "10.192.96.23":[""],
# "10.192.149.2":["上海移动武胜","扩展一套系统"],
# "10.192.149.130":["上海移动武胜","扩展二套系统"],
# "10.192.145.2":["上海联通金桥三期","扩展一套系统"],
# "10.192.145.130":["上海联通金桥三期","扩展二套系统"],
#"10.192.217.2":["广州联通东莞","扩展一套系统","TELNET","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
#"10.192.217.130":["广州联通东莞","扩展二套系统","TELNET","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
#
# password_info = {
# "10.192.15.130":["北京移动大白楼","扩展二套系统","TELNET","FiberHome","hsoft123","Banner@2015","null","show ip bgp summary"],
# "10.192.15.2":["北京移动大白楼","扩展一套系统","TELNET","FiberHome","hsoft123","Banner@2015","null","show ip bgp summary"],
# "10.192.21.130":["北京移动三台IP专网","扩展二套系统","TELNET","HUAWEI","hsoft","hsoft","null","display bgp peer"],
# "10.192.21.2":["北京移动三台IP专网","扩展一套系统","TELNET","HUAWEI","hsoft","hsoft","null","display bgp peer"],
# "10.192.35.50":["北京联通电报CNC","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.37.54":["北京联通电报CNC","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.17.2":["北京联通东古城","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.17.130":["北京联通东古城","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.11.2":["北京联通沙河","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.11.130":["北京联通沙河","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.35.18":["北京电信西单CN2","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.37.22":["北京电信西单CN2","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.7.2":["北京电信大郊亭163","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.37.94":["北京铁通西客站","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.29.2":["北京科技软件园","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.29.130":["北京科技软件园","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.27.2":["北京教育清华","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.27.130":["北京教育清华","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.131.34":["广州联通科学城169","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.133.38":["广州联通科技城169","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.131.50":["广州联通科学城CNC","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.133.54":["广州联通科学城CNC","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.131.2":["广州电信天河163","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.133.6":["广州电信天河163","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.131.10":["广州电信同和163","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.133.14":["广州电信同和163","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.131.18":["广州电信同和CN2","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.133.22":["广州电信同和CN2","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.131.66":["广州移动清河东","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.133.70":["广州移动清河东","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.131.90":["广州铁通东山","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.133.94":["广州铁通东山","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.99.26":["上海联通通联169","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.101.30":["上海联通通联169","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.99.50":["上海联通通联CNC","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.101.54":["上海联通通联CNC","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.99.34":["--上海联通金桥","扩展一套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.101.38":["--上海联通金桥","扩展二套系统","SSH","H3C","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.99.2":["上海电信武胜163","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.101.6":["上海电信武胜163","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.99.10":["上海电信信息园163","扩展一套系统","TELNET","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.101.14":["上海电信信息园163","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.99.18":["上海电信民生CN2","扩展一套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g1","display bgp peer"],
# "10.192.101.22":["上海电信民生CN2","扩展二套系统","SSH","HUAWEI","adminjt","j0t0x5t","2k0s1k3g2","display bgp peer"],
# "10.192.147.2":["上海移动迎春路","扩展一套系统","TELNET","FiberHome","adminjt","j0t0x5t","nll","show ip bgp summary"],
# "10.192.147.130":["上海移动迎春路","扩展二套系统","TELNET","FiberHome","adminjt","j0t0x5t","nll","show ip bgp summary"]
# }
ip_info = ""
address_info = ""
system_info = ""
login_info = ""
changjia_info = ""
username_info = ""
password1_info = ""
password2_info = ""
command_info = ""
result = []
db = MySQLdb.connect('10.52.249.100','admin_user','111111','password_info')
cursor = db.cursor()
sql = "select * from password;"
cursor.execute(sql)
n = cursor.fetchall()
db.commit()
for i in n:
ip_info = i[0]
address_info = i[1]
system_info = i[2]
login_info = i[3]
changjia_info = i[4]
username_info = i[5]
password1_info = i[6]
password2_info = i[7]
#command_info = password_info[i][7]
#print ip_info+"\t"+address_info +"\t"+system_info+"\t"+ login_info+"\t"+ changjia_info+"\t"+ username_info+"\t"+ password1_info+"\t"+ password2_info+"\t"+ command_info
if ping_test(ip_info):
#print ip_info +"\t"+ "True"
if changjia_info == "HUAWEI" and login_info == "SSH":
t = threading.Thread(target=huawei_h3c_ssh,args=(ip_info,address_info,system_info,username_info,password1_info,password2_info,"display bgp peer"))
result.append(t)
elif changjia_info == "HUAWEI" and login_info == "TELNET":
t = threading.Thread(target=huawei_h3c_telnet,args=(ip_info,address_info,system_info,username_info,password1_info,password2_info,"display bgp peer"))
result.append(t)
elif changjia_info == "H3C" and login_info == "SSH":
t = threading.Thread(target=huawei_h3c_ssh,args=(ip_info,address_info,system_info,username_info,password1_info,password2_info,"display bgp peer"))
result.append(t)
elif changjia_info == "H3C" and login_info == "TELNET":
t = threading.Thread(target=huawei_h3c_telnet,args=(ip_info,address_info,system_info,username_info,password1_info,password2_info,"display bgp peer"))
result.append(t)
elif changjia_info == "FiberHome" and login_info == "TELNET":
t = threading.Thread(target=FiberHome_telnet,args=(ip_info,address_info,system_info,username_info,password1_info,password2_info,"show ip bgp summary"))
result.append(t)
else:
db = MySQLdb.connect('10.52.249.100','bgp_user','111111','reuslt_info')
cursor = db.cursor()
sql = ('update bgp_info set result="ping timeout...",time_info="%s" where ip_info="%s";' %(time.strftime("%Y-%m-%d %H:%M:%S"),ip_info))
cursor.execute(sql)
db.commit()
db.close()
#f = file("result.txt","a+")
#f.write(str(ip_info)+"#"+str(address_info)+"#"+str(system_info)+"#"+"ping timeout"+"#"+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"\n")
#print ip_info +"\t"+ "ping timeout" + address_info + system_info
#f.flush()
#f.close()
for i in range(len(result)):
result[i].start()
for i in range(len(result)):
result[i].join()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
ccd817fac9a6c0e11148d92497b0f5a6c2934cb8 | cdbea65d6127779075759bf30ba2cd97d2feb3bc | /petstagram/accounts/migrations/0001_initial.py | a1d63cc03012845babc42670df6fb87da874de1c | [
"MIT"
] | permissive | DimAntDim/SoftUni_Petstagram_Workshop | 9285bbded707d0ef5d467314ebcba1a7df69b370 | b4d6da5fa0d19de4b434046d0b7c73a40c8343b5 | refs/heads/main | 2023-08-21T10:21:37.328351 | 2021-11-01T10:17:16 | 2021-11-01T10:17:16 | 375,528,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | # Generated by Django 3.2.3 on 2021-07-19 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='PetstagramUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"66394357+DimAntDim@users.noreply.github.com"
] | 66394357+DimAntDim@users.noreply.github.com |
1cc53a307474feed659f6c0dba91367c367f464a | 1a1d61424d83663318b8f1ba30712538680a135a | /apps/payinfo/migrations/0003_auto_20181130_2120.py | c7c3df32d3bce51fd10c0d3036e65cc70133f6d8 | [] | no_license | htzs12/django_online | 411ba5c4a20544a07ce6a644306b1c127e6311be | 5c9c2a1a742d3dd97a430651f2bd14012f6eb3a2 | refs/heads/master | 2022-12-24T15:11:39.747641 | 2018-12-01T13:46:39 | 2018-12-01T13:46:39 | 154,823,539 | 0 | 0 | null | 2022-12-02T15:19:33 | 2018-10-26T11:23:21 | Python | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.0.5 on 2018-11-30 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payinfo', '0002_auto_20181130_2118'),
]
operations = [
migrations.AlterField(
model_name='payinfo',
name='path',
field=models.FilePathField(path='/media', verbose_name='路径'),
),
]
| [
"www.htzs@qq.com"
] | www.htzs@qq.com |
0493b6b20bc6c75d3b668bdb0e66d23160bc5ba8 | 80e1a973c97c13fd63afc347409ca0d7fcff2795 | /pic/migrations/0004_images_date_posted.py | bb27ab1575016f858bc4cf761dbe6a5113d41c84 | [] | no_license | prathmesh2048/cyberboxer-assignment | d22584f60870560d4fd1cc7b62bfe8b377b55a3c | c0eb91e289b72f7f254a072d7d166ac42076859d | refs/heads/master | 2023-08-11T10:19:49.315590 | 2021-09-19T09:10:00 | 2021-09-19T09:10:00 | 408,081,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # Generated by Django 3.2.7 on 2021-09-18 19:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('pic', '0003_alter_images_image_name'),
]
operations = [
migrations.AddField(
model_name='images',
name='date_posted',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| [
"prathmeshnandurkar123@gmail.com"
] | prathmeshnandurkar123@gmail.com |
ab2c950a36542bbe73a995c6ffa86c89825b1586 | f062d7c7146e7cd98cda5ebe7f0f2d591f55a309 | /backend/testcg_dev_8072/settings.py | b363528b9797c312c706df34edaccebd9ee3f903 | [] | no_license | crowdbotics-apps/testcg-dev-8072 | 0836821bc0973a17040631b75f5c3fd7b9ba8f4e | 54e6e21ff0453e7be1588f51459f678b598d9325 | refs/heads/master | 2022-11-19T07:06:05.366542 | 2020-07-23T19:02:47 | 2020-07-23T19:02:47 | 282,033,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,901 | py | """
Django settings for testcg_dev_8072 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testcg_dev_8072.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testcg_dev_8072.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c616242aab638f27aa212ca80de8c1162b0f3f38 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/2d0891e0897159d0010afa9be18d1421fcab47c2-<get_device_facts>-fix.py | 561f9e445087fabeb1ba7535484f2e8f2b8bf73f | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,938 | py | def get_device_facts(self):
device_facts = {
}
device_facts['devices'] = {
}
lspci = self.module.get_bin_path('lspci')
if lspci:
(rc, pcidata, err) = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
else:
pcidata = None
try:
block_devs = os.listdir('/sys/block')
except OSError:
return device_facts
devs_wwn = {
}
try:
devs_by_id = os.listdir('/dev/disk/by-id')
except OSError:
pass
else:
for link_name in devs_by_id:
if link_name.startswith('wwn-'):
try:
wwn_link = os.readlink(os.path.join('/dev/disk/by-id', link_name))
except OSError:
continue
devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
links = self.get_all_device_links()
device_facts['device_links'] = links
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join('/sys/block/', block))
except OSError:
e = sys.exc_info()[1]
if (e.errno == errno.EINVAL):
path = block
sysfs_no_links = 1
else:
continue
sysdir = os.path.join('/sys/block', path)
if (sysfs_no_links == 1):
for folder in os.listdir(sysdir):
if ('device' in folder):
virtual = 0
break
d = {
}
d['virtual'] = virtual
d['links'] = {
}
for (link_type, link_values) in iteritems(links):
d['links'][link_type] = link_values.get(block, [])
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(((sysdir + '/device/') + key))
sg_inq = self.module.get_bin_path('sg_inq')
if sg_inq:
device = ('/dev/%s' % block)
(rc, drivedata, err) = self.module.run_command([sg_inq, device])
if (rc == 0):
serial = re.search('Unit serial number:\\s+(\\w+)', drivedata)
if serial:
d['serial'] = serial.group(1)
for (key, test) in [('removable', '/removable'), ('support_discard', '/queue/discard_granularity')]:
d[key] = get_file_content((sysdir + test))
if (diskname in devs_wwn):
d['wwn'] = devs_wwn[diskname]
d['partitions'] = {
}
for folder in os.listdir(sysdir):
m = re.search((('(' + diskname) + '[p]?\\d+)'), folder)
if m:
part = {
}
partname = m.group(1)
part_sysdir = ((sysdir + '/') + partname)
part['links'] = {
}
for (link_type, link_values) in iteritems(links):
part['links'][link_type] = link_values.get(partname, [])
part['start'] = get_file_content((part_sysdir + '/start'), 0)
part['sectors'] = get_file_content((part_sysdir + '/size'), 0)
part['sectorsize'] = get_file_content((part_sysdir + '/queue/logical_block_size'))
if (not part['sectorsize']):
part['sectorsize'] = get_file_content((part_sysdir + '/queue/hw_sector_size'), 512)
part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
d['partitions'][partname] = part
d['rotational'] = get_file_content((sysdir + '/queue/rotational'))
d['scheduler_mode'] = ''
scheduler = get_file_content((sysdir + '/queue/scheduler'))
if (scheduler is not None):
m = re.match('.*?(\\[(.*)\\])', scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content((sysdir + '/size'))
if (not d['sectors']):
d['sectors'] = 0
d['sectorsize'] = get_file_content((sysdir + '/queue/logical_block_size'))
if (not d['sectorsize']):
d['sectorsize'] = get_file_content((sysdir + '/queue/hw_sector_size'), 512)
d['size'] = bytes_to_human((float(d['sectors']) * 512.0))
d['host'] = ''
m = re.match('.+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\\.[0-7])/', sysdir)
if (m and pcidata):
pciid = m.group(1)
did = re.escape(pciid)
m = re.search((('^' + did) + '\\s(.*)$'), pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
self.get_holders(d, sysdir)
device_facts['devices'][diskname] = d
return device_facts | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
27fa27939f9f0ed47d071348ff6a30e7f3939e4b | 6045f8519065f17b9d832a8e051723a520b58e3c | /09. Volleyball.py | adb6b6ba2810dd04ef0de6ab78c52138001659bf | [] | no_license | a-angeliev/Python-Fundamentals-SoftUni | a308a6c94eb705a3319f6e081543c1cad0b1b37d | a9a5eba0376ebc7395daeda527408d1e59d58316 | refs/heads/master | 2023-07-19T05:55:28.104160 | 2021-09-11T18:25:58 | 2021-09-11T18:25:58 | 399,575,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from math import floor
year = input()
holydays = int(input())
selo_weekends = int(input())
sofia_weekends = (48 - selo_weekends) * 3/4
sofia_weekends_p = sofia_weekends
holydays_p = holydays* 2/3
if year == "leap":
all_game = (sofia_weekends_p + holydays_p+selo_weekends)*115/100
else:
all_game = sofia_weekends_p+holydays_p+selo_weekends
print(floor(all_game)) | [
"nachko01@gmail.com"
] | nachko01@gmail.com |
a4bc595b22f210716af0ffe15d947d0da8517d34 | b68fea9d645de59ee31da970d3dc435460fde9de | /exercise/__init__.py | 97d19822dd916b50911cb1201bb3b888c295e0b9 | [
"BSD-3-Clause"
] | permissive | shagun30/djambala-2 | 03fde4d1a5b2a17fce1b44f63a489c30d0d9c028 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | refs/heads/master | 2021-01-10T04:20:30.735479 | 2008-05-22T05:02:08 | 2008-05-22T05:02:08 | 54,959,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | """
/dms/exercise/
Aufgaben mit Abgabemoeglichkeit innerhalb des Django Content Management Systems
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
"""
| [
"hans.rauch@gmx.net"
] | hans.rauch@gmx.net |
05fbc00b1e537495eebafabde55fad0c2743994b | cb61ba31b27b232ebc8c802d7ca40c72bcdfe152 | /leetcode/3. Longest Substring Without Repeating Characters/soln.py | b6def6444faec318c2e67438c047fdc782f1f20e | [
"Apache-2.0"
] | permissive | saisankargochhayat/algo_quest | c7c48187c76b5cd7c2ec3f0557432606e9096241 | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | refs/heads/master | 2021-07-04T15:21:33.606174 | 2021-02-07T23:42:43 | 2021-02-07T23:42:43 | 67,831,927 | 5 | 1 | Apache-2.0 | 2019-10-28T03:51:03 | 2016-09-09T20:51:29 | Python | UTF-8 | Python | false | false | 785 | py | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
from collections import defaultdict
c_chars = defaultdict(int)
l, r, res = 0, 0, 0
contract = False
while r < len(s):
# Expand
if s[r] in c_chars and c_chars[s[r]] > 0: # check if contract is needed
contract = True
c_chars[s[r]] += 1
r += 1
# Contract
if contract:
d_char = s[r-1] # Char to remove
while s[l] != d_char and l < r:
c_chars[s[l]] -= 1
l += 1
c_chars[s[l]] -= 1
l += 1
contract = False
res = max(res, r-l)
return res | [
"saisankargochhayat@gmail.com"
] | saisankargochhayat@gmail.com |
5185c854bd919b0d68828945beeb75f2093739f9 | 78ac05658a95cc15fee374aeccef97ff155912b9 | /TATSSI/time_series/generator.py | a08e2d52c8884dcf32e0ece2ad0eea3fadef5f03 | [] | no_license | rral0/TATSSI | 1546d1c7a2ba343f4a68aa1b8c9489014a504337 | ace4105a26de6ab257ef3127e58342fbeb975be7 | refs/heads/master | 2023-04-20T11:24:33.039622 | 2021-05-18T12:55:48 | 2021-05-18T12:55:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,076 | py |
import os
from pathlib import Path
import gdal
import xarray as xr
from rasterio import logging as rio_logging
import subprocess
from collections import namedtuple
from datetime import datetime as dt
from glob import glob
# Import TATSSI utils
from TATSSI.input_output.utils import *
from .ts_utils import *
from TATSSI.qa.EOS import catalogue
from TATSSI.qa.EOS.quality import qualityDecoder
from TATSSI.input_output.translate import Translate
import logging
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
class Generator():
"""
Class to generate time series of a specific TATSSI product
"""
def __init__(self, source_dir, product, version,
year=None, start=None, end=None, data_format='hdf',
progressBar=None, preprocessed=False, extent=None):
"""
Constructor for Generator class
"""
self.time_series = namedtuple('time_series', 'data qa')
# Set private attributes
self.__datasets = None
self.__qa_datasets = None
# Check that source_dir exist and has some files
if not os.path.exists(source_dir):
raise(IOError("Source directory does not exist!"))
# Set as source dir the absolute path
self.source_dir = os.path.abspath(source_dir)
# Check that the source dir has the requested product
# to create time series
fnames = glob(os.path.join(self.source_dir,
f"*{product}*{version}*.{data_format}"))
if len(fnames) == 0 and preprocessed == False:
err_msg = (f"There are no {product} files in "
f"{self.source_dir}")
raise(IOError(err_msg))
#elif len(fnames) > 0:
else:
self.product = f"{product}.{version}"
self.product_name = product
self.version = version
# Sort files
fnames.sort()
self.fnames = fnames
# Year
self.__set_year(year)
# Start and End dates
self.__set_start_end_dates(start, end)
self.progressBar = progressBar
self.extent = extent
def __get_product_dates_range(self):
"""
Get temporalExtentStart and temporalExtentEnd for
a specific product (product.version)
:param product: product and version to get temporal extent
:return temporalExtentStart
temporalExtentEnd: datetime objects
"""
# Get valid years for product
_catalogue = catalogue.Catalogue()
_products =_catalogue.products
_products = _products[_products.ProductAndVersion == self.product]
temporalExtentStart = string_to_date(
_products.TemporalExtentStart.values[0])
temporalExtentEnd = string_to_date(
_products.TemporalExtentEnd.values[0])
return temporalExtentStart, temporalExtentEnd
def __set_start_end_dates(self, start, end):
"""
Set start and end dates in format:
YYYY-mm-dd same as: '%Y-%m-%d'
"""
if start is None or end is None:
self.start, self.end = None, None
return
temporalExtentStart, temporalExtentEnd = \
self.__get_product_dates_range()
_start = string_to_date(start)
if _start >= temporalExtentStart:
self.start = _start.strftime('%Y-%m-%d')
else:
msg = (f"Start date {start} is not within "
f"{self.product} temporal extent")
raise Exception(msg)
_end = string_to_date(end)
if _end <= temporalExtentEnd:
self.end = _end.strftime('%Y-%m-%d')
else:
msg = (f"End date {start} is not within "
f"{self.product} temporal extent")
raise Exception(msg)
def __set_year(self, year):
"""
Sets year
"""
if year is None:
self.year = None
return
try:
year = int(year)
except ValueError as e:
msg = f"Year {year} is not a valid calendar year"
raise Exception(msg)
temporalExtentStart, temporalExtentEnd = \
self.__get_product_dates_range()
if year >= temporalExtentStart.year:
self.year = year
else:
msg = f"Year {year} is not within product.version extent"
raise Exception(msg)
def generate_time_series(self, overwrite=True, vrt=False):
"""
Generate tile series using all files in source dir
for the corresponding product and version.
Time series will be generated as follows:
- Parameter
- Files for every time step
- QA parameter
- Files for every time step
:param overwrite: Boolean. Overwrite output files
:param vrt: Boolean. Whether or not to use GDAL VRT files
"""
# List of output datasets
self.__datasets = []
# Translate to TATTSI format (Cloud Optimized GTiff)
# or a GDAL VRT if requested
if vrt == True:
output_format = 'VRT'
options = None
extension = 'vrt'
else:
output_format = 'GTiff'
options = Translate.driver_options
extension = 'tif'
# Number of files to process
n_files = len(self.fnames)
msg = f"Creating COGs..."
if self.progressBar is not None:
self.progressBar.setFormat(msg)
for i, fname in enumerate(self.fnames):
_has_subdatasets, diver_name = has_subdatasets(fname)
if _has_subdatasets is True:
# For each Scientific Dataset
for sds in get_subdatasets(fname):
if diver_name == 'HDF4':
# SDS name is the last elemtent of : separated string
sds_name = sds[0].split(':')[-1]
sds_name = sds_name.replace(" ", "_")
elif diver_name == 'HDF5':
# SDS name is the last elemtent of : separated string
# and last element of a / substring
sds_name = sds[0].split(':')[-1].split('/')[-1]
if i == 0:
# Create output dir
output_dir = self.__create_output_dir(sds_name,
overwrite)
self.__datasets.append(output_dir)
else:
output_dir = os.path.join(self.source_dir, sds_name)
# Generate output fname
output_fname = generate_output_fname(
output_dir, fname, extension)
# Translate to selected format
options = Translate.driver_options
Translate(source_img=sds[0],
target_img=output_fname,
output_format=output_format,
options=options,
extent=self.extent)
else:
# Get dimensions
rows, cols, bands = get_image_dimensions(fname)
sds_name = "output"
for band in range(bands):
if band == 0:
# Create output dir
output_dir = self.__create_output_dir(f"b{band+1}",
overwrite)
self.__datasets.append(output_dir)
else:
output_dir = os.path.join(self.source_dir,
f"b{band+1}")
# Generate output fname
output_fname = generate_output_fname(
output_dir, fname, extension)
# Translate to selected format
options = Translate.driver_options
Translate(source_img=fname,
target_img=output_fname,
output_format=output_format,
options=options,
extent=self.extent)
if self.progressBar is not None:
self.progressBar.setValue((i/n_files) * 100.0)
# Create layerstack of bands or subdatasets
msg = f"Generating {self.product} layer stacks..."
LOG.info(msg)
if self.progressBar is not None:
self.progressBar.setFormat(msg)
for dataset in self.__datasets:
self.__generate_layerstack(dataset, extension)
# For the associated product layers, decode the
# corresponding bands or sub datasets
self.__decode_qa(extension)
def __get_layerstacks(self):
"""
For every variable or band, get its associated VRT
layerstack.
:return vrt_fnames: List with all VRTs in the time series
"""
subdirs = next(os.walk(self.source_dir))[1]
subdirs.sort()
vrt_fnames = []
for subdir in subdirs:
vrt_fname = os.path.join(self.source_dir,
subdir, f'{subdir}.vrt')
if len(vrt_fname) == 0:
msg = (f"Verify that {self.source_dir} has the "
f"corresponding subdatasets for:\n"
f"product - {self.product_name}\n"
f"version - {self.version}\n"
f"dataset - {subdir}\n"
f"Has TimeSeriesGenerator been executed?")
raise Exception(msg)
# If vrt exists add it to vrt_fnames
vrt_fnames.append(vrt_fname)
vrt_fnames.sort()
return vrt_fnames
def load_time_series(self, chunked=False):
"""
Read all layer stacks
:param chunked: Boolean. Whether or not the time series
will be splited to load and process
per chunk.
:return: time series (ts) tupple with two elements:
data - all products layers in a xarray dataset
where each layers is a variable
qa - all decoded QA layers in a named tuple
where each QA is a named tuple field and each
decoded QA is a xarray dataset variable
"""
# Get all datasets, including the non-decoded QA layers
vrt_fnames = self.__get_layerstacks()
datasets = self.__get_datasets(vrt_fnames, chunked=chunked)
# Get all decoded QA layers
qa_layer_names = self.__get_qa_layers()
# Insert a 'qa' prefix in case there is an invalid field name
qa_layer_names_prefix = ['qa' + s for s in qa_layer_names]
# Create named tupple where to store QAs
qa_datasets = namedtuple('qa', ' '.join(qa_layer_names_prefix))
for i, qa_layer in enumerate(qa_layer_names):
# Get all VRTs in the second subdirectory level - QAs
if qa_layer[0] == '_' or qa_layer[-1] == '_':
qa_layer_wildcard = f"*{qa_layer[1:-1]}*"
else:
qa_layer_wildcard = f"*{qa_layer}*"
vrt_dir = os.path.join(self.source_dir, qa_layer_wildcard,
'*', '*.vrt')
vrt_fnames = glob(vrt_dir)
vrt_fnames.sort()
if len(vrt_fnames) == 0:
raise Exception(f"VRTs dir {vrts} is empty.")
# Set the attribute of the QA layer with the
# corresponding dataset
setattr(qa_datasets, qa_layer_names_prefix[i],
self.__get_datasets(vrt_fnames, level=1,
chunked=chunked))
# Return time series object
ts = self.time_series(data=datasets, qa=qa_datasets)
return ts
def __get_datasets(self, vrt_fnames, level=0, chunked=False):
"""
Load all VRTs from vrt_fnames list into a xarray dataset
"""
# Disable RasterIO logging, just show ERRORS
log = rio_logging.getLogger()
log.setLevel(rio_logging.ERROR)
datasets = None
subdataset_name = None
times = None
_fill_value = None
for vrt in vrt_fnames:
# Read each VRT file
if chunked == True:
chunks = get_chunk_size(vrt)
data_array = xr.open_rasterio(vrt, chunks=chunks)
else:
data_array = xr.open_rasterio(vrt)
data_array = data_array.rename(
{'x': 'longitude',
'y': 'latitude',
'band': 'time'})
# Extract time from metadata
if times is None:
times = get_times(vrt)
data_array['time'] = times
dataset_name = Path(vrt).parents[0].name
if level == 0:
# Standard layer has an _ prefix
dataset_name = f"_{dataset_name}"
# Check that _FillValue is not NaN
if data_array.nodatavals[0] is np.NaN:
# Use _FillValue from VRT firts band metadata
if _fill_value is None:
_fill_value = get_fill_value_band_metadata(vrt)
data_array.attrs['nodatavals'] = \
tuple(np.full((len(data_array.nodatavals))
,_fill_value))
if datasets is None:
# Create new dataset
datasets = data_array.to_dataset(name=dataset_name)
else:
# Merge with existing dataset
tmp_dataset = data_array.to_dataset(name=dataset_name)
datasets = datasets.merge(tmp_dataset)
tmp_dataset = None
subdataset_name = None
# Back to default logging settings
logging.basicConfig(level=logging.INFO)
# If a specific temporal subset is requested, create subset
if self.year is not None:
time_slice = slice(f'{self.year-1}-11-29',
f'{self.year+1}-02-01')
datasets = datasets.sel(time=time_slice)
elif self.start is not None and self.end is not None:
time_slice = slice(f'{self.start}',
f'{self.end}')
datasets = datasets.sel(time=time_slice)
return datasets
def __get_qa_layers(self):
"""
Get the QA layer names associated with a product
:return qa_layer_names: List of QA layer names
"""
# Get QA layers for product
qa_catalogue = catalogue.Catalogue()
# Get product QA definition
qa_defs = qa_catalogue.get_qa_definition(self.product_name,
self.version)
qa_layer_names = []
# Decode QA layers
for qa_def in qa_defs:
for qa_layer in qa_def.QualityLayer.unique():
qa_layer_names.append(qa_layer)
qa_layer_names.sort()
return qa_layer_names
def __decode_qa(self, extension):
"""
Decode QA layers
:param extension: Format used to create the QA time series
"""
# TODO Comments for this method
# List of output QA datasets
self.__qa_datasets = []
qa_layer_names = self.__get_qa_layers()
# Decode QA layers
for i, qa_layer in enumerate(qa_layer_names):
if self.progressBar is not None:
msg = f"Decoding files for {qa_layer}..."
self.progressBar.setFormat(msg)
qa_fnames = self.__get_qa_files(qa_layer, extension)
# Number of files for this QA layer
n_files = len(qa_fnames)
# Decode all files
for qa_fname in qa_fnames:
qualityDecoder(qa_fname, self.product, qa_layer,
bitField='ALL', createDir=True)
if self.progressBar is not None:
self.progressBar.setValue((i/n_files) * 100.0)
for qa_layer in qa_layer_names:
msg = (f"Generating {self.product} QA layer stacks "
f"for {qa_layer}...")
LOG.info(msg)
if self.progressBar is not None:
self.progressBar.setFormat(msg)
# Get all bit fields per QA layer sub directories
if qa_layer[0] == '_' :
tmp_qa_layer = qa_layer[1::]
else:
tmp_qa_layer = qa_layer
qa_dataset_dir = os.path.join(self.source_dir, tmp_qa_layer)
bit_fields_dirs = [x[0] for x in os.walk(qa_dataset_dir)][1:]
for bit_fields_dir in bit_fields_dirs:
self.__qa_datasets.append(bit_fields_dir)
# Create layerstack of bands or subdatasets
for qa_dataset in self.__qa_datasets:
self.__generate_layerstack(qa_dataset, extension='tif')
def __get_qa_files(self, qa_layer, extension):
"""
Get associated files for QA layer
:param qa_layer: QA to get files from
:param extension of files, either tif or vrt
:return qa_fnames: Sorted list with QA files
"""
# Trim qa_layer string, it might contain extra _
if qa_layer[0] == '_' or qa_layer[-1] == '_' :
_qa_layer = qa_layer[1:-1]
else:
_qa_layer = qa_layer
# Get the dataset dir where QA files are
qa_dir = [s for s in self.__datasets if _qa_layer in s]
if len(qa_dir) > 1:
raise Exception((f"QA layer {qa_layer} directory might "
f"be stored in more than one directory. "
f"Verify QA catalogue or QA layer dir."))
# Get files
qa_fnames = f'{self.product_name}*{self.version}*.{extension}'
qa_fnames = glob(os.path.join(qa_dir[0], qa_fnames))
qa_fnames.sort()
if len(qa_fnames) == 0:
# For preprocessed time series...
qa_fnames = f'*{_qa_layer}*.{extension}'
qa_fnames = glob(os.path.join(qa_dir[0], qa_fnames))
qa_fnames.sort()
if len(qa_fnames) == 0:
raise Exception(f"QA dir {qa_dir} is empty.")
return qa_fnames
def __generate_layerstack(self, dataset, extension):
"""
Generate VRT layerstack for all files within a directory
:param dataset: Full path directory of the dataset where to
create a layerstack of all files within it
:param extension: File extension.
"""
sds_name = os.path.basename(dataset)
fname = f"{sds_name}.vrt"
fname = os.path.join(dataset, fname)
output_fnames = os.path.join(dataset, f'*.{extension}')
# TODO Create a text file with input files instead of wildcards
# -input_file_list my_list.txt
conda_path = os.path.dirname(os.environ['CONDA_EXE'])
command = os.path.join(conda_path, 'gdalbuildvrt')
command = (f'{command} -separate -overwrite '
f'{fname} {output_fnames}')
run_command(command)
LOG.info(f"Layer stack for {sds_name} created successfully.")
def __create_output_dir(self, sub_dir, overwrite=True):
"""
Create output dir as a sub dir of source dir
:return subdir: Full path of created sub dir
"""
try:
sub_dir = os.path.join(self.source_dir, sub_dir)
Path(sub_dir).mkdir(parents=True, exist_ok=overwrite)
except FileExistsError as e:
raise(e)
except IOError:
raise(e)
return sub_dir
| [
"gerardo.lopezsaldana@assimila.eu"
] | gerardo.lopezsaldana@assimila.eu |
8cb7748b4389c09f5172df27555d7538b45545be | 5252110b29700692453d59943d243195c2724c84 | /tools/rsdet/train_5p.py | b9890eda1aeb865c07b876b36289d9fe2bb7eec6 | [
"Apache-2.0"
] | permissive | Karlinik/RotationDetection | 7cf3bcc7f5c1ef20ed30c9f24335960e407a6f6d | efd20d56d3964b89fa356e79a052bb53f6ac8ddb | refs/heads/main | 2023-08-29T08:26:29.080134 | 2021-10-19T04:35:32 | 2021-10-19T04:35:32 | 422,503,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,719 | py | # -*- coding:utf-8 -*-
# Author: Xue Yang <yangxue-2019-sjtu@sjtu.edu.cn>
#
# License: Apache-2.0 license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
sys.path.append("../../")
from tools.train_base import Train
from libs.configs import cfgs
from libs.models.detectors.rsdet import build_whole_network_5p
from libs.utils.coordinate_convert import backward_convert, get_horizen_minAreaRectangle
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
class TrainRSDet(Train):
    """Multi-GPU trainer for the RSDet detector (5-point parameterisation).

    Builds a TF1 multi-tower training graph: one tower per GPU computes
    the detection losses, gradients are gathered per tower, and the
    ``log_printer`` helper inherited from ``Train`` drives the run.
    """

    def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):
        # Strip the padding rows: only the first `num_objects` ground-truth
        # boxes are real. Cast to float32 for use inside the TF graph.
        return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \
               gtboxes_and_label_r[:int(num_objects), :].astype(np.float32)

    def main(self):
        """Construct the training graph and hand it to ``log_printer``."""
        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):

            num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
            global_step = slim.get_or_create_global_step()
            # Warm-up learning-rate schedule, scaled by the GPU count.
            lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu)
            tf.summary.scalar('lr', lr)

            optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
            rsdet = build_whole_network_5p.DetectionNetworkRSDet(cfgs=self.cfgs,
                                                                 is_training=True)

            with tf.name_scope('get_batch'):
                # Multi-scale training: pick a random short side each step.
                if cfgs.IMAGE_PYRAMID:
                    shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
                    shortside_len = tf.random_shuffle(shortside_len_list)[0]
                else:
                    shortside_len = cfgs.IMG_SHORT_SIDE_LEN

                img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
                    self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,
                                           batch_size=cfgs.BATCH_SIZE * num_gpu,
                                           shortside_len=shortside_len,
                                           is_training=True)

            # data processing: build one input bundle per GPU
            inputs_list = []
            for i in range(num_gpu):
                img = tf.expand_dims(img_batch[i], axis=0)
                pretrain_zoo = PretrainModelZoo()
                # Torch/MXNet pretrained backbones expect std-normalised pixels.
                if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
                    img = img / tf.constant([cfgs.PIXEL_STD])

                # Rotated GT: 4-corner format -> (x, y, w, h, theta, label).
                gtboxes_and_label_r = tf.py_func(backward_convert,
                                                 inp=[gtboxes_and_label_batch[i]],
                                                 Tout=tf.float32)
                gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])

                # Horizontal GT: axis-aligned min-area rectangle + label.
                gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])
                gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])

                num_objects = num_objects_batch[i]
                num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)

                img_h = img_h_batch[i]
                img_w = img_w_batch[i]

                inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])

            tower_grads = []
            biases_regularizer = tf.no_regularizer
            weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)

            # One tower per GPU; model variables are pinned to the CPU so
            # that all towers share the same weights.
            with tf.variable_scope(tf.get_variable_scope()):
                for i in range(num_gpu):
                    with tf.device('/gpu:%d' % i):
                        with tf.name_scope('tower_%d' % i):
                            with slim.arg_scope(
                                    [slim.model_variable, slim.variable],
                                    device='/device:CPU:0'):
                                with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
                                                     slim.conv2d_transpose, slim.separable_conv2d,
                                                     slim.fully_connected],
                                                    weights_regularizer=weights_regularizer,
                                                    biases_regularizer=biases_regularizer,
                                                    biases_initializer=tf.constant_initializer(0.0)):
                                    # Drop padded GT rows for this image.
                                    gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label,
                                                                                          inp=[inputs_list[i][1],
                                                                                               inputs_list[i][2],
                                                                                               inputs_list[i][3]],
                                                                                          Tout=[tf.float32, tf.float32])
                                    gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
                                    gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])

                                    img = inputs_list[i][0]
                                    # Last two bundle entries are (img_h, img_w):
                                    # crop away batching padding.
                                    img_shape = inputs_list[i][-2:]
                                    img = tf.image.crop_to_bounding_box(image=img,
                                                                        offset_height=0,
                                                                        offset_width=0,
                                                                        target_height=tf.cast(img_shape[0], tf.int32),
                                                                        target_width=tf.cast(img_shape[1], tf.int32))

                                    outputs = rsdet.build_whole_detection_network(input_img_batch=img,
                                                                                  gtboxes_batch_h=gtboxes_and_label_h,
                                                                                  gtboxes_batch_r=gtboxes_and_label_r,
                                                                                  gpu_id=i)

                                    # TensorBoard visualisation of GT boxes.
                                    gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(img_batch=img,
                                                                                              boxes=gtboxes_and_label_h[
                                                                                                    :, :-1],
                                                                                              labels=gtboxes_and_label_h[
                                                                                                     :, -1],
                                                                                              method=0)
                                    gtboxes_in_img_r = self.drawer.draw_boxes_with_categories(img_batch=img,
                                                                                              boxes=gtboxes_and_label_r[
                                                                                                    :, :-1],
                                                                                              labels=gtboxes_and_label_r[
                                                                                                     :, -1],
                                                                                              method=1)
                                    tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)
                                    tf.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r)

                                    if cfgs.ADD_BOX_IN_TENSORBOARD:
                                        detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(
                                            img_batch=img,
                                            boxes=outputs[0],
                                            scores=outputs[1],
                                            labels=outputs[2],
                                            method=1)
                                        tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)

                                    loss_dict = outputs[-1]
                                    total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)

                                    # Regularization losses are added once,
                                    # on the last tower only.
                                    if i == num_gpu - 1:
                                        regularization_losses = tf.get_collection(
                                            tf.GraphKeys.REGULARIZATION_LOSSES)
                                        # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
                                        total_losses = total_losses + tf.add_n(regularization_losses)

                            # Share variables across all towers after the first.
                            tf.get_variable_scope().reuse_variables()
                            grads = optimizer.compute_gradients(total_losses)
                            if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
                                grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
                            tower_grads.append(grads)

            self.log_printer(rsdet, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph)
if __name__ == '__main__':
    # Build the trainer from the global config and launch training.
    TrainRSDet(cfgs).main()
"yangxue0827@126.com"
] | yangxue0827@126.com |
257da03094424402f2f0aa6083bd458537c6060c | ca552cedf457ab4ad455b089f31b9fc13882c2aa | /app/core/migrations/0001_initial.py | 678152fef9b44a3515f6e92dd97d1c86f63db474 | [
"MIT"
] | permissive | akashjadhav3/django-recipe-app-api | 8837f45dbaacf502a57e90f10dca9b936d7eb893 | 3124c1d6a9c3b8badc02ef9f1a0acb2a779c86dd | refs/heads/master | 2023-03-24T13:10:37.909128 | 2020-08-02T18:07:51 | 2020-08-02T18:07:51 | 283,174,460 | 0 | 0 | MIT | 2021-03-19T23:51:08 | 2020-07-28T10:07:20 | Python | UTF-8 | Python | false | false | 1,709 | py | # Generated by Django 2.1.15 on 2020-08-01 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the core app: creates the custom
    # ``User`` model that uses a unique email address instead of a username.

    initial = True

    dependencies = [
        # Needs the auth app's Group/Permission tables for the M2M fields below.
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email is the unique login identifier for this user model.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
d8ad11dced85b9f98cd2e9948220ded7a12f67e4 | 68a088346090ae4e929c208906b14181da0f92f6 | /第一阶段/2. Python01/day03/exercise/04_order.py | ec663c7ecb7258a59854c76da7d2f1df58f90655 | [] | no_license | LONG990122/PYTHON | d1530e734ae48416b5f989a4d97bd1d66d165b91 | 59a2a2a0b033c8ad0cb33d6126c252e9d574eff7 | refs/heads/master | 2020-07-07T09:38:03.501705 | 2019-09-23T16:28:31 | 2019-09-23T16:28:31 | 203,316,565 | 0 | 0 | null | 2019-10-23T15:02:33 | 2019-08-20T06:47:44 | HTML | UTF-8 | Python | false | false | 270 | py | # 1. 写一个程序,输入一段字符串,如果字符串不为
# 空,则把第一个字符的编码打印出来
s = input("请输入一段字符串: ")
if s != '':
# code = ord('s')
code = ord(s[0])
print('第一个字符的编码是:', code)
| [
"54302090+LONG990122@users.noreply.github.com"
] | 54302090+LONG990122@users.noreply.github.com |
702fc0631f68e8c5ce509203a5dfb60626bb656f | 210ecd63113ce90c5f09bc2b09db3e80ff98117a | /AbletonX1Mk2/APC40/TransportComponent.py | 33d1f976439e8a980d7df0f90542991af7714aed | [] | no_license | ajasver/MidiScripts | 86a765b8568657633305541c46ccc1fd1ea34501 | f727a2e63c95a9c5e980a0738deb0049363ba536 | refs/heads/master | 2021-01-13T02:03:55.078132 | 2015-07-16T18:27:30 | 2015-07-16T18:27:30 | 38,516,112 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | #Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/APC40/TransportComponent.py
import Live
from _Framework.Control import ButtonControl
from _Framework.TransportComponent import TransportComponent as TransportComponentBase
from _Framework.SubjectSlot import subject_slot
class TransportComponent(TransportComponentBase):
    """ TransportComponent that only uses certain buttons if a shift button is pressed """
    rec_quantization_button = ButtonControl()

    def __init__(self, *a, **k):
        super(TransportComponent, self).__init__(*a, **k)
        # Remember the last non-'off' quantization so the toggle can restore it.
        self._last_quant_value = Live.Song.RecordingQuantization.rec_q_eight
        self._on_quantization_changed.subject = self.song()
        self._update_quantization_state()
        self.set_quant_toggle_button = self.rec_quantization_button.set_control_element

    @rec_quantization_button.pressed
    def rec_quantization_button(self, value):
        """Toggle MIDI recording quantization, restoring the last on-value."""
        # BUG FIX: this handler was a garbled decompilation — the assert was
        # rendered as 'if not ...: raise AssertionError' whose body swallowed
        # the toggle logic, leaving a dangling 'else'. Reconstructed to the
        # intended toggle, mirroring _update_quantization_state below.
        assert self._last_quant_value != Live.Song.RecordingQuantization.rec_q_no_q
        quant_value = self.song().midi_recording_quantization
        quant_on = quant_value != Live.Song.RecordingQuantization.rec_q_no_q
        if quant_on:
            # Quantization is on: remember the value, then switch it off.
            self._last_quant_value = quant_value
            self.song().midi_recording_quantization = Live.Song.RecordingQuantization.rec_q_no_q
        else:
            # Quantization is off: restore the remembered value.
            self.song().midi_recording_quantization = self._last_quant_value

    @subject_slot('midi_recording_quantization')
    def _on_quantization_changed(self):
        # Keep the button state in sync when quantization changes in Live.
        if self.is_enabled():
            self._update_quantization_state()

    def _update_quantization_state(self):
        quant_value = self.song().midi_recording_quantization
        quant_on = quant_value != Live.Song.RecordingQuantization.rec_q_no_q
        if quant_on:
            self._last_quant_value = quant_value
        self.rec_quantization_button.color = 'DefaultButton.On' if quant_on else 'DefaultButton.Off'
"admin@scoopler.com"
] | admin@scoopler.com |
a5d637c2475a685c654bafa50d05cf531743adfe | bb3ae8193289e98e01bea265646f7c77f20558af | /venv/Scripts/pisa-script.py | 30f2451464da560f085e14fd18b91fdb2fbe6889 | [] | no_license | chrisstianandres/almacen_yamaha | 4edbbc827bba7143f466d11c066e522cb8357b25 | 711096cd958e92cb6ec9423730a92120ac614337 | refs/heads/master | 2023-05-13T14:15:30.184461 | 2021-06-07T15:02:46 | 2021-06-07T15:02:46 | 370,217,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #!D:\PycharmProjects\almacen_yamaha\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'xhtml2pdf==0.2.5','console_scripts','pisa'
__requires__ = 'xhtml2pdf==0.2.5'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('xhtml2pdf==0.2.5', 'console_scripts', 'pisa')()
)
| [
"chrisstianandres@gmail.com"
] | chrisstianandres@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.