code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def remove_send_last_message(self, connection):
    """Removes a send_last_message function previously registered
    with the Dispatcher.

    Args:
        connection (str): A locally unique identifier provided
            by the receiver of messages.
    """
    # EAFP removal: delete if present, otherwise warn that nothing
    # was registered for this connection.
    try:
        del self._send_last_message[connection]
    except KeyError:
        LOGGER.warning(
            'Attempted to remove send_last_message function for '
            'connection %s, but no send_last_message function was '
            'registered', connection)
    else:
        LOGGER.debug(
            'Removed send_last_message function for connection %s',
            connection)
def repeat(self, caller: Caller[RequestT, ResponseT], request: RequestT, timeout: float, metrics_collector: Optional[_MetricsCollector]) -> ResponseT:
    """Implements a repeater strategy for RequestResponseIO when a repeater
    is enabled.

    Args:
        caller: a `~apache_beam.io.requestresponse.Caller` object that
            calls the API.
        request: input request to repeat.
        timeout: time to wait for the request to complete.
        metrics_collector: (Optional) a
            `~apache_beam.io.requestresponse._MetricsCollector` object
            to collect the metrics for RequestResponseIO.
    """
    # Abstract hook: concrete repeater strategies override this with the
    # actual retry/backoff logic.
    pass
def _GetTypeFromScope(self, package, type_name, scope):
    """Finds a given type name in the current scope.

    Args:
        package: The package the proto should be located in.
        type_name: The name of the type to be found in the scope.
        scope: Dict mapping short and full symbols to message and enum types.

    Returns:
        The descriptor for the requested type.
    """
    if type_name not in scope:
        # Resolve relative names: try progressively shorter package
        # prefixes (deepest first) until a fully-qualified match is found.
        prefix_parts = _PrefixWithDot(package).split('.')
        while prefix_parts:
            candidate = '.'.join(prefix_parts + [type_name])
            if candidate in scope:
                type_name = candidate
                break
            prefix_parts.pop()
    return scope[type_name]
def author_name_contains_fullnames(author_name):
    """Recognizes whether the name contains full name parts and not
    initials or only lastname.

    Returns:
        bool: True if name has only full name parts, e.g. 'Ellis John',
            False otherwise. So for example, False is returned for
            'Ellis, J.' or 'Ellis'.
    """
    def _looks_like_initial(part):
        # A single character, or anything containing a dot, is an initial.
        return len(part) == 1 or u'.' in part

    name_parts = ParsedName(author_name)
    # A lone part means "lastname only"; any initial also disqualifies.
    if len(name_parts) == 1:
        return False
    return not any(_looks_like_initial(part) for part in name_parts)
def chip_as_adjacency_list(device: 'cirq.google.XmonDevice') -> Dict[(GridQubit, List[GridQubit])]:
    """Gives adjacency list representation of a chip.

    The adjacency list is constructed in order of above, left_of, below
    and right_of consecutively.

    Args:
        device: Chip to be converted.

    Returns:
        Map from nodes to list of qubits which represent all the
        neighbours of given qubit.
    """
    on_chip = set(device.qubits)
    # Candidate neighbours are probed in a fixed order so the resulting
    # lists are deterministic: above, left, below, right.
    return {
        qubit: [neighbour
                for neighbour in (above(qubit), left_of(qubit),
                                  below(qubit), right_of(qubit))
                if neighbour in on_chip]
        for qubit in device.qubits
    }
def param_static_shapes(cls, sample_shape):
    """param_shapes with static (i.e. `TensorShape`) shapes.

    This is a class method that describes what key/value arguments are
    required to instantiate the given `Distribution` so that a particular
    shape is returned for that instance's call to `sample()`. Assumes
    that the sample's shape is known statically.

    Subclasses should override class method `_param_shapes` to return
    constant-valued tensors when constant values are fed.

    Args:
        sample_shape: `TensorShape` or python list/tuple. Desired shape
            of a call to `sample()`.

    Returns:
        `dict` of parameter name to `TensorShape`.

    Raises:
        ValueError: if `sample_shape` is a `TensorShape` and is not fully
            defined.
    """
    # Normalize a TensorShape argument into a plain Python list first.
    if isinstance(sample_shape, tensor_shape.TensorShape):
        if not sample_shape.is_fully_defined():
            raise ValueError('TensorShape sample_shape must be fully defined')
        sample_shape = sample_shape.as_list()

    static_params = {}
    for name, shape in cls.param_shapes(sample_shape).items():
        # Every parameter shape must be constant-foldable to be "static".
        static_shape = tensor_util.constant_value(shape)
        if static_shape is None:
            raise ValueError('sample_shape must be a fully-defined TensorShape or list/tuple')
        static_params[name] = tensor_shape.TensorShape(static_shape)
    return static_params
def StartsWithIgnoreCase(self, value):
    """Sets the type of the WHERE clause as "starts with ignore case".

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(
        value, 'STARTS_WITH_IGNORE_CASE')
    self._awql = condition
    return self._query_builder
def export_disks(self, standalone, dst_dir, compress, collect_only=False, with_threads=True, *args, **kwargs):
    """Export all the disks of self.

    Args:
        standalone (bool): if true, merge the base images and the layered
            image into a new file (Supported only in qcow2 format)
        dst_dir (str): dir to place the exported disks
        compress (bool): if true, compress each disk.
        collect_only (bool): If true, return only a dict which maps between
            the name of the vm to the paths of the disks that will be
            exported (don't export anything).
        with_threads (bool): If True, export disks in parallel

    Returns:
        (dict): which maps between the name of the vm to the paths of
            the disks that will be exported
    """
    manager = export.VMExportManager(
        *args,
        disks=self.vm.disks,
        dst=dst_dir,
        compress=compress,
        with_threads=with_threads,
        standalone=standalone,
        **kwargs)
    # In collect-only mode nothing is written; only the paths are reported.
    if collect_only:
        result = manager.collect_paths()
    else:
        result = manager.export()
    return {self.vm.name(): result}
def assign_selective_dynamics(self, slab):
    """Helper function to assign selective dynamics site_properties
    based on surface, subsurface site properties.

    Args:
        slab (Slab): slab for which to assign selective dynamics

    Returns:
        Slab: a copy of the slab with a 'selective_dynamics' site
            property: subsurface sites frozen, all others free to relax.
    """
    # Fix: removed a dead `sd_list = []` assignment that was immediately
    # overwritten by the comprehension below.
    sd_list = [[False, False, False]
               if site.properties['surface_properties'] == 'subsurface'
               else [True, True, True]
               for site in slab.sites]
    new_sp = slab.site_properties
    new_sp['selective_dynamics'] = sd_list
    return slab.copy(site_properties=new_sp)
def smash(self):
    """Smash (delete) the CloudFormation stack named in the config.

    Returns:
        True if the delete request was issued and polling succeeds,
        False if the stack could not be found or pre-flight failed.
    """
    self._initialize_smash()
    try:
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        preflight = self._cloudFormation.describe_stacks(StackName=stack_name)
        logging.debug('smash pre-flight returned: {}'.format(
            json.dumps(preflight, indent=4, default=json_util.default)))
    except ClientError:
        # Stack does not exist (or is not visible to us): nothing to smash.
        logging.warning('your stack is in another castle [0].')
        return False
    except Exception as err:
        logging.error('failed to find intial status of smash candidate: {}'.format(err))
        return False

    response = self._cloudFormation.delete_stack(StackName=stack_name)
    logging.info('delete started for stack: {}'.format(stack_name))
    logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))
    return self.poll_stack()
def connections(self):
    """Gets the Connections API client.

    Returns:
        Connections: lazily created, cached client instance.
    """
    # Create the client on first access and reuse it afterwards.
    cached = self.__connections
    if not cached:
        cached = Connections(self.__connection)
        self.__connections = cached
    return cached
def data_it(db_data, user_type):
    """Validate data is of the given type.

    Args:
        db_data (dict|str|list): The data stored in Redis.
        user_type (str): The user provided type name.

    Returns:
        bool: True if the data passed validation.
    """
    data_type = {
        'array': (list),
        'dict': (dict),
        'entity': (dict),
        'list': (list),
        'str': (string_types),
        'string': (string_types),
    }
    if user_type is None:
        if db_data is None:
            return True
    elif user_type.lower() in ['null', 'none']:
        if db_data is None:
            return True
    # BUG FIX: was `user_type.lower() in 'binary'`, a substring test that
    # incorrectly matched unrelated type names such as 'in', 'na' or 'bi'.
    elif user_type.lower() == 'binary':
        try:
            base64.b64decode(db_data)
            return True
        except Exception:
            # Not decodable base64 -> fails validation.
            return False
    else:
        expected = data_type.get(user_type.lower())
        if expected is not None and isinstance(db_data, expected):
            return True
    return False
def get_actions(self, parent_environ=None):
    """Get the list of rex.Action objects resulting from interpreting
    this context. This is provided mainly for testing purposes.

    Args:
        parent_environ: Environment to interpret the context within,
            defaults to os.environ if None.

    Returns:
        A list of rex.Action subclass instances.
    """
    # A passive Python interpreter records actions without applying them.
    interpreter = Python(target_environ={}, passive=True)
    executor = self._create_executor(interpreter, parent_environ)
    self._execute(executor)
    return executor.actions
def ParseChat(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a chat message.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)
    participants = self._GetRowValue(query_hash, row, 'participants')
    author = self._GetRowValue(query_hash, row, 'author')
    dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')
    from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')

    # Everyone listed in the chat except the author is a recipient.
    recipients = [participant for participant in participants.split(' ')
                  if participant != author]
    to_account = ', '.join(recipients)
    if not to_account:
        # Fall back to the dialog partner, or a placeholder.
        to_account = dialog_partner or 'Unknown User'

    event_data = SkypeChatEventData()
    event_data.from_account = '{0:s} <{1:s}>'.format(from_displayname, author)
    event_data.query = query
    event_data.text = self._GetRowValue(query_hash, row, 'body_xml')
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.to_account = to_account

    timestamp = self._GetRowValue(query_hash, row, 'timestamp')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')
        parser_mediator.ProduceEventWithEventData(event, event_data)
def create_customer(self, *, full_name, email):
    """Creation of a customer in the system.

    Args:
        full_name: Customer's complete name. Alphanumeric. Max: 255.
        email: Customer's email address. Alphanumeric. Max: 255.

    Returns:
        The response of the POST to the customers endpoint.
    """
    return self.client._post(
        self.url + 'customers',
        json={"fullName": full_name, "email": email},
        headers=self.get_headers())
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
    """Check that make_pair's template arguments are deduced.

    G++ 4.6 in C++11 mode fails badly if make_pair's template arguments
    are specified explicitly, and such use isn't intended in any case.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        error: The function to call with any errors found.
    """
    # Only the presence of a match matters; the match object is unused.
    if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
        error(filename, linenum, 'build/explicit_make_pair',
              4,
              'For C++11-compatibility, omit template arguments from make_pair'
              ' OR use pair directly OR if appropriate, construct a pair directly')
class TFDebertaV2StableDropout(keras.layers.Layer):
    """Optimized dropout module for stabilizing the training.

    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob, **kwargs):
        super().__init__(**kwargs)
        # Probability of zeroing an element during training.
        self.drop_prob = drop_prob

    @tf.custom_gradient
    def xdropout(self, inputs):
        """Applies dropout to `inputs` and rescales survivors by
        1 / (1 - drop_prob), with a custom gradient that reuses the
        same mask in the backward pass."""
        # Bernoulli(1 - drop_prob) keeps an element with probability
        # (1 - drop_prob); the complement marks elements to drop.
        mask = tf.cast(1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool)
        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype)
        if self.drop_prob > 0:
            inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale

        def grad(upstream):
            # Mirror the forward mask so gradients of dropped units are
            # zeroed and surviving gradients are rescaled identically.
            if self.drop_prob > 0:
                return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale
            else:
                return upstream
        return (inputs, grad)

    def call(self, inputs: tf.Tensor, training: tf.Tensor=False):
        # Dropout is only active in training mode; inference is identity.
        if training:
            return self.xdropout(inputs)
        return inputs
async def trio_open_connection(host, port, *, ssl=False, **kwargs):
    """Allows connections to be made that may or may not require ssl.

    Somewhat surprisingly trio doesn't have an abstraction for this like
    curio even though it's fairly trivial to write.

    Args:
        host (str): Network location, either by domain or IP.
        port (int): The requested port.
        ssl (bool or SSLContext): If False or None, SSL is not required.
            If True, the context returned by
            trio.ssl.create_default_context will be used. Otherwise,
            this may be an SSLContext object.
        kwargs: A catch all to soak up curio's additional kwargs and
            ignore them.
    """
    import trio

    if not ssl:
        stream = await trio.open_tcp_stream(host, port)
    else:
        # `ssl=True` means "use trio's default context" (signalled by
        # passing None); anything else is assumed to be an SSLContext.
        context = None if isinstance(ssl, bool) else ssl
        stream = await trio.open_ssl_over_tcp_stream(host, port,
                                                     ssl_context=context)
        await stream.do_handshake()
    # curio-compatible alias for closing the stream.
    stream.close = stream.aclose
    return stream
async def download_file(context, url, abs_filename, session=None, chunk_size=128):
    """Download a file, async.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        url (str): the url to download
        abs_filename (str): the path to download to
        session (aiohttp.ClientSession, optional): the session to use. If
            None, use context.session. Defaults to None.
        chunk_size (int, optional): the chunk size to read from the
            response at a time. Default is 128.
    """
    session = session or context.session
    loggable_url = get_loggable_url(url)
    log.info('Downloading %s', loggable_url)
    parent_dir = os.path.dirname(abs_filename)
    async with session.get(url) as resp:
        status = resp.status
        if status == 404:
            await _log_download_error(resp, '404 downloading %(url)s: %(status)s; body=%(body)s')
            raise Download404('{} status {}!'.format(loggable_url, status))
        if status != 200:
            await _log_download_error(resp, 'Failed to download %(url)s: %(status)s; body=%(body)s')
            raise DownloadError('{} status {} is not 200!'.format(loggable_url, status))
        makedirs(parent_dir)
        # Stream the body to disk in chunk_size pieces.
        with open(abs_filename, 'wb') as fd:
            while True:
                chunk = await resp.content.read(chunk_size)
                if not chunk:
                    break
                fd.write(chunk)
    log.info('Done')
def __init__(self, *args, **kwargs):
    """Create an instance.

    Args:
        *args: forwarded to the base class initializer.
        **kwargs: forwarded to the base class initializer.
    """
    super(ContractTransaction, self).__init__(*args, **kwargs)
    # Tag this transaction with its concrete type.
    self.Type = TransactionType.ContractTransaction
def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):
    """Converting Raster format to GeoTIFF.

    Args:
        tif: source raster file path.
        geotif: output raster file path.
        change_nodata: change NoDataValue to -9999 or not.
        change_gdal_type: If True, output the Float32 data type;
            otherwise the source data type is kept.
    """
    src = RasterUtilClass.read_raster(tif)
    nodata = src.noDataValue
    if change_nodata and not MathClass.floatequal(src.noDataValue, DEFAULT_NODATA):
        # Rewrite nodata cells in-place with the default nodata value.
        nodata = DEFAULT_NODATA
        src.data[src.data == src.noDataValue] = DEFAULT_NODATA
    gdal_type = GDT_Float32 if change_gdal_type else src.dataType
    RasterUtilClass.write_gtiff_file(geotif, src.nRows, src.nCols, src.data,
                                     src.geotrans, src.srs, nodata, gdal_type)
def run_inference(self, batch: Sequence[pandas.DataFrame], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
    """Runs inferences on a batch of pandas dataframes.

    Args:
        batch: A sequence of examples as pandas dataframes. Each row in a
            dataframe is a single example. The dimensions must match the
            dimensions of the data used to train the model.
        model: XGBoost booster or XGBModel (sklearn interface). Must
            implement predict(X). Where the parameter X is a pandas
            dataframe.
        inference_args: Any additional arguments for an inference.

    Returns:
        An Iterable of type PredictionResult.
    """
    # Delegate to the configured inference function.
    return self._inference_fn(batch, model, inference_args)
def __init__(self, expected_methods):
    """Init exception.

    Args:
        expected_methods: [MockMethod] -- a sequence of MockMethod
            objects that should have been called.

    Raises:
        ValueError: if expected_methods contains no methods.
    """
    if not expected_methods:
        raise ValueError("There must be at least one expected method")
    Error.__init__(self)
    self._expected_methods = expected_methods
def verify(self, token, **kwargs):
    """Validates authentication credentials for a registered Runner.

    Args:
        token (str): The runner's authentication token
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabVerifyError: If the server failed to verify the token
    """
    # A non-error response means the token is valid; nothing is returned.
    self.gitlab.http_post('/runners/verify',
                          post_data={'token': token}, **kwargs)
def cmd_path(self, cmd):
    """Get the path of a command in the virtualenv if it exists.

    Args:
        cmd (str): The command to look for.

    Returns:
        str: The full path to the command.

    Raises:
        ValueError: If the command is not present.
    """
    suffix = '/{0}'.format(cmd)
    for script in self.bin.files:
        if script.path.endswith(suffix):
            return script.path
    raise ValueError('The command {0} was not found.'.format(cmd))
def archive(self, output_path):
    """Archive this recipe and all associated files into a .ship archive.

    Args:
        output_path (str): The path where the .ship file should be saved.

    Raises:
        ArgumentError: If the recipe has no backing yaml file, or if it
            references a file outside the recipe's own directory.
    """
    if self.path is None:
        raise ArgumentError('Cannot archive a recipe yet without a reference to its original yaml file in self.path')

    base_dir = os.path.dirname(self.path)
    # Fix: the ZipFile was never closed, so the archive's central
    # directory was never flushed; use a context manager instead.
    with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as outfile:
        outfile.write(self.path, arcname='recipe_script.yaml')

        written_files = set()
        for (_factory, args, _resources, files) in self.steps:
            for arg_name in files:
                file_path = args[arg_name]
                if file_path in written_files:
                    continue
                # Only bare filenames (no directory component) are allowed.
                if os.path.basename(file_path) != file_path:
                    raise ArgumentError('Cannot archive a recipe yet that references file not in the same directory as the recipe')
                outfile.write(os.path.join(base_dir, file_path), arcname=file_path)
                written_files.add(file_path)
def __rmod__(self, other):
    """Returns `other` modulo `self`.

    Args:
        other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
        A Dimension whose value is `other` modulo `self`.
    """
    # Coerce and delegate to Dimension.__mod__ on the converted operand.
    return as_dimension(other) % self
def set_float(self, option, value):
    """Set a float option.

    Args:
        option (str): name of option.
        value (float): value of the option.

    Raises:
        TypeError: Value must be a float.
    """
    # Strict type check: ints and bools are rejected, only float is valid.
    if isinstance(value, float):
        self.options[option] = value
    else:
        raise TypeError('Value must be a float')
def get_font(self, weight='medium', slant='upright', width='normal'):
    """Return the font matching or closest to the given style.

    If a font with the given weight, slant and width is available,
    return it. Otherwise, return the font that is closest in style.

    Args:
        weight (FontWeight): weight of the font
        slant (FontSlant): slant of the font
        width (FontWidth): width of the font

    Returns:
        Font: the requested font
    """
    def find_closest_style(style, styles, alternatives):
        # Exact match first; otherwise walk the preferred alternatives
        # in order.
        # NOTE(review): if neither the style nor any alternative is
        # registered, this falls through and implicitly returns None,
        # which the unpacking below would then fail on -- confirm the
        # alternatives tables always contain a registered option.
        try:
            return style, styles[style]
        except KeyError:
            for option in alternatives[style]:
                try:
                    return option, styles[option]
                except KeyError:
                    continue
    def find_closest_weight(weight, weights):
        # Pick the registered weight with the smallest index distance in
        # the ordered FontWeight.values list.
        index = FontWeight.values.index(weight)
        min_distance = len(FontWeight.values)
        closest = None
        for i, option in enumerate(FontWeight.values):
            if option in weights and abs(index - i) < min_distance:
                min_distance = abs(index - i)
                closest = option
        return closest, weights[closest]
    # Narrow down width -> slant -> weight, falling back at each level.
    available_width, slants = find_closest_style(width, self,
                                                 FontWidth.alternatives)
    available_slant, weights = find_closest_style(slant, slants,
                                                  FontSlant.alternatives)
    available_weight, font = find_closest_weight(weight, weights)
    # Warn (once per call) when any requested attribute was substituted.
    if (available_width != width or available_slant != slant or
            available_weight != weight):
        warn('{} does not include a {} {} {} font. Falling back to {} {} '
             '{}'.format(self.name, width, weight, slant, available_width,
                         available_weight, available_slant))
    return font
def rename(self, new_folder_name):
    """Renames the Folder to the provided name.

    Args:
        new_folder_name: A string of the replacement name.

    Raises:
        AuthError: Raised if Outlook returns a 401, generally caused by
            an invalid or expired access token.

    Returns:
        A new Folder representing the folder with the new name on
        Outlook.
    """
    headers = self.headers
    # NOTE(review): the endpoint URL literal below is truncated in this
    # copy of the source -- recover the full Outlook API URL from
    # upstream before relying on this function.
    endpoint = ('https:
    # NOTE(review): the JSON body is built by string concatenation; a
    # name containing quotes would break it -- consider json.dumps.
    payload = (('{ "DisplayName": "' + new_folder_name) + '"}')
    r = requests.patch(endpoint, headers=headers, data=payload)
    if check_response(r):
        return_folder = r.json()
        return self._json_to_folder(self.account, return_folder)
def step1_get_device_and_user_codes(self, http=None):
    """Returns a user code and the verification URL where to enter it.

    Args:
        http: httplib2.Http-compatible object to use for the request;
            a default transport is created when None.

    Returns:
        A DeviceFlowInfo built from the server response, containing the
        user code and the verification URL.

    Raises:
        ValueError: if `self.device_uri` is None.
        OAuth2DeviceCodeError: if the server response cannot be parsed
            or indicates an error.
    """
    if (self.device_uri is None):
        raise ValueError('The value of device_uri must not be None.')
    # Standard OAuth2 device-flow request body.
    body = urllib.parse.urlencode({'client_id': self.client_id, 'scope': self.scope})
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    if (self.user_agent is not None):
        headers['user-agent'] = self.user_agent
    if (http is None):
        http = transport.get_http_object()
    (resp, content) = transport.request(http, self.device_uri, method='POST', body=body, headers=headers)
    content = _helpers._from_bytes(content)
    if (resp.status == http_client.OK):
        try:
            flow_info = json.loads(content)
        except ValueError as exc:
            raise OAuth2DeviceCodeError('Could not parse server response as JSON: "{0}", error: "{1}"'.format(content, exc))
        return DeviceFlowInfo.FromResponse(flow_info)
    else:
        # Try to enrich the error with the server-provided reason, but
        # tolerate a non-JSON error body.
        error_msg = 'Invalid response {0}.'.format(resp.status)
        try:
            error_dict = json.loads(content)
            if ('error' in error_dict):
                error_msg += ' Error: {0}'.format(error_dict['error'])
        except ValueError:
            pass
        raise OAuth2DeviceCodeError(error_msg)
def get_country_name(self, callsign, timestamp=timestamp_now):
    """Returns the country name where the callsign is located.

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        str: name of the Country

    Raises:
        KeyError: No Country found for callsign

    Note:
        Don't rely on the country name when working with several
        instances of :py:class:`Callinfo`. Clublog and Country-files.org
        use slightly different names for countries. Example:

        - Country-files.com: "Fed. Rep. of Germany"
        - Clublog: "FEDERAL REPUBLIC OF GERMANY"
    """
    lookup = self.get_all(callsign, timestamp)
    return lookup[const.COUNTRY]
def process_event(self, event_name: str, data: dict) -> None:
    """Update learning rate and momentum variables after event (given by `event_name`)

    Args:
        event_name: name of event after which the method was called.
            Set of values: `"after_validation"`, `"after_batch"`,
            `"after_epoch"`, `"after_train_log"`
        data: dictionary with parameters values

    Returns:
        None
    """
    if event_name == "after_validation":
        # Count consecutive validations without improvement; reset the
        # counter whenever impatience stops growing.
        if data['impatience'] > self._learning_rate_last_impatience:
            self._learning_rate_cur_impatience += 1
        else:
            self._learning_rate_cur_impatience = 0
        self._learning_rate_last_impatience = data['impatience']
        # Once patience is exhausted, divide the learning rate and
        # remember the cumulative divisor for schedule-driven updates.
        if (self._learning_rate_drop_patience is not None) and\
                (self._learning_rate_cur_impatience >=
                 self._learning_rate_drop_patience):
            self._learning_rate_cur_impatience = 0
            self._learning_rate_cur_div *= self._learning_rate_drop_div
            self._lr /= self._learning_rate_drop_div
            self._update_graph_variables(learning_rate=self._lr)
            log.info(f"New learning rate dividor = {self._learning_rate_cur_div}")
    if event_name == 'after_batch':
        # Per-batch schedules: advance lr/momentum if configured to
        # update on batches.
        if (self._lr is not None) and self._lr_update_on_batch:
            self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div
            self._update_graph_variables(learning_rate=self._lr)
        if (self._mom is not None) and self._mom_update_on_batch:
            # Momentum is clamped to the valid [0, 1] range.
            self._mom = min(1., max(0., self._mom_schedule.next_val()))
            self._update_graph_variables(momentum=self._mom)
    if event_name == 'after_epoch':
        # Per-epoch schedules: same as above for epoch-based updates.
        if (self._lr is not None) and not self._lr_update_on_batch:
            self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div
            self._update_graph_variables(learning_rate=self._lr)
        if (self._mom is not None) and not self._mom_update_on_batch:
            self._mom = min(1., max(0., self._mom_schedule.next_val()))
            self._update_graph_variables(momentum=self._mom)
    if event_name == 'after_train_log':
        # Expose current lr/momentum in the log payload without
        # overwriting values already present.
        if (self._lr is not None) and ('learning_rate' not in data):
            data['learning_rate'] = self._lr
        if (self._mom is not None) and ('momentum' not in data):
            data['momentum'] = self._mom
def dark(app):
    """Apply Dark Theme to the Qt application instance.

    Args:
        app (QApplication): QApplication instance.
    """
    _apply_base_theme(app)

    palette = QPalette()
    # Default (enabled) color-role assignments.
    for role, color in (
            (QPalette.WindowText, QColor(180, 180, 180)),
            (QPalette.Button, QColor(53, 53, 53)),
            (QPalette.Light, QColor(180, 180, 180)),
            (QPalette.Midlight, QColor(90, 90, 90)),
            (QPalette.Dark, QColor(35, 35, 35)),
            (QPalette.Text, QColor(180, 180, 180)),
            (QPalette.BrightText, QColor(180, 180, 180)),
            (QPalette.ButtonText, QColor(180, 180, 180)),
            (QPalette.Base, QColor(42, 42, 42)),
            (QPalette.Window, QColor(53, 53, 53)),
            (QPalette.Shadow, QColor(20, 20, 20)),
            (QPalette.Highlight, QColor(42, 130, 218)),
            (QPalette.HighlightedText, QColor(180, 180, 180)),
            (QPalette.Link, QColor(56, 252, 196)),
            (QPalette.AlternateBase, QColor(66, 66, 66)),
            (QPalette.ToolTipBase, QColor(53, 53, 53)),
            (QPalette.ToolTipText, QColor(180, 180, 180))):
        palette.setColor(role, color)

    # Overrides for the Disabled color group.
    for role, color in (
            (QPalette.WindowText, QColor(127, 127, 127)),
            (QPalette.Text, QColor(127, 127, 127)),
            (QPalette.ButtonText, QColor(127, 127, 127)),
            (QPalette.Highlight, QColor(80, 80, 80)),
            (QPalette.HighlightedText, QColor(127, 127, 127))):
        palette.setColor(QPalette.Disabled, role, color)

    app.setPalette(palette)
def from_config(cls, config):
    """Creates a layer from its config.

    This method is the reverse of `get_config`, capable of instantiating
    the same layer from the config dictionary.

    Args:
        config: A Python dictionary, typically the output of `get_config`.

    Returns:
        layer: A layer instance.
    """
    config = config.copy()
    function_keys = ('kernel_posterior_fn', 'kernel_posterior_tensor_fn',
                     'kernel_prior_fn', 'kernel_divergence_fn',
                     'bias_posterior_fn', 'bias_posterior_tensor_fn',
                     'bias_prior_fn', 'bias_divergence_fn')
    for key in function_keys:
        serialized = config[key]
        # The companion '<key>_type' entry is consumed even when the
        # serialized function itself is None.
        function_type = config.pop(key + '_type')
        if serialized is not None:
            config[key] = tfp_layers_util.deserialize_function(
                serialized, function_type=function_type)
    return cls(**config)
def can_api_key_access_build(param_name):
    """Determines if the current API key can access the build in the request.

    Args:
        param_name: Parameter name to use for getting the build ID from
            the request. Will fetch from GET or POST requests.

    Returns:
        (api_key, build) The API Key and the Build it has access to.
    """
    # The build ID may arrive via query string, form data or JSON body.
    build_id = (request.args.get(param_name, type=int)
                or request.form.get(param_name, type=int)
                or request.json[param_name])
    utils.jsonify_assert(build_id, 'build_id required')

    if app.config.get('IGNORE_AUTH'):
        # Auth disabled: fabricate a superuser key and fetch the build.
        api_key = models.ApiKey(id='anonymous_superuser', secret='',
                                superuser=True)
        build = models.Build.query.get(build_id)
        utils.jsonify_assert(build is not None, 'build must exist', 404)
        return api_key, build

    return _get_api_key_ops().can_access_build(build_id)
def agent_heartbeat(self, agent_id, metrics, run_states):
    """Notify server about agent state, receive commands.

    Args:
        agent_id (str): agent_id
        metrics (dict): system metrics
        run_states (dict): run_id: state mapping

    Returns:
        List of commands to execute.
    """
    # NOTE(review): the GraphQL document is missing in this copy of the
    # source -- `gql()` is called with no arguments; the mutation string
    # appears to have been stripped. Recover it from upstream.
    mutation = gql()
    try:
        response = self.gql(mutation, variable_values={
            'id': agent_id,
            'metrics': json.dumps(metrics),
            'runState': json.dumps(run_states)})
    except Exception as e:
        # Server errors arrive as a stringified dict in args[0]; extract
        # the human-readable message for the log.
        message = ast.literal_eval(e.args[0])["message"]
        logger.error('Error communicating with W&B: %s', message)
        return []
    else:
        return json.loads(response['agentHeartbeat']['commands'])
def _CreateFeedItems(client, feed_details, label_name):
    """Creates the page URLs in the DSA page feed.

    Args:
        client: an AdWordsClient instance.
        feed_details: a _DSAFeedDetails instance.
        label_name: a str containing the page feed URL label.
    """
    feed_item_service = client.GetService('FeedItemService', version='v201809')
    # NOTE(review): the URL tuple literal below is truncated in this copy
    # of the source -- recover the full page URLs from upstream before
    # relying on this function.
    urls = ('http:
    # One ADD operation per page URL, attaching both the URL attribute
    # and the label attribute to the feed item.
    operations = [{'operand': {'feedId': feed_details.feed_id, 'attributeValues': [{'feedAttributeId': feed_details.url_attribute_id, 'stringValues': [url]}, {'feedAttributeId': feed_details.label_attribute_id, 'stringValues': [label_name]}]}, 'operator': 'ADD'} for url in urls]
    feed_item_service.mutate(operations)
def _process_new(self, feed_item):
    """Creates a new landing page DCM object from a feed item representing
    a landing page from the Bulkdozer feed.

    This function simply creates the object to be inserted later by the
    BaseDAO object.

    Args:
        feed_item: Feed item representing the landing page from the
            Bulkdozer feed.

    Returns:
        A landing page object ready to be inserted in DCM through the API.
    """
    # Map DCM field name -> Bulkdozer feed column.
    field_map = (
        ('name', FieldMap.CAMPAIGN_LANDING_PAGE_NAME),
        ('url', FieldMap.CAMPAIGN_LANDING_PAGE_URL),
        ('advertiserId', FieldMap.ADVERTISER_ID),
    )
    return {key: feed_item.get(column, None) for key, column in field_map}
def __init__(self, worker):
    """Initializes the stat collector.

    Args:
        worker: A `GRRClientWorker` instance that spawned this stat
            collector.
    """
    super(ClientStatsCollector, self).__init__()
    # Daemon thread: never blocks interpreter shutdown.
    self.daemon = True
    self._worker = worker
    self._process = psutil.Process()
    # Collected samples of CPU and I/O usage for this process.
    self._cpu_samples = []
    self._io_samples = []
    # Epoch start -- presumably so the first send is never considered
    # "too recent"; confirm against the send logic.
    self._last_send_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
    self._should_send = False
def free(object_ids, local_only=False, delete_creating_tasks=False):
    """Free a list of IDs from object stores.

    This function is a low-level API which should be used in restricted
    scenarios.

    If local_only is false, the request will be send to all object
    stores. This method will not return any value to indicate whether
    the deletion is successful or not. This function is an instruction
    to object store. If the some of the objects are in use, object
    stores will delete them later when the ref count is down to 0.

    Args:
        object_ids (List[ObjectID]): List of object IDs to delete.
        local_only (bool): Whether only deleting the list of objects in
            local object store or all object stores.
        delete_creating_tasks (bool): Whether also delete the object
            creating tasks.
    """
    worker = ray.worker.get_global_worker()

    # Local mode has no object store; freeing is a no-op.
    if ray.worker._mode() == ray.worker.LOCAL_MODE:
        return

    # Accept a single ObjectID for convenience, but require a list after
    # normalization.
    if isinstance(object_ids, ray.ObjectID):
        object_ids = [object_ids]
    if not isinstance(object_ids, list):
        raise TypeError('free() expects a list of ObjectID, got {}'.format(type(object_ids)))
    for object_id in object_ids:
        if not isinstance(object_id, ray.ObjectID):
            raise TypeError('Attempting to call `free` on the value {}, which is not an ray.ObjectID.'.format(object_id))

    worker.check_connected()
    with profiling.profile('ray.free'):
        if not object_ids:
            return
        worker.raylet_client.free_objects(object_ids, local_only,
                                          delete_creating_tasks)
def forward(self, x):
    """Call the module.

    Args:
        x (`torch.tensor`): The input tensor to apply dropout
    """
    # Dropout is an identity outside training or at zero probability.
    if not self.training:
        return x
    if self.drop_prob > 0:
        return XDropout.apply(x, self.get_context())
    return x
def pull(handle, enumerate=False):
    """Build a Pull task requesting the next message for *handle*.

    Args:
        handle: A stream ``Handle`` (or group handle) to pull the next
            message from; must be a Handle instance.
        enumerate (bool): If True, the message is delivered as an
            ``(idx, msg)`` tuple, not unlike Python's enumerate().
            (The parameter name intentionally mirrors the builtin.)

    Returns:
        A Pull task to be yielded; the framework sends the corresponding
        message as soon as it is available. For groups the message is a
        handle to a member of the group (a stream or another group).
    """
    assert isinstance(handle, Handle), handle
    return Pull(handle, enumerate)
Args:
handle: A :class:`.stream.Handle` or GroupHandle.
enumerate (bool): boolean to indicate whether a tuple ``(idx, msg)``
should be returned, not unlike Python's enumerate().
Returns:
A :class:`Pull` task to be yielded. Marv will send the
corresponding message as soon as it is available. For groups
this message will be a handle to a member of the
group. Members of groups are either streams or groups.
Examples:
Pulling (enumerated) message from stream::
msg = yield marv.pull(stream)
idx, msg = yield marv.pull(stream, enumerate=True)
Pulling stream from group and message from stream::
stream = yield marv.pull(group) # a group of streams
msg = yield marv.pull(stream) | codesearchnet |
def __parameter_enum(self, final_subfield):
    """Return the enum descriptor of *final_subfield* if it is an enum.

    The descriptor maps each enum value name to a dict with a single
    'backendValue' key holding the same name. (protorpc Enum classes have
    no per-value description, so no 'description' key is emitted.)

    Args:
        final_subfield: A simple field from the end of a subfield list.

    Returns:
        The enum descriptor dict, or None when the field is not an enum.
    """
    if not isinstance(final_subfield, messages.EnumField):
        return None
    return {value: {'backendValue': value}
            for value in final_subfield.type.to_dict().keys()}
An enum descriptor is a dictionary with keys as the names from the enum and
each value is a dictionary with a single key "backendValue" and value equal
to the same enum name used to stored it in the descriptor.
The key "description" can also be used next to "backendValue", but protorpc
Enum classes have no way of supporting a description for each value.
Args:
final_subfield: A simple field from the end of a subfield list.
Returns:
The enum descriptor for the field, if it's an enum descriptor, else
returns None. | codesearchnet |
def create_reset_score(cls, student_item):
    """Create a "reset" score: a row with a null submission and zero points.

    Only scores created after the most recent reset are used when
    computing a student's effective score.

    Args:
        student_item (StudentItem): The student item model to reset.

    Returns:
        Score: The newly created "reset" score.

    Raises:
        DatabaseError: An error occurred while creating the score.
    """
    return cls.objects.create(
        student_item=student_item,
        submission=None,  # null submission is what marks this row as a reset
        points_earned=0,
        points_possible=0,
        reset=True,
    )
Only scores created after the most recent "reset" score
should be used to determine a student's effective score.
Args:
student_item (StudentItem): The student item model.
Returns:
Score: The newly created "reset" score.
Raises:
DatabaseError: An error occurred while creating the score | juraj-google-style |
def __init__(self,
             application,
             project_id,
             control_client,
             next_operation_id=_next_operation_uuid,
             timer=datetime.utcnow):
    """Initializes a new Middleware instance.

    Args:
        application: The wrapped WSGI application.
        project_id: The project id providing service control support.
        control_client: The service control client instance.
        next_operation_id (func): Produces the next operation id.
        timer (func[[], datetime.datetime]): Obtains the current time.
    """
    self._application = application
    self._project_id = project_id
    self._control_client = control_client
    self._next_operation_id = next_operation_id
    self._timer = timer
Args:
application: the wrapped wsgi application
project_id: the project_id thats providing service control support
control_client: the service control client instance
next_operation_id (func): produces the next operation
timer (func[[datetime.datetime]]): a func that obtains the current time | juraj-google-style |
def early_stop_by_value(step_values: List[Tuple[int, float]], metric: Union[str, Callable[[pg.tuning.Measurement], float]]='reward', maximize: bool=True):
    """Step-wise early stopping policy based on the value of reward/metric.

    Args:
        step_values: A list of (gating step, value threshold) tuples: at
            each gating step, trials failing the threshold are stopped.
        metric: Metric name ('reward' or a key into measurement.metrics),
            or a callable mapping a measurement to a float.
        maximize: If True, trials whose value falls below the threshold
            are stopped; otherwise trials above the threshold are stopped.

    Returns:
        A `StepWise` early stopping policy.
    """
    assert isinstance(step_values, list), step_values
    # Validate every entry is a (step: int, threshold: number) pair.
    for v in step_values:
        if not isinstance(v, tuple) or len(v) != 2 or (not isinstance(v[0], int)) or (not isinstance(v[1], numbers.Number)):
            raise ValueError(f'Invalid definition in `step_values`: {v}. Expect a tuple of 2 elements: (step: int, threshold: float).')

    def _cmp(x, y) -> bool:
        # True when x is on the "stop" side of threshold y for this goal.
        return x < y if maximize else x > y

    def _value(m: pg.tuning.Measurement) -> float:
        # Extract the configured metric value from a measurement.
        if isinstance(metric, str):
            return m.reward if metric == 'reward' else m.metrics[metric]
        assert callable(metric), metric
        return metric(m)

    def _make_predicate(threshold: float):
        # Factory binds `threshold` per step (avoids late-binding closures).
        def _predicate(m: pg.tuning.Measurement, unused_history):
            v = _value(m)
            ret = _cmp(v, threshold)
            return ret
        return _predicate

    return StepWise([(step, _make_predicate(threshold)) for step, threshold in step_values])
Example::
policy = early_stop_by_value([
# Stop at step 1 if trial reward is less than 0.2.
(1, 0.2),
# Stop at step 2 if trial reward is less than 0.8.
(2, 0.8),
])()
Args:
step_values: A list of tuple (gating step, value threshold).
gating step - At which step this rule will be triggered.
value threshold - A float number indicating the threshold value for
early stopping.
metric: Based on which metric the value should be compared against.
Use str for metric name or a callable object that takes a measurement
object at a given step as input and returns a float value.
maximize: If True, reward or metric value below the threshold will be
stopped, otherwise trials with values above the threshold will be stopped.
Returns:
A `StepWise` early stopping policy. | github-repos |
def get_sequence_properties(self, clean_seq=False, representative_only=True):
    """Run Biopython ProteinAnalysis and EMBOSS pepstats on stored sequences.

    Results are stored in each SeqProp object's ``.annotations``.

    Args:
        clean_seq (bool): Passed through to get_biopython_pepstats;
            presumably cleans the sequence before analysis — TODO confirm.
        representative_only (bool): If True, analyze only the
            representative sequence; otherwise analyze all sequences.
    """
    if representative_only:
        # Bail out early if there is nothing usable to analyze.
        if not self.representative_sequence:
            log.warning('{}: no representative sequence set, cannot get sequence properties'.format(self.id))
            return
        if not self.representative_sequence.seq:
            log.warning('{}: representative sequence {} set, but no sequence stored. '
                        'Cannot get sequence properties.'.format(self.id, self.representative_sequence.id))
            return
        self.representative_sequence.get_biopython_pepstats(clean_seq=clean_seq)
        self.representative_sequence.get_emboss_pepstats()
    if not representative_only:
        for s in self.sequences:
            if not s.seq:
                log.warning('{}: no sequence stored. '
                            'Cannot get sequence properties.'.format(s.id))
                continue
            else:
                s.get_biopython_pepstats(clean_seq=clean_seq)
                s.get_emboss_pepstats()
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
representative_only (bool): If analysis should only be run on the representative sequence | juraj-google-style |
def addFixedEffect(self, F=None, A=None):
    """Add a fixed effect term to the model and invalidate cached results.

    Args:
        F: fixed effect matrix [N, 1] (defaults to an all-ones column).
        A: design matrix [K, P] (e.g. SP.ones((1, P)) for a common effect,
           SP.eye(P) for any effect; defaults to SP.eye(P)).
    """
    # Use `is None`: `A == None` on a numpy array is an elementwise
    # comparison whose truth value is ambiguous and raises ValueError.
    if A is None:
        A = SP.eye(self.P)
    if F is None:
        F = SP.ones((self.N, 1))
    assert A.shape[1] == self.P, 'Incompatible shape'
    assert F.shape[0] == self.N, 'Incompatible shape'
    if F.shape[1] > 1:
        # Add each column of F as a separate fixed-effect term.
        for m in range(F.shape[1]):
            self.vd.addFixedEffTerm(A, F[:, m:(m + 1)])
    else:
        self.vd.addFixedEffTerm(A, F)
    # The model changed: drop the fitted GP and every cached quantity.
    self.gp = None
    self.init = False
    self.fast = False
    self.optimum = None
    self.cache['Sigma'] = None
    self.cache['Hessian'] = None
    self.cache['Lparams'] = None
    self.cache['paramsST'] = None
Args:
F: fixed effect matrix [N,1]
A: design matrix [K,P] (e.g. SP.ones((1,P)) common effect; SP.eye(P) any effect) | codesearchnet |
def encrypt(self, message, public_key):
    """Encrypt a string with an rsa.PublicKey, chunking when necessary.

    Messages longer than the key permits are split into fixed-size
    slices and each slice is encrypted separately.

    Args:
        message (string): The string to encrypt.
        public_key (rsa.PublicKey): The key object used to encrypt the
            message. Only the paired private key can decrypt it.

    Returns:
        A json string of the list of base64-encoded encrypted chunks.
    """
    # PKCS#1 v1.5 padding consumes 11 bytes of the modulus size.
    max_str_len = rsa.common.byte_size(public_key.n) - 11
    # Slice the message instead of textwrap.wrap(): wrap() collapses and
    # drops whitespace, which silently corrupts the plaintext.
    if len(message) > max_str_len:
        chunks = [message[i:i + max_str_len]
                  for i in range(0, len(message), max_str_len)]
    else:
        chunks = [message]
    enc_msg = []
    for chunk in chunks:
        enc_chunk = rsa.encrypt(chunk, public_key)
        enc_msg.append(binascii.b2a_base64(enc_chunk))
    return json.dumps(enc_msg)
return enc_msg | Encrypts a string using a given rsa.PublicKey object. If the message
is larger than the key, it will split it up into a list and encrypt
each line in the list.
Args:
message (string): The string to encrypt.
public_key (rsa.PublicKey): The key object used to encrypt the
message. Only the paired private key can decrypt it.
Returns:
A json string of the list of encrypted lines of the message. | juraj-google-style |
def readlink(path):
    """Return the path that a symlink points to (Windows Vista or later).

    In line with Unix behavior, raises if *path* is not a symlink, but
    the error raised is a SaltInvocationError / CommandExecutionError,
    not an OSError.

    Args:
        path (str): The path to the symlink.

    Returns:
        str: The path that the symlink points to.

    CLI Example:
        salt '*' file.readlink /path/to/link
    """
    # Symlink support requires Windows Vista (NT 6.0) or later.
    if (sys.getwindowsversion().major < 6):
        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')
    try:
        return salt.utils.path.readlink(path)
    except OSError as exc:
        # EINVAL means the path exists but is not a symbolic link.
        if (exc.errno == errno.EINVAL):
            raise CommandExecutionError('{0} is not a symbolic link'.format(path))
        raise CommandExecutionError(exc.__str__())
    except Exception as exc:
        raise CommandExecutionError(exc)
This is only supported on Windows Vista or later.
Inline with Unix behavior, this function will raise an error if the path is
not a symlink, however, the error raised will be a SaltInvocationError, not
an OSError.
Args:
path (str): The path to the symlink
Returns:
str: The path that the symlink points to
CLI Example:
.. code-block:: bash
salt '*' file.readlink /path/to/link | codesearchnet |
def gpio_set(self, pins, states):
    """Set the state of one or more user-controllable GPIOs.

    For each given pin, the state at the corresponding index is applied.

    Args:
        self (JLink): the ``JLink`` instance.
        pins (list): list of GPIO indices.
        states (list): list of states to set.

    Returns:
        A list of updated states.

    Raises:
        JLinkException: on error.
        ValueError: if ``len(pins) != len(states)``.
    """
    if len(pins) != len(states):
        raise ValueError('Length mismatch between pins and states.')
    count = len(pins)
    # Marshal pins/states into C byte arrays for the DLL call.
    pin_buf = (ctypes.c_uint8 * count)(*pins)
    state_buf = (ctypes.c_uint8 * count)(*states)
    out_buf = (ctypes.c_uint8 * count)()
    res = self._dll.JLINK_EMU_GPIO_SetState(ctypes.byref(pin_buf),
                                            ctypes.byref(state_buf),
                                            ctypes.byref(out_buf),
                                            count)
    if res < 0:
        raise errors.JLinkException(res)
    return list(out_buf)
For each of the given pins, sets the the corresponding state based on
the index.
Args:
self (JLink): the ``JLink`` instance
pins (list): list of GPIO indices
states (list): list of states to set
Returns:
A list of updated states.
Raises:
JLinkException: on error.
ValueError: if ``len(pins) != len(states)`` | juraj-google-style |
def _reference_json(model):
    # Attach the model's serialized (non-default) properties to its ref dict.
    ref = model.ref
    ref['attributes'] = model._to_json_like(include_defaults=False)
    return ref


def references_json(references):
    """Return JSON-style reference dicts (with attributes) for the models.

    Args:
        references (seq[Model]): Models to convert to JSON.

    Returns:
        list
    """
    return [_reference_json(model) for model in references]
return references_json | Given a list of all models in a graph, return JSON representing
them and their properties.
Args:
references (seq[Model]) :
A list of models to convert to JSON
Returns:
list | juraj-google-style |
def run(self, dag):
    """Run the StochasticSwap pass on `dag`.

    Args:
        dag (DAGCircuit): DAG to map.

    Returns:
        DAGCircuit: A mapped DAG.

    Raises:
        TranspilerError: if the coupling map or the layout are not
            compatible with the DAG.
    """
    # Fall back to a previously computed layout, then to the trivial
    # (identity) layout over the DAG's registers.
    if (self.initial_layout is None):
        if self.property_set['layout']:
            self.initial_layout = self.property_set['layout']
        else:
            self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())
    if (len(dag.qubits()) != len(self.initial_layout)):
        raise TranspilerError('The layout does not match the amount of qubits in the DAG')
    if (len(self.coupling_map.physical_qubits) != len(self.initial_layout)):
        raise TranspilerError('Mappers require to have the layout to be the same size as the coupling map')
    self.input_layout = self.initial_layout.copy()
    self.qregs = dag.qregs
    # Seed the RNG so swap selection is reproducible; log the seed used.
    if (self.seed is None):
        self.seed = np.random.randint(0, np.iinfo(np.int32).max)
    self.rng = np.random.RandomState(self.seed)
    logger.debug('StochasticSwap RandomState seeded with seed=%s', self.seed)
    new_dag = self._mapper(dag, self.coupling_map, trials=self.trials)
    return new_dag
Args:
dag (DAGCircuit): DAG to map.
Returns:
DAGCircuit: A mapped DAG.
Raises:
TranspilerError: if the coupling map or the layout are not
compatible with the DAG | codesearchnet |
def search(self, files=None, defined_fields=None, **kwargs):
    """Search files in the layout by metadata fields.

    Args:
        files (list): Optional list of filenames to search. If None,
            all files in the layout are scanned.
        defined_fields (list): Optional names of fields that must be
            defined in the JSON sidecar for a file to match, regardless
            of their value.
        kwargs: Search constraints; keys are metadata field names and
            values are the values to match (e.g. SliceTiming=0.017).
            String values may contain '*' wildcards.

    Returns:
        A list of filenames that match all constraints.
    """
    if defined_fields is None:
        defined_fields = []
    all_keys = set(defined_fields) | set(kwargs.keys())
    if not all_keys:
        raise ValueError('At least one field to search on must be passed.')
    if files is None:
        files = set(self.layout.files.keys())
    # Refresh the metadata index for every candidate file.
    for f in files:
        self.index_file(f)
    # Start from files that define every requested field.
    filesets = [set(self.key_index.get(k, [])) for k in all_keys]
    matches = reduce(lambda x, y: x & y, filesets)
    if files is not None:
        matches &= set(files)
    if not matches:
        return []

    def check_matches(f, key, val):
        if isinstance(val, six.string_types) and '*' in val:
            # Translate the '*' wildcard to a regex and match it against
            # the stored value. FIX: re.search(pattern, string) — the
            # original passed the stored value as the pattern and the
            # query as the string, i.e. the arguments were swapped.
            val = ('^%s$' % val).replace('*', '.*')
            return re.search(val, str(self.file_index[f][key])) is not None
        else:
            return val == self.file_index[f][key]

    # Narrow the candidates with each value constraint in turn.
    for k, val in kwargs.items():
        matches = list(filter(lambda x: check_matches(x, k, val), matches))
        if not matches:
            return []
    return matches
Args:
files (list): Optional list of names of files to search. If None,
all files in the layout are scanned.
defined_fields (list): Optional list of names of fields that must
be defined in the JSON sidecar in order to consider the file a
match, but which don't need to match any particular value.
kwargs: Optional keyword arguments defining search constraints;
keys are names of metadata fields, and values are the values
to match those fields against (e.g., SliceTiming=0.017 would
return all files that have a SliceTiming value of 0.017 in
their metadata).
Returns: A list of filenames that match all constraints. | codesearchnet |
def get_hostname(url):
    """Return the domain label of *url* (e.g. 'example' for
    'http://www.example.com'), caching parsed URLs.

    Args:
        url (str): The URL to get the hostname from.

    Returns:
        str: The hostname.
    """
    # Cache the urlparse result so repeated lookups are cheap.
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    labels = URLHelper.__cache[url].netloc.split(".")
    if len(labels) == 1:
        return labels[0]
    return ".".join(labels[-2:-1])
Args:
url (str): The URL to get the hostname from.
Returns:
str: The hostname | juraj-google-style |
def remove_server_data(server_id):
    """Remove a server's entry from the stored server data.

    Args:
        server_id (int): The server to remove from the server data.
    """
    logger.debug('Removing server from serverdata')
    data = datatools.get_data()
    if (server_id in data['discord']['servers']):
        data['discord']['servers'].pop(server_id)
        # NOTE(review): indentation in this dump is ambiguous — assuming
        # the write happens only when an entry was removed; confirm
        # against the original source.
        datatools.write_data(data)
Args:
server_id (int): The server to remove from the server data | codesearchnet |
def name_from_scope_name(name) -> str:
    """Return the op name for a scope name (strip one trailing '/').

    Args:
        name: the name of the scope.

    Returns:
        the name of the op (equal to the scope name minus any trailing
        slash).
    """
    if name and name.endswith('/'):
        return name[:-1]
    return name
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash). | github-repos |
def size(self, path: str) -> int:
    """Get size in bytes of a file on the FileSystem.

    Abstract: concrete FileSystem subclasses must override this.

    Args:
        path: string filepath of file.

    Returns: int size of file according to the FileSystem.

    Raises:
        ``BeamIOError``: if path doesn't exist.
    """
    raise NotImplementedError
Args:
path: string filepath of file.
Returns: int size of file according to the FileSystem.
Raises:
``BeamIOError``: if path doesn't exist. | github-repos |
def epoch_to_log_line_timestamp(epoch_time, time_zone=None):
    """Convert an epoch timestamp in ms to human-readable log line format.

    Args:
        epoch_time: integer, an epoch timestamp in ms.
        time_zone: instance of tzinfo, time zone information.

    Returns:
        A string that is the corresponding timestamp in log line
        timestamp format ('%m-%d %H:%M:%S.mmm').
    """
    s, ms = divmod(epoch_time, 1000)
    d = datetime.datetime.fromtimestamp(s, tz=time_zone)
    # Zero-pad milliseconds to three digits: plain str(ms) renders 5 ms
    # as '.5', which is ambiguous and misaligns log parsing.
    return d.strftime('%m-%d %H:%M:%S.') + '%03d' % ms
is readable for humans.
Args:
epoch_time: integer, an epoch timestamp in ms.
time_zone: instance of tzinfo, time zone information.
Using pytz rather than python 3.2 time_zone implementation for
python 2 compatibility reasons.
Returns:
A string that is the corresponding timestamp in log line timestamp
format. | juraj-google-style |
def heightmap_clamp(hm: np.ndarray, mi: float, ma: float) -> None:
    """Clamp all values on this heightmap between ``mi`` and ``ma``, in place.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        mi (float): The lower bound to clamp to.
        ma (float): The upper bound to clamp to.

    .. deprecated:: 2.0
        Use ``hm.clip(mi, ma, out=hm)`` (or reassign ``hm.clip(mi, ma)``)
        instead.
    """
    # clip() without `out` returns a copy and leaves `hm` untouched,
    # making the function a no-op; write the result back into `hm`.
    hm.clip(mi, ma, out=hm)
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound to clamp to.
ma (float): The upper bound to clamp to.
.. deprecated:: 2.0
Do ``hm.clip(mi, ma)`` instead. | juraj-google-style |
def create_forwarding_information_base(self, timeout=-1):
    """Generate the forwarding information base dump file for a logical
    interconnect.

    Args:
        timeout: Timeout in seconds. Waits for task completion by
            default; the timeout does not abort the operation in
            OneView, it only stops waiting for its completion.

    Returns: Interconnect Forwarding Information Base DataInfo.
    """
    uri = self.data["uri"] + self.FORWARDING_INFORMATION_PATH
    return self._helper.do_post(uri, None, timeout, None)
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns: Interconnect Forwarding Information Base DataInfo. | juraj-google-style |
def save_config(self):
    """Save the config file, creating config.restore as a backup first.

    Returns:
        -1: Error saving config
         0: Config saved successfully
         1: Config not saved (not modified)
    """
    if (not self.opts['dirty_config'][1]):
        if logger.isEnabledFor(logging.INFO):
            logger.info('Config not saved (not modified)')
        return 1
    # NOTE(review): the template string below was truncated in this dump;
    # the original file contains the full multi-line config template here.
    txt = '
    # Keep a restore copy in case the write below fails midway.
    copyfile(self.config_file, (self.config_file + '.restore'))
    if (self.opts['default_station'][1] is None):
        self.opts['default_station'][1] = '-1'
    try:
        with open(self.config_file, 'w') as cfgfile:
            cfgfile.write(txt.format(self.opts['player'][1], self.opts['default_playlist'][1], self.opts['default_station'][1], self.opts['default_encoding'][1], self.opts['connection_timeout'][1], self.opts['theme'][1], self.opts['use_transparency'][1], self.opts['confirm_station_deletion'][1], self.opts['confirm_playlist_reload'][1], self.opts['auto_save_playlist'][1]))
    except:
        if logger.isEnabledFor(logging.ERROR):
            logger.error('Error saving config')
        return (- 1)
    # Write succeeded: the backup is no longer needed (best-effort removal).
    try:
        remove((self.config_file + '.restore'))
    except:
        pass
    if logger.isEnabledFor(logging.INFO):
        logger.info('Config saved')
    self.opts['dirty_config'][1] = False
    return 0
Creates config.restore (back up file)
Returns:
-1: Error saving config
0: Config saved successfully
1: Config not saved (not modified | codesearchnet |
def get_output_embeddings(self) -> Union[None, keras.layers.Layer]:
    """Returns the model's output embeddings (via its LM head), or None.

    Returns:
        `tf.Variable`: The weights mapping hidden states to vocabulary,
        or None when the model has no LM head.
    """
    if self.get_lm_head() is not None:
        lm_head = self.get_lm_head()
        try:
            return lm_head.get_output_embeddings()
        except AttributeError:
            # The head may not be built yet; build the model and retry.
            logger.info('Building the model')
            self.build_in_name_scope()
            return lm_head().get_output_embeddings()
    return None
Returns:
`tf.Variable`: The new weights mapping vocabulary to hidden states. | github-repos |
def resolve_revision(self, dest, url, rev_options):
    """Resolve a revision to a new RevOptions object with the SHA1 of the
    branch, tag, or ref if found.

    Args:
        dest: the repository checkout directory.
        url: the remote repository URL.
        rev_options: a RevOptions object.
    """
    rev = rev_options.arg_rev
    sha, is_branch = self.get_revision_sha(dest, rev)
    if sha is not None:
        rev_options = rev_options.make_new(sha)
        rev_options.branch_name = rev if is_branch else None
        return rev_options
    # Not a known branch or tag: warn unless it plausibly is a SHA.
    if not looks_like_hash(rev):
        logger.warning(
            "Did not find branch or tag '%s', assuming revision or ref.",
            rev,
        )
    if not rev.startswith('refs/'):
        return rev_options
    # A ref (e.g. refs/pull/...): fetch it, then resolve FETCH_HEAD.
    self.run_command(
        ['fetch', '-q', url] + rev_options.to_args(),
        cwd=dest,
    )
    sha = self.get_revision(dest, rev='FETCH_HEAD')
    rev_options = rev_options.make_new(sha)
    return rev_options
branch, tag, or ref if found.
Args:
rev_options: a RevOptions object. | juraj-google-style |
def ricker(f, length, dt):
    """A Ricker wavelet.

    Args:
        f (float): frequency in Hz, e.g. 25 Hz.
        length (float): length in s, e.g. 0.128.
        dt (float): sample interval in s, e.g. 0.001.

    Returns:
        tuple. time basis, amplitude values.
    """
    n = int(length / dt)
    # Keep the endpoints as floats: the original int() truncation
    # collapsed the time basis to all zeros for sub-second lengths
    # (e.g. the documented 0.128 s example).
    t = np.linspace(-length / 2, (length - dt) / 2, n)
    y = (1. - 2. * (np.pi ** 2) * (f ** 2) * (t ** 2)) * np.exp(-(np.pi ** 2) * (f ** 2) * (t ** 2))
    return t, y
Args:
f (float): frequency in Haz, e.g. 25 Hz.
length (float): Length in s, e.g. 0.128.
dt (float): sample interval in s, e.g. 0.001.
Returns:
tuple. time basis, amplitude values. | juraj-google-style |
def __init__(self, context):
    """Initializes the Storage helper with context information.

    Args:
        context: a Context object providing project_id and credentials.
    """
    self._credentials = context.credentials
    self._project_id = context.project_id
Args:
context: a Context object providing project_id and credentials. | juraj-google-style |
def _start_reader_thread(self, stream, chunks):
import io
import threading
def target():
while True:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
if (not chunk):
break
chunks.append(chunk)
thread = threading.Thread(target=target)
thread.start()
return thread | Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread | codesearchnet |
def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
    """Partition into non-overlapping windows with padding if needed.

    Args:
        hidden_states (tensor): input tokens with shape
            [batch_size, height, width, channel].
        window_size (int): window size.

    Returns:
        windows: windows after partition with shape
            [batch_size * num_windows, window_size, window_size, channel].
        (pad_height, pad_width): padded height and width before partition.
    """
    batch_size, height, width, channel = hidden_states.shape

    # Pad bottom/right so height and width become multiples of window_size.
    pad_h = (window_size - height % window_size) % window_size
    pad_w = (window_size - width % window_size) % window_size
    hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
    pad_height, pad_width = (height + pad_h, width + pad_w)

    # Split the padded grid into window_size x window_size tiles, then
    # flatten the tiles into the batch dimension. (The reshape line was
    # truncated in the source dump; reconstructed here.)
    hidden_states = hidden_states.reshape(
        batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel
    )
    windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)
    return (windows, (pad_height, pad_width))
Partition into non-overlapping windows with padding if needed.
hidden_states (tensor): input tokens with [batch_size, height, width, channel]. window_size (int): window
size.
Returns:
windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
(pad_height, pad_width): padded height and width before partition | github-repos |
def set_extra_selections(self, key, extra_selections):
    """Set the extra selections stored under *key*.

    Draw orders are assigned so that current_cell and current_line stay
    in the background and don't cover other decorations.
    NOTE: previous decorations added under the same key are removed.

    Args:
        key (str): name of the extra selections group.
        extra_selections (list of sourcecode.api.TextDecoration): the
            selections to register.
    """
    order = DRAW_ORDERS.get(key)
    if order is None:
        order = DRAW_ORDERS.get('on_top')
    for selection in extra_selections:
        selection.draw_order = order
    self.clear_extra_selections(key)
    self.extra_selections_dict[key] = extra_selections
Also assign draw orders to leave current_cell and current_line
in the backgrund (and avoid them to cover other decorations)
NOTE: This will remove previous decorations added to the same key.
Args:
key (str) name of the extra selections group.
extra_selections (list of sourcecode.api.TextDecoration). | juraj-google-style |
def get_keys_from_ldap(self, username=None):
    """Fetch SSH public keys from LDAP.

    Args:
        username: Username associated with keys to fetch (optional).

    Returns:
        Dict in '{username: [public keys]}' format.
    """
    search_filter = ['(sshPublicKey=*)']
    if username is not None:
        search_filter.append('(uid={})'.format(username))
    results = self.client.search(search_filter, ['uid', 'sshPublicKey'])
    return {entry.uid.value: entry.sshPublicKey.values for entry in results}
Args:
username Username associated with keys to fetch (optional)
Returns:
Array of dictionaries in '{username: [public keys]}' format | juraj-google-style |
def _create_state_graph(self, name):
    """Creates the graph nodes that hold the state of the Module.

    Args:
        name: name scope to create the state graph in.

    Returns:
        A tuple consisting of:
        variables_tensor_map: a map from tensor names in the original
            graph def to the created Variables objects.
        state_map: a map from tensor names in the original graph def to
            the instantiated tensors to be used as a state_map.
    """
    import_collections = [
        tf_v1.GraphKeys.GLOBAL_VARIABLES,
        tf_v1.GraphKeys.MODEL_VARIABLES,
        tf_v1.GraphKeys.TABLE_INITIALIZERS,
        tf_v1.GraphKeys.ASSET_FILEPATHS,
        tf_v1.GraphKeys.COND_CONTEXT,
        tf_v1.GraphKeys.WHILE_CONTEXT,
    ]
    # Only trainable modules need their trainable-variable and
    # regularization-loss collections imported.
    if self._trainable:
        import_collections.extend([tf_v1.GraphKeys.TRAINABLE_VARIABLES,
                                   tf_v1.GraphKeys.REGULARIZATION_LOSSES])
    absolute_scope_name = tf_v1.get_default_graph().unique_name(
        name, mark_as_used=False)
    relative_scope_name = absolute_scope_name.split("/")[-1]
    # unique_name() must not have needed to rename the requested scope.
    assert relative_scope_name == name
    meta_graph = meta_graph_pb2.MetaGraphDef()
    meta_graph.CopyFrom(self._meta_graph)
    meta_graph_lib.filter_collections(meta_graph, import_collections)
    meta_graph_lib.prefix_shared_name_attributes(meta_graph,
                                                 absolute_scope_name)
    tf_v1.train.import_meta_graph(
        meta_graph,
        input_map={},
        import_scope=relative_scope_name)
    # Map original (unscoped) variable names to the imported Variables.
    variables_tensor_map = {}
    for var in tf_v1.global_variables():
        if var.op.name.startswith(absolute_scope_name + "/"):
            variables_tensor_map[var.name[len(absolute_scope_name)+1:]] = var

    def _get_tensor(tensor_name):
        # Resolve an original tensor name inside the imported scope.
        return tf_v1.get_default_graph().get_tensor_by_name(
            meta_graph_lib.prepend_name_scope(
                tensor_name, import_scope=absolute_scope_name))

    state_op_names = list_registered_stateful_ops_without_inputs()
    state_map = get_state_map(meta_graph, state_op_names, set(), _get_tensor)
    return variables_tensor_map, state_map
Args:
name: name scope to create the state graph in.
Returns:
A tuple consisting of:
variables_tensor_map: a map from tensor names in the original graph def
to the created Variables objects.
state_map: a map from tensors names in the original graph def to the
instantiated tensors to be used as a state_map. | juraj-google-style |
def __init__(self, port=None, max_length=UBINT16_MAX_VALUE):
    """Create an ActionOutput with the optional parameters below.

    Args:
        port (:class:`~pyof.v0x01.common.phy_port.Port` or :class:`int`):
            Output port.
        max_length (int): Max length to send to controller.
    """
    # OFPAT_OUTPUT actions have a fixed wire length of 8 bytes.
    super().__init__(action_type=ActionType.OFPAT_OUTPUT, length=8)
    self.port = port
    self.max_length = max_length
Args:
port (:class:`~pyof.v0x01.common.phy_port.Port` or :class:`int`):
Output port.
max_length (int): Max length to send to controller. | juraj-google-style |
def get_trace(self, project_id, trace_id):
    """Gets a single trace by its ID.

    Args:
        project_id (str): Required. ID of the Cloud project where the
            trace data is stored.
        trace_id (str): ID of the trace to return.

    Returns:
        A Trace dict.
    """
    protobuf = self._gapic_api.get_trace(project_id, trace_id)
    return _parse_trace_pb(protobuf)
Args:
trace_id (str): ID of the trace to return.
project_id (str): Required. ID of the Cloud project where the trace
data is stored.
Returns:
A Trace dict. | juraj-google-style |
def init_app(self, app, client_id=None):
    """Initialize the Micropub extension when app wasn't given to the
    constructor.

    Args:
        app (flask.Flask): the flask application to extend.
        client_id (string, optional): the IndieAuth client id, displayed
            when the user is asked to authorize this client. If not
            provided, the app name will be used.
    """
    # Respect a client id that was already configured.
    if self.client_id:
        return
    self.client_id = client_id if client_id else app.name
self.client_id = app.name | Initialize the Micropub extension if it was not given app
in the constructor.
Args:
app (flask.Flask): the flask application to extend.
client_id (string, optional): the IndieAuth client id, will be
displayed when the user is asked to authorize this client. If not
provided, the app name will be used. | juraj-google-style |
def delete(self, filename):
    """Delete *filename* from every child repository that supports it.

    Each repository determines the correct location to delete from
    (Scripts vs. Packages). This will not remove the corresponding
    Package or Script object from the JSS's database!

    Args:
        filename: The filename you wish to delete (do not include a
            path).
    """
    for repo in self._children:
        deleter = getattr(repo, 'delete', None)
        if deleter is not None:
            deleter(filename)
Individual repositories will determine correct location to
delete from (Scripts vs. Packages).
This will not remove the corresponding Package or Script object
from the JSS's database!
Args:
filename: The filename you wish to delete (do not include a
path). | codesearchnet |
def from_authorized_user_file(cls, filename, scopes=None):
    """Creates a Credentials instance from an authorized user json file.

    Args:
        filename (str): The path to the authorized user json file.
        scopes (Sequence[str]): Optional list of scopes to include in
            the credentials.

    Returns:
        google.oauth2.credentials.Credentials: The constructed
        credentials.

    Raises:
        ValueError: If the file is not in the expected format.
    """
    with io.open(filename, 'r', encoding='utf-8') as json_file:
        info = json.load(json_file)
    return cls.from_authorized_user_info(info, scopes)
Args:
filename (str): The path to the authorized user json file.
scopes (Sequence[str]): Optional list of scopes to include in the
credentials.
Returns:
google.oauth2.credentials.Credentials: The constructed
credentials.
Raises:
ValueError: If the file is not in the expected format. | juraj-google-style |
def get_centered_molecule(self):
    """Returns a Molecule centered at the center of mass.

    Returns:
        Molecule centered with center of mass at origin.
    """
    center = self.center_of_mass
    new_coords = (np.array(self.cart_coords) - center)
    # Rebuild through the class constructor so charge, spin multiplicity
    # and site properties carry over to the translated copy.
    return self.__class__(self.species_and_occu, new_coords, charge=self._charge, spin_multiplicity=self._spin_multiplicity, site_properties=self.site_properties)
Returns:
Molecule centered with center of mass at origin. | codesearchnet |
def singleprint(self) -> fingerprinting_pywrap.Singleprint:
    """Canonical fingerprinting ID for a SavedModel.

    Uniquely identifies a SavedModel based on the regularized fingerprint
    attributes. (saved_model_checksum is sensitive to immaterial changes
    and thus non-deterministic, so it is excluded.)

    Returns:
        The string concatenation of `graph_def_program_hash`,
        `signature_def_hash`, `saved_object_graph_hash`, and
        `checkpoint_hash` fingerprint attributes (separated by '/').

    Raises:
        ValueError: If the fingerprint fields cannot be used to construct
            the singleprint.
    """
    try:
        return fingerprinting_pywrap.Singleprint(self.graph_def_program_hash, self.signature_def_hash, self.saved_object_graph_hash, self.checkpoint_hash)
    except (TypeError, fingerprinting_pywrap.FingerprintException) as e:
        # Re-raise with the offending values; `from None` drops the
        # chained pywrap traceback.
        raise ValueError(f'Encounted invalid fingerprint values when constructing singleprint.graph_def_program_hash: {self.graph_def_program_hash}signature_def_hash: {self.signature_def_hash}saved_object_graph_hash: {self.saved_object_graph_hash}checkpoint_hash: {self.checkpoint_hash}{e}') from None
Uniquely identifies a SavedModel based on the regularized fingerprint
attributes. (saved_model_checksum is sensitive to immaterial changes and
thus non-deterministic.)
Returns:
The string concatenation of `graph_def_program_hash`,
`signature_def_hash`, `saved_object_graph_hash`, and `checkpoint_hash`
fingerprint attributes (separated by '/').
Raises:
ValueError: If the fingerprint fields cannot be used to construct the
singleprint. | github-repos |
def _ProcessGRRMessages(self, fs_client_id, grr_messages):
    """Handles messages from GRR clients received via Fleetspeak.

    Updates the client's last-ping timestamp before processing begins.

    Args:
        fs_client_id: The Fleetspeak client-id for the client.
        grr_messages: An Iterable of GrrMessages.
    """
    grr_client_id = fleetspeak_utils.FleetspeakIDToGRRID(fs_client_id)
    # Fleetspeak authenticated the transport; mark each message as such.
    for grr_message in grr_messages:
        grr_message.source = grr_client_id
        grr_message.auth_state = (
            rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
    client_is_new = self.frontend.EnrolFleetspeakClient(client_id=grr_client_id)
    # New clients get their metadata written during enrolment; only
    # refresh last_ping for clients we already know about.
    if not client_is_new and data_store.RelationalDBEnabled():
        data_store.REL_DB.WriteClientMetadata(
            grr_client_id, last_ping=rdfvalue.RDFDatetime.Now())
    self.frontend.ReceiveMessages(
        client_id=grr_client_id, messages=grr_messages)
This method updates the last-ping timestamp of the client before beginning
processing.
Args:
fs_client_id: The Fleetspeak client-id for the client.
grr_messages: An Iterable of GrrMessages. | juraj-google-style |
def handle(self, message):
    """Dispatch *message* to the handler matching its opcode.

    Args:
        message (dict): Full message from the Discord websocket
            connection; must contain an 'op' key.
    """
    opcode = message['op']
    dispatch = {10: self.on_hello, 11: self.on_heartbeat, 0: self.on_message}
    handler = dispatch.get(opcode)
    if handler is None:
        logger.debug("Not a message we handle: OPCODE {}".format(opcode))
        return
    handler(message)
Args:
message (dict): Full message from Discord websocket connection | juraj-google-style |
def save_subset_weights_to_hdf5_group(f, weights):
    """Save top-level weights of a model to a HDF5 group.

    Args:
        f: HDF5 group.
        weights: List of weight variables.
    """
    weight_values = [backend.convert_to_numpy(w) for w in weights]
    weight_names = [str(w.path).encode('utf8') for w in weights]
    save_attributes_to_hdf5_group(f, 'weight_names', weight_names)
    for name, val in zip(weight_names, weight_values):
        param_dset = f.create_dataset(name, val.shape, dtype=val.dtype)
        # Scalar (0-d) datasets must be written with the empty-tuple index.
        if not val.shape:
            param_dset[()] = val
        else:
            param_dset[:] = val
Args:
f: HDF5 group.
weights: List of weight variables. | github-repos |
def _FormatTokenData(self, token_type, token_data):
token_data_format_function = self._TOKEN_DATA_FORMAT_FUNCTIONS.get(
token_type)
if token_data_format_function:
token_data_format_function = getattr(
self, token_data_format_function, None)
if not token_data_format_function:
return {}
return token_data_format_function(token_data) | Formats the token data as a dictionary of values.
Args:
token_type (int): token type.
token_data (object): token data.
Returns:
dict[str, str]: formatted token values or an empty dictionary if no
formatted token values could be determined. | juraj-google-style |
def build_pipeline_args(cls, project, script, job_params, task_params, reserved_labels, preemptible, logging_uri, scopes, keep_alive):
    """Builds pipeline args for execution.

    Args:
        project: string name of the project.
        script: body of the script to execute.
        job_params: dict of labels, envs, inputs and outputs for this job.
        task_params: dict of labels, envs, inputs and outputs for this task.
        reserved_labels: dict of reserved labels (e.g. task-id,
            task-attempt).
        preemptible: use a preemptible VM for the job.
        logging_uri: path for job logging output.
        scopes: list of scopes.
        keep_alive: seconds to keep the VM alive on failure.

    Returns:
        A nested dictionary with one entry under the key pipelineArgs
        containing the pipeline arguments.
    """
    inputs = {}
    inputs.update({SCRIPT_VARNAME: script})
    # Non-empty environment variables from both job and task.
    inputs.update({var.name: var.value for var in (job_params['envs'] | task_params['envs']) if var.value})
    # Non-recursive inputs that have a value.
    inputs.update({var.name: var.uri for var in (job_params['inputs'] | task_params['inputs']) if ((not var.recursive) and var.value)})
    outputs = {}
    for var in (job_params['outputs'] | task_params['outputs']):
        if (var.recursive or (not var.value)):
            continue
        # Wildcard outputs pass the directory path; exact ones the URI.
        if ('*' in var.uri.basename):
            outputs[var.name] = var.uri.path
        else:
            outputs[var.name] = var.uri
    labels = {}
    # Reserved labels take lowest precedence; task labels the highest.
    labels.update({label.name: (label.value if label.value else '') for label in ((reserved_labels | job_params['labels']) | task_params['labels'])})
    args = {'pipelineArgs': {'projectId': project, 'resources': {'preemptible': preemptible}, 'inputs': inputs, 'outputs': outputs, 'labels': labels, 'serviceAccount': {'email': 'default', 'scopes': scopes}, 'logging': {'gcsPath': logging_uri}}}
    if keep_alive:
        args['pipelineArgs']['keep_vm_alive_on_failure_duration'] = ('%ss' % keep_alive)
    return args
Args:
project: string name of project.
script: Body of the script to execute.
job_params: dictionary of values for labels, envs, inputs, and outputs
for this job.
task_params: dictionary of values for labels, envs, inputs, and outputs
for this task.
reserved_labels: dictionary of reserved labels (e.g. task-id,
task-attempt)
preemptible: use a preemptible VM for the job
logging_uri: path for job logging output.
scopes: list of scope.
keep_alive: Seconds to keep VM alive on failure
Returns:
A nested dictionary with one entry under the key pipelineArgs containing
the pipeline arguments. | codesearchnet |
def _build_insert_compiler(self, rows: List[Dict]):
    """Builds the SQL compiler for an insert (upsert) query.

    Arguments:
        rows:
            A list of dictionaries, where each entry describes a record
            to insert.

    Returns:
        The SQL compiler for the insert.

    Raises:
        SuspiciousOperation: If rows have differing field configurations.
    """
    objs = []
    # All rows must share the field configuration of the first row.
    field_count = len(rows[0])
    for (index, row) in enumerate(rows):
        if (field_count != len(row)):
            raise SuspiciousOperation('In bulk upserts, you cannot have rows with different field configurations. Row {0} has a different field config than the first row.'.format(index))
        objs.append(self.model(**row))
    self._for_write = True
    (insert_fields, update_fields) = self._get_upsert_fields(rows[0])
    # Carry the configured conflict handling onto the insert query.
    query = PostgresInsertQuery(self.model)
    query.conflict_action = self.conflict_action
    query.conflict_target = self.conflict_target
    query.index_predicate = self.index_predicate
    query.values(objs, insert_fields, update_fields)
    connection = django.db.connections[self.db]
    compiler = PostgresInsertCompiler(query, connection, self.db)
    return compiler
Arguments:
rows:
A list of dictionaries, where each entry
describes a record to insert.
Returns:
The SQL compiler for the insert. | codesearchnet |
def upload_benchmark_data(client, data):
    """Parse benchmark data and use the client to upload it to the datastore.

    Parses the serialized JSON test-results format, builds a Test entity
    plus one Entry entity per timing entry, and uploads them to the
    datastore in a single batch using the client connection.

    Args:
        client: datastore client connection.
        data: JSON-encoded benchmark data.
    """
    test_result = json.loads(data)
    test_name = str(test_result['name'])
    start_time = datetime.datetime.utcfromtimestamp(float(test_result['startTime']))
    entities = []
    test_entity = datastore.Entity(client.key('Test'), exclude_from_indexes=['info'])
    test_entity.update({'test': test_name, 'start': start_time, 'info': str(data)})
    entities.append(test_entity)
    for entry in test_result['entries'].get('entry', []):
        entry_entity = datastore.Entity(client.key('Entry'), exclude_from_indexes=['info'])
        entry_entity.update({
            'test': test_name,
            'start': start_time,
            'entry': str(entry['name']),
            'timing': entry['wallTime'],
            'info': str(json.dumps(entry)),
        })
        entities.append(entry_entity)
    client.put_multi(entities)
Parse the given benchmark data from the serialized JSON-format used to write
the test results file. Create the different datastore Entities from that data
and upload them to the datastore in a batch using the client connection.
Args:
client: datastore client connection
data: JSON-encoded benchmark data | github-repos |
def console_get_background_flag(con: tcod.console.Console) -> int:
    """Return the current blend mode of this console.

    Args:
        con (Console): Any Console instance.

    .. deprecated:: 8.5
        Check :any:`Console.default_bg_blend` instead.
    """
    blend_mode = lib.TCOD_console_get_background_flag(_console(con))
    return int(blend_mode)
Args:
con (Console): Any Console instance.
.. deprecated:: 8.5
Check :any:`Console.default_bg_blend` instead. | juraj-google-style |
def save_own_variables(self, store):
    """Saves the state of the layer.

    You can override this method to take full control of how the state of
    the layer is saved upon calling `model.save()`.

    Args:
        store: Dict where the state of the model will be saved.
    """
    # Trainable variables first, then non-trainable ones, keyed by their
    # position in that combined ordering.
    variables = self._trainable_variables + self._non_trainable_variables
    for index, variable in enumerate(variables):
        store[str(index)] = variable
You can override this method to take full control of how the state of
the layer is saved upon calling `model.save()`.
Args:
store: Dict where the state of the model will be saved. | github-repos |
def date_added(self, date_added):
    """Updates the security label's date_added and pushes the change.

    Args:
        date_added: Converted to %Y-%m-%dT%H:%M:%SZ date format.
    """
    formatted = self._utils.format_datetime(date_added, date_format='%Y-%m-%dT%H:%M:%SZ')
    # Keep the local copy in sync before issuing the update request.
    self._data['dateAdded'] = formatted
    request = self._base_request
    request['dateAdded'] = formatted
    return self._tc_requests.update(request, owner=self.owner)
Args:
date_added: Converted to %Y-%m-%dT%H:%M:%SZ date format | codesearchnet |
def __driver_completer(self, toks, text, state):
    """Driver-level readline completer.

    Args:
        toks: A list of tokens, tokenized from the original input line.
        text: A string, the text to be replaced if a completion candidate is
            chosen.
        state: An integer, the index of the candidate out of the list of
            candidates.

    Returns:
        A string, the candidate.
    """
    # readline calls this repeatedly with state = 0, 1, 2, ...; the candidate
    # list is only recomputed on the first call (state == 0) of each cycle.
    if state != 0:
        return self.__completion_candidates[state]
    # Completing the command name itself: no tokens yet, or the cursor is
    # still inside the first (and only) token.
    if not toks or (len(toks) == 1 and text == toks[0]):
        try:
            self.__completion_candidates = self.__complete_cmds(text)
        except:
            # A completer must never raise into readline: report the
            # traceback and offer no candidates instead.
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
        return self.__completion_candidates[state]
    # Otherwise we are completing an argument of the leading command.
    cmd = toks[0]
    args = toks[1:] if len(toks) > 1 else None
    if text and args:
        # The partial token being completed is the last arg; drop it so the
        # registered completer only sees the finished arguments.
        del args[-1]
    if cmd in self._completer_map.keys():
        # Dispatch to the completer method registered for this command.
        completer_name = self._completer_map[cmd]
        completer_method = getattr(self, completer_name)
        try:
            self.__completion_candidates = completer_method(cmd, args, text)
        except:
            # Same never-raise policy as above.
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
    else:
        self.__completion_candidates = []
    return self.__completion_candidates[state] | Driver level completer.
Arguments:
toks: A list of tokens, tokenized from the original input line.
text: A string, the text to be replaced if a completion candidate is
chosen.
state: An integer, the index of the candidate out of the list of
candidates.
Returns:
A string, the candidate. | juraj-google-style |
def _map_query_path_to_location_info(query_metadata_table):
query_path_to_location_info = {}
for location, location_info in query_metadata_table.registered_locations:
if not isinstance(location, Location):
continue
if location.query_path in query_path_to_location_info:
equivalent_location_info = query_path_to_location_info[location.query_path]
if not _location_infos_equal(location_info, equivalent_location_info):
raise AssertionError(
u'Differing LocationInfos at query_path {} between {} and {}. Expected '
u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth '
u'and types to be equal for LocationInfos sharing the same query path.'.format(
location.query_path, location_info, equivalent_location_info))
query_path_to_location_info[location.query_path] = location_info
return query_path_to_location_info | Create a map from each query path to a LocationInfo at that path.
Args:
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Returns:
Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path. | juraj-google-style |
def __init__(self, message=None, parser_chain=None, path_spec=None):
    """Initializes an extraction warning.

    Args:
        message (Optional[str]): warning message.
        parser_chain (Optional[str]): parser chain to which the warning
            applies.
        path_spec (Optional[dfvfs.PathSpec]): path specification of the file
            entry to which the warning applies.
    """
    super(ExtractionWarning, self).__init__()
    self.message, self.parser_chain, self.path_spec = (
        message, parser_chain, path_spec)
Args:
message (Optional[str]): warning message.
parser_chain (Optional[str]): parser chain to which the warning applies.
path_spec (Optional[dfvfs.PathSpec]): path specification of the file entry
to which the warning applies. | juraj-google-style |
def mark_typed_map(self, name, type_object):
    """Mark a property as containing a map of str to serializable objects.

    This convenience method allows you to avoid having to call
    ``mark_complex()`` whenever you need to serialize a dict of objects.
    This method requires that all values of the given dict be of a single
    class that contains a ``dump()`` method and a ``Restore()`` classmethod
    where ``type_object.Restore(x.dump()) == x``.

    Args:
        name (str): The name of the complex property.
        type_object: The class object that will be contained inside
            this dict.

    Raises:
        ArgumentError: If ``type_object`` is missing ``dump()`` or
            ``Restore()``.
    """
    if (not hasattr(type_object, 'dump')):
        raise ArgumentError(('The passed type object %s is missing required method: dump()' % type_object))
    if (not hasattr(type_object, 'Restore')):
        raise ArgumentError(('The passed type object %s is missing required method: Restore()' % type_object))

    def _dump_map(obj):
        """Serialize the dict by dumping each contained value."""
        if (obj is None):
            return None
        if (not isinstance(obj, dict)):
            # Bug fix: this property is marked as a map, not a list; the old
            # message said "marked as list" (copied from the list variant).
            raise DataError(('Property %s marked as map was not a dict: %s' % (name, repr(obj))))
        return {key: val.dump() for (key, val) in obj.items()}

    def _restore_map(obj):
        """Rebuild the dict by restoring each dumped value."""
        if (obj is None):
            return obj
        return {key: type_object.Restore(val) for (key, val) in obj.items()}

    self.mark_complex(name, _dump_map, _restore_map)
This convenience method allows you to avoid having to call
``mark_complex()`` whenever you need to serialize a dict of objects.
This method requires that all members of the given dict be of a single
class that contains a dump() method and a Restore() class method where
type_object.Restore(x.dump()) == x.
Args:
name (str): The name of the complex property.
type_object: The class object that will be contained inside
this dict. | codesearchnet |
def process_node(layer, node_data):
    """Deserialize a node.

    Resolves the inbound tensors described by `node_data` and re-invokes
    `layer` on them, recreating the node of the functional graph.

    NOTE(review): relies on names from the enclosing scope (`created_layers`,
    `add_unprocessed_node`, `get_node_index`, `node_index_map`,
    `node_count_by_layer`, `_deserialize_keras_tensors`, `nest`,
    `node_module`, `base_layer_utils`) -- presumably a closure inside the
    functional-model reconstruction routine; confirm against the full file.

    Args:
        layer: layer instance.
        node_data: Nested structure of `ListWrapper`.

    Raises:
        ValueError: In case of improperly formatted `node_data`.
    """
    input_tensors = []
    for input_data in nest.flatten(node_data):
        # Each entry is [layer_name, node_index, tensor_index] with an
        # optional fourth element holding call kwargs.
        input_data = input_data.as_list()
        inbound_layer_name = input_data[0]
        inbound_node_index = input_data[1]
        inbound_tensor_index = input_data[2]
        if len(input_data) == 3:
            kwargs = {}
        elif len(input_data) == 4:
            kwargs = input_data[3]
            try:
                kwargs = _deserialize_keras_tensors(kwargs, created_layers)
            except IndexError:
                # A Keras tensor referenced by the kwargs is not created yet:
                # defer this node and retry on a later pass.
                add_unprocessed_node(layer, node_data)
                return
        else:
            raise ValueError('Improperly formatted model config.')
        if inbound_layer_name != node_module._CONSTANT_VALUE:
            inbound_layer = created_layers[inbound_layer_name]
            inbound_node_index = get_node_index(inbound_layer, inbound_node_index)
            if inbound_node_index is None:
                # The inbound layer's node does not exist yet; defer.
                add_unprocessed_node(layer, node_data)
                return
            inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
            input_tensors.append(nest.flatten(inbound_node.outputs)[inbound_tensor_index])
        else:
            # Constant input: the third slot appears to carry the value
            # itself rather than a tensor index -- confirm with callers.
            input_tensors.append(inbound_tensor_index)
    input_tensors = nest.pack_sequence_as(node_data, input_tensors)
    if input_tensors is not None:
        if not layer._preserve_input_structure_in_config:
            input_tensors = base_layer_utils.unnest_if_single_tensor(input_tensors)
        # Calling the layer recreates the node and its output tensors.
        output_tensors = layer(input_tensors, **kwargs)
        # Record which output node index this (layer, occurrence) produced.
        output_index = nest.flatten(output_tensors)[0]._keras_history.node_index
        node_index_map[layer.name, node_count_by_layer[layer]] = output_index
        node_count_by_layer[layer] += 1 | Deserialize a node.
Args:
layer: layer instance.
node_data: Nested structure of `ListWrapper`.
Raises:
ValueError: In case of improperly formatted `node_data`. | github-repos |
def g_step(self, gen_frames, fake_logits_stop):
    """Performs the generator step in computing the GAN loss.

    Args:
        gen_frames: Generated frames.
        fake_logits_stop: Logits corresponding to the generated frames as per
            the discriminator. Assumed to have a stop-gradient term.

    Returns:
        gan_g_loss_pos_d: Loss.
        gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on
            generator.
    """
    generator_loss_funcs = {
        "least_squares": gan_losses.least_squares_generator_loss,
        "cross_entropy": gan_losses.modified_generator_loss,
        "wasserstein": gan_losses.wasserstein_generator_loss,
    }
    fake_logits = self.discriminator(gen_frames)
    tf.summary.scalar("mean_fake_logits", tf.reduce_mean(fake_logits))

    loss_func = generator_loss_funcs[self.hparams.gan_loss]
    pos_d_loss = loss_func(
        discriminator_gen_outputs=fake_logits, add_summaries=True)
    neg_d_loss = -loss_func(
        discriminator_gen_outputs=fake_logits_stop, add_summaries=True)
    return pos_d_loss, neg_d_loss
Args:
gen_frames: Generated frames
fake_logits_stop: Logits corresponding to the generated frames as per
the discriminator. Assumed to have a stop-gradient term.
Returns:
gan_g_loss_pos_d: Loss.
gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator. | juraj-google-style |
def validate_file(fn, options=None):
    """Validate the input document `fn` according to the options passed in.

    If any exceptions are raised during validation, no further validation
    will take place.

    Args:
        fn: The filename of the JSON file to be validated.
        options: An instance of ``ValidationOptions``.

    Returns:
        An instance of FileValidationResults.
    """
    results = FileValidationResults(filepath=fn)
    output.info("Performing JSON schema validation on %s" % fn)

    if not options:
        options = ValidationOptions(files=fn)

    try:
        with open(fn) as instance_file:
            results.object_results = validate(instance_file, options)
    except Exception as ex:
        if 'Expecting value' in str(ex):
            # JSON decoding error: pull the line number out of the message.
            line_no = str(ex).split()[3]
            results.fatal = ValidationErrorResults(
                'Invalid JSON input on line %s' % line_no
            )
        else:
            results.fatal = ValidationErrorResults(ex)

        msg = ("Unexpected error occurred with file '{fn}'. No further "
               "validation will be performed: {error}")
        output.info(msg.format(fn=fn, error=str(ex)))

    # The file is valid only when every object validated and nothing fatal
    # happened along the way.
    results.is_valid = (all(object_result.is_valid
                            for object_result in results.object_results)
                        and not results.fatal)
    return results
If any exceptions are raised during validation, no further validation
will take place.
Args:
fn: The filename of the JSON file to be validated.
options: An instance of ``ValidationOptions``.
Returns:
An instance of FileValidationResults. | juraj-google-style |
def _get_stack_depth(package, fqdn, defdepth=_def_stackdepth):
    """Loads the stack depth settings from the config file for the specified
    package.

    Args:
        package (str): name of the package to get stack depth info for.
        fqdn (str): fully qualified domain name of the member in the package.
        defdepth (int): default depth when one has not been configured.
    """
    global _stack_config
    # Lazily populate the per-package cache from the package's config file.
    if package not in _stack_config:
        from acorn.config import settings
        spack = settings(package)
        _stack_config[package] = {}
        secname = 'logging.depth'
        if spack.has_section(secname):
            for ofqdn in spack.options(secname):
                _stack_config[package][ofqdn] = spack.getint(secname, ofqdn)

    configured = _stack_config[package]
    if fqdn in configured:
        result = configured[fqdn]
    elif '*' in configured:
        result = configured['*']
    else:
        # Nothing configured for this member or as a wildcard: use the
        # default without logging, matching the original behavior.
        return defdepth
    msg.gen('Using {} for {} stack depth.'.format(result, fqdn), 3)
    return result
package.
Args:
package (str): name of the package to get stack depth info for.
fqdn (str): fully qualified domain name of the member in the package.
defdepth (int): default depth when one has not been configured. | codesearchnet |
def training_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[torch.Tensor]=None) -> torch.Tensor:
    """Perform a training step on a batch of inputs.

    Subclass and override to inject custom behavior.

    Args:
        model (`nn.Module`):
            The model to train.
        inputs (`Dict[str, Union[torch.Tensor, Any]]`):
            The inputs and targets of the model. The dictionary will be
            unpacked before being fed to the model. Most models expect the
            targets under the argument `labels`.
        num_items_in_batch (`Optional[torch.Tensor]`):
            When provided, loss normalization is delegated to the loss
            computation instead of dividing by gradient accumulation steps.

    Returns:
        `torch.Tensor`: The detached training loss on this batch.
    """
    model.train()
    # Some optimizers expose a train()/eval() mode of their own; switch it.
    if hasattr(self.optimizer, 'train') and callable(self.optimizer.train):
        self.optimizer.train()
    inputs = self._prepare_inputs(inputs)
    if is_sagemaker_mp_enabled():
        # SageMaker model parallelism runs forward + backward in one call and
        # returns early with the reduced loss.
        loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
        return loss_mb.reduce_mean().detach().to(self.args.device)
    with self.compute_loss_context_manager():
        loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch)
    # Drop the inputs before (optionally) emptying the device cache below.
    del inputs
    if self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0:
        # Periodically release cached memory on whichever backend is active.
        if is_torch_xpu_available():
            torch.xpu.empty_cache()
        elif is_torch_mlu_available():
            torch.mlu.empty_cache()
        elif is_torch_musa_available():
            torch.musa.empty_cache()
        elif is_torch_npu_available():
            torch.npu.empty_cache()
        elif is_torch_mps_available():
            torch.mps.empty_cache()
        elif is_torch_hpu_available():
            logger.warning('`torch_empty_cache_steps` is set but HPU device/backend does not support empty_cache().')
        else:
            torch.cuda.empty_cache()
    kwargs = {}
    # LOMO-family optimizers require the current learning rate at backward().
    if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
        kwargs['learning_rate'] = self._get_learning_rate()
    if self.args.n_gpu > 1:
        # Average the per-replica losses from multi-GPU data parallelism.
        loss = loss.mean()
    if self.use_apex:
        from apex import amp
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        # Without a token-count normalizer or a custom loss function, scale
        # the loss down by the number of gradient-accumulation steps here.
        if (not self.model_accepts_loss_kwargs or num_items_in_batch is None) and self.compute_loss_func is None:
            loss = loss / self.args.gradient_accumulation_steps
        if self.accelerator.distributed_type == DistributedType.DEEPSPEED:
            # DeepSpeed performs its own gradient-accumulation scaling.
            kwargs['scale_wrt_gas'] = False
        self.accelerator.backward(loss, **kwargs)
    return loss.detach() | Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to train.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
Return:
`torch.Tensor`: The tensor with training loss on this batch. | github-repos |
def __spread__(y, yy, n, x, m):
nfac=[0,1,1,2,6,24,120,720,5040,40320,362880]
if m > 10. :
print('factorial table too small in spread')
return
ix=long(x)
if x == float(ix):
yy[ix]=yy[ix]+y
else:
ilo = long(x-0.5*float(m)+1.0)
ilo = min( max( ilo , 1 ), n-m+1 )
ihi = ilo+m-1
nden = nfac[m]
fac=x-ilo
for j in range(ilo+1,ihi+1): fac = fac*(x-j)
yy[ihi] = yy[ihi] + y*fac/(nden*(x-ihi))
for j in range(ihi-1,ilo-1,-1):
nden=(nden/(j+1-ilo))*(j-ihi)
yy[j] = yy[j] + y*fac/(nden*(x-j)) | Given an array yy(0:n-1), extirpolate (spread) a value y into
m actual array elements that best approximate the "fictional"
(i.e., possible noninteger) array element number x. The weights
used are coefficients of the Lagrange interpolating polynomial
Arguments:
y : the value to spread into yy.
yy : the target array, updated in place.
n : the length of yy.
x : the (possibly noninteger) target array index.
m : the number of array elements to spread y over.
Returns: | juraj-google-style |
def is_applicable_python_file(rel_path: str) -> bool:
    """Determines if a file should be included in incremental coverage analysis.

    Args:
        rel_path: The repo-relative file path being considered.

    Returns:
        Whether to include the file.
    """
    if not rel_path.endswith('.py'):
        return False
    # Include the file only when no ignore pattern matches it.
    return all(re.search(pattern, rel_path) is None
               for pattern in IGNORED_FILE_PATTERNS)
Args:
rel_path: The repo-relative file path being considered.
Returns:
Whether to include the file. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.