content stringlengths 39 9.28k | sha1 stringlengths 40 40 | id int64 8 710k |
|---|---|---|
import time
def dt_to_timestamp(datetime_):
    """Convert :class:`datetime` to a UNIX timestamp (seconds, as float).

    Uses local-time semantics, mirroring ``time.mktime``.
    """
    time_tuple = datetime_.timetuple()
    return time.mktime(time_tuple)
def compare_content(fpath1, fpath2):
"""Tell if the content of both fpaths are equal.
This does not check modification times, just internal bytes.
"""
with open(fpath1, 'rb') as fh1:
with open(fpath2, 'rb') as fh2:
while True:
data1 = fh1.read(65536)
... | 52bc4a2dcde79bd09e8d175937805b944dc16487 | 499,058 |
import pathlib
def get_directory_contents(path: pathlib.Path):
"""Iterates through a directory and returns a set of its contents."""
contents = set()
for filename in path.glob('**/*'):
# Remove the original parent directories to get just the relative path.
contents.add(filename.relative_to... | a6e5e8bb2a4894de4b0ef54a1f2f727ec68c86a5 | 111,393 |
def stringify_sdg_number(sdg):
    """Convert an SDG number to a zero-padded two-character string.

    Used for saving and loading individual goal data.

    Args:
        sdg (int): Typically 1 - 17.

    Returns:
        str: e.g. '06'.
    """
    text = str(sdg)
    return text.zfill(2)
from pathlib import Path
def filename_for_plot(directory, zarr_path: str, qualifier: str = "") -> str:
"""Get a suitable output filename for a plot produced from a Zarr path"""
basename = Path(zarr_path).stem.replace(
".zarr", ""
) # handle ".zarr" and ".zarr.zip"
path = Path(directory, f"{ba... | 8361b4fc037c3bf3a28da1c0f9101b1dbac5d6e7 | 569,760 |
def rnn_args_from_config(rnn_config):
"""
Takes a Config object corresponding to RNN settings
(for example `config.algo.rnn` in BCConfig) and extracts
rnn kwargs for instantiating rnn networks.
"""
return dict(
rnn_hidden_dim=rnn_config.hidden_dim,
rnn_num_layers=rnn_config.num_l... | 54cf542122036510c70fe7a53a47dd724880a912 | 697,242 |
def flatten_request_header(header):
"""
Transform a dict representing header parameters into a flat string of
comma separated parameters suitable for inserting into the actual
headers
"""
flattened_header = ''
if isinstance(header, dict):
contents = []
for content_key, conten... | e6a3b97e502ab1afbb6c6eef4241ba45ec00e7ef | 446,585 |
def add_slash(url):
    """Return *url* with a trailing slash appended, for consistency in urls."""
    return url if url.endswith('/') else url + '/'
def format_timedelta(timedelta_obj):
"""Helper function to format timedelta to a human-readable time string"""
if timedelta_obj:
timedelta_string = '%02d:%02d:%02d' % (
int(timedelta_obj.total_seconds() // 3600),
(timedelta_obj.seconds // 60) % 60,
timedelta_obj.secon... | f10ddb22d682fcb6c1f406246f0819451b87a136 | 585,086 |
import six
def as_bytes(bytes_or_text, encoding='utf-8'):
"""Converts bytes or unicode to `bytes`, using utf-8 encoding for text.
# Arguments
bytes_or_text: A `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for encoding unicode.
# Returns
A `bytes` ... | b973821db5060c6f8209642024ed71f023b40714 | 271,248 |
def mailverif(mail):
""" Verifica si una cadena contiene un @.
Es incorrecto si la @ está al final de la cadena o
si existe más de una @ en el string.
>>> mailverif("[email protected]")
True
>>> mailverif("Alex@gmail.@com")
False
>>> mailverif("Alexgmail.com")
False
>>> mailverif("Alexgmail.com@")
False
"""
arro... | 6c7ff162eda514bc4c7e7edd173ff9e5f9f552a4 | 335,852 |
import math
def eoq(demand_in_units, cost_of_ordering, cost_of_carrying):
"""Return the Economic Order Quantity (EOQ) for a product.
Args:
demand_in_units (int):
cost_of_ordering (float):
cost_of_carrying (float):
Returns:
Economic Order Quantity or EOQ (float).
"""
... | 97fe8880876bf785157303451a566acedbbfcc3a | 363,196 |
def standard_error(sample_size, successes):
"""
Calculates the standard error of a sample proportion.
Formula: σp = sqrt [ p(1 - p) / n ].
with:
p = proportion of successes in sample (successes / sample size)
:param sample_size: the size of the sample
:param successes: the nu... | 77cbde0689dec5e2432043362337b925c5ea7296 | 107,536 |
def load_ids(filename):
    """Load document ids from a newline separated list of filenames.

    Args:
        filename: Path to a text file holding one id per line.

    Returns:
        set[str]: The whitespace-stripped ids (duplicates collapsed).
    """
    # The original opened the file and never closed it; a context manager
    # guarantees deterministic release of the handle.
    with open(filename, "r") as fin:
        return {line.strip() for line in fin}
from typing import List
def count_freq_keywords(keywords: List[str]) -> List[tuple]:
"""
Returns the count of each unique keyword of a list of keywords.
Parameters:
keywords (List[str]): list with all keywords as strings.
Returns:
a list of tuples of the form (keyword, ... | 5c28beebc425ef4575e39ae315f78ed776e3f5fd | 250,534 |
def label_to_color_image(label, colormap=None):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
colormap: A colormap for visualizing segmentation results.
Returns:
result: A 2D array with floating ... | 7dd66d4350e5f82bfe705fc35c7f02abaa4ebee9 | 380,507 |
import torch
def list2vec(z1_list):
    """Convert a list of tensors into one (batch, total_elems, 1) vector."""
    batch = z1_list[0].size(0)
    flattened = [tensor.reshape(batch, -1, 1) for tensor in z1_list]
    return torch.cat(flattened, dim=1)
def normalize(tensor, mean, std):
"""Normalize a ``torch.tensor``
Args:
tensor (torch.tensor): tensor to be normalized.
mean: (list): the mean of BGR
std: (list): the std of BGR
Returns:
Tensor: Normalized tensor.
"""
for t, m, s in zip(tensor, mean, std):
... | 2dea96d14fd52898bd967725d8805d1ab10ea7cd | 691,793 |
def else_while(n, numbers):
"""
A function illustrating the use of else with a loop. This function will determine if n is in the iterable
numbers.
:param n: The thing to search for.
:param numbers: An iterable to search over.
:return: True if the thing is in the iterable, false otherwise.
... | 400a4bd48fd04577707793e12e7782e55fbdacab | 250,867 |
def get_boxes(warp_sudoku_board):
"""
Splits image into 81 small boxes.
:param warp_sudoku_board:
OpenCV image
:return:
9x9 2D list; each cell contains 2D numpy array
"""
temp = [None for i in range(9)]
boxes = [temp.copy() for i in range(9)]
board_height = warp_sudoku_board.shape[0]
board_width = warp_s... | c6e00b320f59540754e9d75ca527b3e41790298a | 439,500 |
def get_tree_root_element(tree_object):
    """
    Return the root of the tree_object
    :param tree_object: ElementTree instance for the xml_file
    :return: Root element of the ElementTree instance
    """
    root = tree_object.getroot()
    return root
def get_file_contents(filename):
    """Return the whole text content of *filename* as a single string."""
    with open(filename, 'r') as handle:
        data = handle.read()
    return data
import six
def make(*args):
"""Return the plug built up from the given arguments.
Args:
*args (str | int): Token(s) to build the plug name from.
Returns:
str
"""
parts = []
for arg in args:
if isinstance(arg, int):
parts[-1] = "{}[{}]".format(parts[-1], ... | eb87f2b8facf801c2fb5085f7e6a9627567e2419 | 451,718 |
from pathlib import Path
import re
def normalise_nci_symlinks(input_path: Path) -> Path:
"""
If it's an NCI lustre path, always use the symlink (`/g/data`) rather than specific drives (eg. `/g/data2`).
>>> normalise_nci_symlinks(Path('/g/data2/v10/some/dataset.tar')).as_posix()
'/g/data/v10/some/data... | 9dde3bfc5750ac9bd50680a6ab50b3db9699dd09 | 655,538 |
import torch
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
re... | 0f76285e9c398b48545842c7bd17acf7da589638 | 223,365 |
from typing import List
import collections
import csv
import copy
def read_csv(csv_fpath: str, delimiter="\t") -> List[collections.OrderedDict]:
"""Copy the data out of a csv file, as a list of OrderedDicts.
Args:
csv_fpath: string representing path to a csv file.
Rows:
rows: list of Ord... | f2bdbf7317cd0d0db9abb6aceef2a8b33c0cfc11 | 249,264 |
from typing import List
from typing import Dict
def content_check(data: List[Dict[str, str]], check: str,
json_file: str, msg: str) -> bool:
"""
Checks whether a dictionary contains a column specific to expected data and returns
a corresponding boolean value. This avoids writing files of... | e4ce5389fae39fb36743d81ca5171534202678ec | 661,370 |
def line_search_bisection(f, bound, accuracy):
"""
Maximize c so that constraint fulfilled.
This algorithm assumes continuity of f; that is,
there exists a fixed value c, such that f(x) is
False for x < c and True otherwise. This holds true,
for example, for the level sets that we consider.
... | 123d58b3782d8e70dd3f965ca1b9940f0b32c9a1 | 591,586 |
def check_detection_overlap(gs, dd):
"""
Evaluates if two detections overlap
Paramters
---------
gs: list
Gold standard detection [start,stop]
dd: list
Detector detection [start,stop]
Returns
-------
overlap: bool
Whether two events overlap.
"""
ov... | af59564c268de9cf0d30d98a0fdba6cd0bac1232 | 620,394 |
def longueur_bit(n: int) -> int:
"""
Description:
Renvoie le nombre de bits utilisés dans la représentation binaire d'un nombre entier.
Paramètres:
n: {int} -- Le nombre entier dont on veut connaître la longueur de sa représentation binaire.
Retourne:
{int} -- Le nombre de ... | b793a84b69127e08a5d20aa9a069f69aa98efed4 | 189,482 |
def svg_fill_rule(obj):
    """Return an SVG style string describing the fill rule of *obj*.

    Returns 'fill-rule:nonzero' when ``obj.fillrule`` is 'winding',
    otherwise 'fill-rule:evenodd'.  (The original docstring claimed
    'fill-rule:winding' could be returned, which the code never did.)
    """
    if obj.fillrule == 'winding':
        return "fill-rule:nonzero"
    return "fill-rule:evenodd"
def to_camel(s):
    """Convert an underscored title into camel case.

    'PARENT_ORGANISATION_ID' => 'parentOrganisationId'
    """
    first, *rest = s.split("_")
    return first.lower() + "".join(word.title() for word in rest)
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-i', '--input', type=str, default="",
help='full path to the input text (phareses separated by new line); \
if not provided then use default text')
parser.add_argument('-... | b6ae40526f7dff6a5f716ffbce15faf3e823773a | 197,689 |
def timeticks_to_str(ticks):
"""Return "days, hours, minutes, seconds and ms" string from ticks"""
days, rem1 = divmod(ticks, 24 * 60 * 60 * 100)
hours, rem2 = divmod(rem1, 60 * 60 * 100)
minutes, rem3 = divmod(rem2, 60 * 100)
seconds, milliseconds = divmod(rem3, 100)
ending = 's' if days > 1 el... | e625ad82e212f265cb01ffe34713f9853a880565 | 542,392 |
def calculate_dynamics(pos_vector, vel_vector, accel_vector):
"""Calculate the effort vector for a custom 2-DoF SCARA."""
m1 = 1.0 # kg weight of first body
r1 = 0.35 # distance to cg
d1 = 0.5 # full link length
i1 = (1./8)*m1*d1**2
m2 = 1.0
r2 = 0.35
i2 = (1./8)*m2*0.5**2
A = ... | b23a54004cd31812d63918b66e8931f269541fe8 | 561,203 |
def padding_mask(seq_k, seq_q):
"""For masking out the padding part of the keys sequence.
Args:
seq_k: Keys tensor, with shape [B, L_k]
seq_q: Query tensor, with shape [B, L_q]
Returns:
A masking tensor, with shape [B, L_1, L_k]
"""
len_q = seq_q.size(1)
# `PAD` is 0
pad_mask = seq_k.eq(0)
... | 22f84e5b2e7b0ef31c0aa36771f50de110a1fa9d | 639,269 |
def err2str(error: Exception, /, *, msg_only: bool = False) -> str:
    """Return a string describing *error*.

    With ``msg_only`` only the message is returned; otherwise the
    exception class name is prepended.
    """
    message = str(error)
    if msg_only:
        return message
    return f"{error.__class__.__name__} - {message}"
import re
def is_ext_code(s: str) -> bool:
    """ Returns True if `s` appears to be a single extended 256 escape code.
    """
    match = re.match('^\033\\[((38)|(48));5;\\d{1,3}m$', s)
    return match is not None
def composekey(*keys):
"""Compose a sequence of keys into one key.
Example: composekey("attr1.attr2", "attr3") == "attr1.attr2.attr3"
"""
keys = [key.split() for key in keys]
composites = [[]]
for alternatives in keys:
composites = [com + [alt] for alt in alternatives for com in composites]
return " ... | 4695523c2ea670c929389db29ce9e7c3e962b3b2 | 198,972 |
def extract_dict(path):
    """Read a dictionary blob from *path*.

    NOTE(review): only the first 4096 bytes are read — presumably a
    fixed-size dictionary header; confirm against the writer.
    """
    with open(path, 'rb') as handle:
        return handle.read(4096)
def mod_div(n, d, m):
    """Return (n/d) mod m for prime m.

    Relies on Fermat's little theorem: for prime m the modular
    multiplicative inverse of d equals d^(m-2) mod m.
    """
    d_inverse = pow(d, m - 2, m)
    return n * d_inverse % m
import re
def Char_Tokenizer(sentence, boundary_chars, tokenized_chars):
"""
Separates boundary_chars from the boundary of a word
and tokenized_chars from any part of the string
"""
tok_sentence = sentence
# separates boundary_chars when they're found at word boundary
for curr_char in boundary_chars.split()... | e2798047fdf92cf1548a78027e6eebc4eb84b689 | 180,873 |
def locations_of_substring(string, substring):
"""Return a list of locations of a substring."""
substring_length = len(substring)
def recurse(locations_found, start):
location = string.find(substring, start)
if location != -1:
return recurse(locations_found + [location], loc... | e276de58aceb3a84f19773d4ec0b37be948ee5c3 | 604,304 |
def _pearson_corr(mat_X, mat_Y):
"""Pearson's correlation between every columns in mat_X and mat_Y
Args
----
mat_X (N,M1): np.ndarray
mat_Y (N,M2): np.ndarray
Returns
-------
mat_corr: (M1,M2): np.ndarray
Correlation matrix
"""
# Reshap... | 41249c15c213a73e6228875661088b0803e1d1b8 | 396,614 |
import re
import inspect
def extract_code_blocks_from_md(docstr):
"""
Extract code blocks from markdown content.
"""
code_blocks = []
pat = re.compile(r"```i?python[23]?(.*?)```", re.MULTILINE + re.DOTALL)
for cbit in pat.finditer(docstr):
code_blocks.append(inspect.cleandoc(cbit.group... | e6d3d8f5fd85c7f33bbc024f6cde32e1c23e4ea8 | 498,444 |
import json
def format_rpc_response(data, exception=None):
"""
Formats a response from a RPC Manager.
It provides the data and/or a serialized exception so it can be
re-created by the caller.
:param Any data: A JSON Serializable object.
:param Exception exception: An Exception object
:ret... | c900e2512fd486c91789ab4312883061553a2fb1 | 704,379 |
def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1):
"""Extract the target frame from 2D pose results, and pad the sequence to a
fixed length.
Args:
pose_results (List[List[Dict]]): Multi-frame pose detection results
stored in a nested list. Each element of the o... | 429f0296630897f3eff227bb661dbaa572bd7d2b | 492,114 |
from typing import Dict
from typing import Any
def prefix_keys(dict_: Dict[str, Any], *, prefix: str, sep: str = "/") -> Dict[str, Any]:
    """Prepend the prefix (joined with *sep*) to all keys of the dict."""
    prefixed = {}
    for key, value in dict_.items():
        prefixed[f"{prefix}{sep}{key}"] = value
    return prefixed
def get_lonlat(iindex, jindex, grd, Cpos='rho'):
"""
lon, lat = get_lonlat(iindex, jindex, grd)
return the longitude (degree east) and latitude (degree north)
for grid point (iindex, jindex)
"""
if Cpos == 'u':
lon = grd.hgrid.lon_u[:,:]
lat = grd.hgrid.lat_u[:,:]
elif Cpos... | f3ae66c0a230d0b33409f98738d2c127c8b46232 | 466,035 |
def enumerate_options(course_list):
"""Given a course_list generates a propt string which enumerats the courses the user may download.
Args:
course_list (list): a list of Course objects
Returns:
str : a string detailing the options a user has (courses to fetch files from, and how to exit)
... | d0cf025f30760f1b16e06c66ffa394d4a876adb6 | 106,528 |
import random
def epsilon_greedy(variant_vals, eps=0.1):
"""Epsilon-greedy algorithm implementation
on Variant model values.
Parameters
----------
variant_vals : list
A list of dictionary mappings of Variant field values for
a given Campaign object. Required ``Variant`` fields ar... | 617f98f05e83b2076ab2b7726e71d22489f8566d | 325,751 |
def get_boundary_from_response(response):
""" Parses the response header and returns the boundary.
:param response: response containing the header that contains the boundary
:return: a binary string of the boundary
"""
# Read only the first value with key 'content-type' (duplicate keys are allowed)... | 66a0112598b2210cca1a2210f6af963dfee641f7 | 5,553 |
def dict_to_json_keys(pydict: dict) -> dict:
"""this converts a dict using the python coding style to a dict
where the keys are in JSON format style
:pydict dict keys are strings in python style format
:returns dict
"""
d = {}
for key in pydict.keys():
new_key = key.title().replace('... | cf13453d16a3e90bbd974d7c2352221165765f5c | 616,820 |
from typing import List
def construct_index_name(table: str, columns: List[str]) -> str:
    """
    Constructs the name of an index from the table name and its columns.
    """
    # Note: an empty column list still yields a trailing underscore
    # ("table_"), matching the original f-string formulation.
    column_suffix = "_".join(columns)
    return "_".join((table, column_suffix))
def get_task_runs(task, task_runs_object):
    """
    Return a list of all task runs whose 'task_id' matches the task's 'id'.
    """
    wanted_id = task['id']
    return [run for run in task_runs_object if run['task_id'] == wanted_id]
def BUTIA_COUNT(parent, r):
    """Get the number of boards connected.

    :param parent: Object exposing a ``robot`` attribute with a
        ``getButiaCount()`` method (presumably the owning plugin/activity).
    :param r: Unused here; presumably required by the dispatcher's call
        convention — TODO confirm against the caller.
    :return: Whatever ``getButiaCount()`` returns (the board count).
    """
    return parent.robot.getButiaCount()
def any_in(a, b):
    """Checks if 'a in b' is true for any element of a."""
    for element in a:
        if element in b:
            return True
    return False
def get_local_as(node, vrf="default"):
    """Discover the local AS of the node (e.g. "AS65001")."""
    output = node.enable(["show bgp instance"])
    vrf_data = output[0]["result"]["vrfs"][vrf]
    return f"AS{vrf_data['localAs']}"
from typing import List
def _onemax(x: List[int]) -> float:
"""onemax(x) is the most classical case of discrete functions, adapted to minimization.
It is originally designed for lists of bits. It just counts the number of 1,
and returns len(x) - number of ones..
It also works in the continuous case ... | 48b3b1faa7ffbc75ded8fcf14aad11ff618a5d4a | 627,263 |
from typing import Dict
from typing import Any
def _get_max_attempts(config: Dict[str, Any]) -> int:
"""Retrieves the max attempts from the config.
Args:
config: a dictionary with the platform configuration.
Returns:
The number of attempts.
"""
return config['queue_config']['retry_config']['max_at... | ccfb2b9528f18d8d59d10b72876b6328a51de63d | 392,410 |
def _strip_frbr_section_from_pnx(pnx_xml):
""" We must strip the frbr section from a PNX record before re-inserting it """
for frbr_section in pnx_xml.findall('frbr'):
pnx_xml.remove(frbr_section)
return(pnx_xml) | bcc3f45a0cf8f787602c9b3d861ad8c69d2c198d | 630,132 |
from pathlib import Path
import dill
import time
import traceback
def load_session(path, use_backup=True):
"""
Loads a session
:param path: The session folder (labelled .ses) containng the session.dill and session.back file
:param use_backup: If True, will attempt to load backup if main fails
:ret... | ad285cb3d377f5de54d6813885c90bc07902835f | 269,530 |
import requests
def api_get_course(srcdb, crn, code):
"""
Given a CU Boulder course code and CRN, return the API response
for details on that course (including its sections).
"""
url = "https://classes.colorado.edu/api/?page=fose&route=details"
data = {
"group": "code:{}".format(code),... | e8260aefaa5e4f2dd4a919bbc3af6cdce675f8fb | 600,432 |
def make_string_literal(string):
    """Make python string literal out of a given string."""
    escaped = string.replace("'", "\\'")
    return "'" + escaped + "'"
def gather_lists(list_):
"""
Concatenate all the sublists of L and return the result.
@param list[list[object]] list_: list of lists to concatenate
@rtype: list[object]
>>> gather_lists([[1, 2], [3, 4, 5]])
[1, 2, 3, 4, 5]
>>> gather_lists([[6, 7], [8], [9, 10, 11]])
[6, 7, 8, 9, 10, 1... | f1da7a201cade349a331cf38651f0ce40fff9a45 | 526,202 |
def plot_number_of_structures_per_kinase_pdb_pair(structures):
"""
Plot the number of structures that have x structures per kinase-PDB ID pair.
Parameters
----------
structures : pandas.DataFrame
Structures DataFrame from opencadd.databases.klifs module.
Returns
-------
matplot... | 405a8581da0c3d2a8c32133d90a06e2f09692cbd | 192,854 |
def remove_empty_terms(pot):
    """ Remove terms from the potential that do not have
        a value associated with them (i.e. whose value is None)
    """
    cleaned = {}
    for key, value in pot.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
import re
def preprocess(text_string):
"""
Accepts a text string and replaces:
1) urls with URLHERE
2) lots of whitespace with one instance
3) mentions with MENTIONHERE
This allows us to get standardized counts of urls and mentions
Without caring about specific people mentioned
"""
... | 6bc82ab98c194b6c7accc777aaa78d23efc12364 | 680,683 |
def _norm_hsl2rgb(h, s, l):
"""Convert HSL to RGB colours. This function assumes the input has
been sanitised and does no validation on the input parameters.
This calculation has been adapted from Wikipedia:
https://en.wikipedia.org/wiki/HSL_and_HSV#To_RGB
:param h: Hue
:param s: Saturation
... | 1964f3647ee8763508ad39f0c6604694f2486c04 | 183,705 |
def educational_building(data):
"""Extract educational.
:param Dataframe data: Pandas Dataframe of building dataset
:return: DataFrame containing educational buildings
:retype: DataFrame
"""
education = data.loc[["kindergarten", "school", "university"], :]
listOfString_edu = ['educational' ... | 102d9d55326f688ad68b8f31a2cf7398efbeb878 | 407,532 |
def fibUnderN(n):
    """ Return a list of the Fibonacci numbers strictly less than n.

    --param
        n : integer (or any number)
    --return
        list : all fibs under n, starting from 0

    The original seeded the list with [0, 1] unconditionally, so for
    n <= 1 it returned values that were not under n; this version
    honours the "under n" contract for every n.
    """
    fibs = []
    a, b = 0, 1
    while a < n:
        fibs.append(a)
        a, b = b, a + b
    return fibs
def preconvert_preinstanced_type(value, name, type_):
"""
Converts the given `value` to an acceptable value by the wrapper.
Parameters
----------
value : `Any`
The value to convert.
name : `str`
The name of the value.
type_ : ``PreinstancedBase`` instance
The pre... | 257948a0ebed93dcb8772d74c8378b9f5a42af36 | 81,701 |
def is_anagram(word1, word2):
    """Checks whether two words are anagrams

    word1: string or list
    word2: string or list

    returns: boolean
    """
    first = sorted(word1)
    second = sorted(word2)
    return first == second
def orSearch(inverseIndex, query):
"""
Input: an inverse index, as created by makeInverseIndex, and a list of words to query
Output: the set of document ids that contain _any_ of the specified words
"""
rsetlist = [inverseIndex[word] for word in query if word in inverseIndex ]
results = set.unio... | c0f18e999ab1a75aa5bbb16f2a39dbbbe3943f2a | 554,814 |
def square_area(side):
"""
2. Function with one input and one output
This function demonstrates how a function returns a processed output
based on the received input
This function calculates the area of a square
side: the side of the square, must be a positive number
area: the area of the s... | bec6587fb3de8d638ff3e1de8b3ca40a067923d0 | 484,607 |
import random
def get_label_indices(num_labels, sample_label):
"""
Function to get sample label indices for a given number
of labels and a sampling policy
:param num_labels: int number of labels
:param sample_label: method for sampling the labels
:return: list of labels defined by the sampling... | 267cc559386d94f6e4b0eea1576d70074422704d | 330,159 |
def auc(truth, recommend):
"""Area under the ROC curve (AUC).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: AUC.
"""
tp = correct = 0.
for r in recommend:
if r in truth:
... | eb2f137927cb733729bf3f896dc4d1f897235c43 | 268,359 |
def _get_aliased_variables( role_name, dimension_variables):
"""
Returns list of variable DTOs for a virtual/aliased Dimension "Role"
Keyword Parameters:
role_name -- String, representing name of the OLAP dimension alias
(Role) for which variables are to be returned.
dimension_variables -- ... | 623a1b105e65d4135895aa7ce2ce810db797e0a7 | 581,509 |
from typing import Counter
def get_mode(counter: Counter) -> int:
"""
Helper function to return the count of the most common
element from an instance of Counter()
:param counter: collections.Counter instance
"""
mode = counter.most_common(1)
if not mode:
return 0
# if mode is n... | 2095db6bae8d8ecb93ce4078f1692ecfe3dd38a4 | 299,793 |
def find_nth(s, x, n):
    """
    Find the index of the nth non-overlapping occurrence of x in s.

    Returns -1 when there are fewer than n occurrences (or n <= 0).
    """
    # Start at -len(x) so the first search begins at index 0.  The
    # original started at -1, so its first search began at len(x) - 1
    # and missed a match at the start of the string for len(x) > 1
    # (e.g. find_nth("abc", "abc", 1) returned -1).
    i = -len(x)
    for _ in range(n):
        i = s.find(x, i + len(x))
        if i == -1:
            break
    return i
def smallest(upper, lower):
    """Returns smallest of the two values, duplicated as a pair."""
    result = min(upper, lower)
    return (result, result)
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen fr... | 8305a40304e2c9cd62f1dbba5721e2319abcab11 | 286,793 |
def partition_by_adjacent_tiles(tile_ids, dimension=2):
"""
Partition a set of tile ids into sets of adjacent tiles. For example,
if we're requesting a set of four tiles that form a rectangle, then
those four tiles will become one set of adjacent tiles. Non-contiguous
tiles are not grouped together.... | c0c37fd4b15a8d1e661412bf5d4eb9c887ba6f98 | 577,443 |
def subsetindex(full, subset):
    """
    Get the indices of the subset of a list.

    A bare string is treated as a one-element subset.  Indices are
    grouped per subset element, in subset order.
    """
    if isinstance(subset, str):
        subset = [subset]
    return [i for s in subset for i, x in enumerate(full) if x == s]
def _is_epub(file_bytes: bytes) -> bool:
"""
Decide if a file is an epub file.
From https://github.com/h2non/filetype.py (MIT license)
"""
return (len(file_bytes) > 57 and
file_bytes[0] == 0x50 and file_bytes[1] == 0x4B and
file_bytes[2] == 0x3 and file_bytes[3] == 0x4 and
file_bytes[30] == 0x6D and file_by... | 55a536e71964af4d00789c21b42b81bb02487249 | 289,252 |
import torch
def tensor_linspace(start, end, steps=10):
"""
Vectorized version of torch.linspace.
Inputs:
- start: Tensor of any shape
- end: Tensor of the same shape as start
- steps: Integer
Returns:
- out: Tensor of shape start.size() + (steps,), such that
out.select(-1, 0) ... | 50342f991879036bce0c93c76762b8056d8f0705 | 312,194 |
def replace_correction(series, **kwargs):
"""Corrects replacing values.
Parameters
----------
series: pd.Series
The pandas series
**kwargs:
Arguments as per pandas replace function
Returns
-------
pd.Series
The corrected series.
See Also
--------
Ex... | 107a664411bfa3b04911857baa125878658492d8 | 369,136 |
def no_blanks(string):
    """Removes newline, tab and space characters from string

    :param string: A string to remove blanks from
    :type string: str
    :returns the same string with those characters removed
    """
    # One translate pass replaces the three chained .replace() calls.
    return string.translate(str.maketrans("", "", "\n\t "))
def getvalue(s):
    """
    Return the text between the first '>' and the last '<' in s.

    getvalue() takes a string like <aaa>bbbbb<cc> and returns bbbbb.
    If either delimiter is absent, the corresponding trim is skipped.
    """
    after_open = s[s.find('>') + 1:]
    cut = after_open.rfind('<')
    if cut == -1:
        return after_open
    return after_open[:cut]
def split_out_internet_rules(rule_list):
    """Separate rules targeting the Internet versus normal rules

    Returns a (normal_rules, internet_rules) pair of lists, partitioned
    on whether ``target_zone`` equals 'internet'.
    """
    normal = [rule for rule in rule_list if rule.target_zone != 'internet']
    internet = [rule for rule in rule_list if rule.target_zone == 'internet']
    return normal, internet
def floatify(scalar):
"""
Useful to make float from strings compatible from fortran
Args:
scalar (str, float): When string representing a float that might be
given in fortran notation, otherwise it might be a floating point
Returns:
float. The value associated to scalar as a floa... | 9df5deaf619fe39cd90fc0f100bf0e588ca4d780 | 66,543 |
import json
def read_file(file_path, is_json=False):
"""
Reads the content of a file.
:param file_path: path of the file
:param is_json: True if it's a json file
:return: file's content
"""
with open(file_path, 'r') as infile:
if is_json:
content = json.load(infile)
... | c01d56b0a63b6f616824ae653182566b0b22eda9 | 125,339 |
def hash_point_pair(p1, p2):
    """Helper function to generate a hash from two time/frequency points.

    Hashes both points' first coordinates together with the delta of
    their second coordinates.  The original computed ``p2[1] - p2[1]``
    (always 0), which dropped all information from the second
    coordinates; ``p2[1] - p1[1]`` was almost certainly intended.
    """
    return hash((p1[0], p2[0], p2[1] - p1[1]))
def asInteger(epsg):
    """ convert EPSG code to integer

    :param epsg: EPSG code, typically a string (anything ``int()`` accepts)
    :return: the code as an ``int``; propagates ValueError for non-numeric input
    """
    return int(epsg)
import torch
def normalized_state_to_tensor(state, building):
"""
Transforms a state dict to a pytorch tensor.
The function ensures the correct ordering of the elements according to the list building.global_state_variables.
It expects a **normalized** state as input.
"""
ten = [[ state[sval] ... | 4aea246f388f941290d2e4aeb6da16f91e210caa | 6,420 |
def guess_age_group(swimmer_age: int) -> tuple[int, int]:
"""Guess the age group from a swimmer's age.
Args:
swimmer_age (int): The swimmer's age.
Returns:
tuple[int, int]: The age group in terms of (age_min, age_max).
"""
if swimmer_age <= 8:
# Probably 8&U
return ... | 8f44ad7217ab2d4d273860e732bc68cb2218838c | 63,593 |
def ACF_brute(df_cov, lag):
""" Brute force calculate the ACF from coverage data
Args:
df_cov: a dataframe of coverage data
lag: the lag for which to calculate the ACF
"""
# Assign columns and index
df_cov.columns = ['chrom', 'pos', 'depth']
df_cov.index = df_cov.... | 9d17f8a6799165e6e63b55361d33db672ff3b205 | 348,731 |
def unionRect(rect1, rect2):
"""Determine union of bounding rectangles.
Args:
rect1: First bounding rectangle, expressed as tuples
``(xMin, yMin, xMax, yMax)``.
rect2: Second bounding rectangle.
Returns:
The smallest rectangle in which both input rectangles are fully
... | 1c89439efb082159400acd25396cbf43d7ea1ddf | 675,530 |
from functools import reduce
def chomp_empty_strings(strings, c, reverse=False):
"""
Given a list of strings, some of which are the empty string "", replace the
empty strings with c and combine them with the closest non-empty string on
the left or "" if it is the first string.
Examples:
for c=... | 918bc2b3972140398eee0b90e250cccf0333c8ce | 214,643 |
def remove_additional_whitespace(tokens):
"""
Removes additional whitespaces
:param tokens: A list of tokens
:return: A comparable list of tokens to the input but with additional whitespaces removed
"""
cleaned_tokens = []
for token in tokens:
token = token.replace(' ', '')
... | 096f0ae0d88d21159d4bc7349fd1b7d8eb5ebfe7 | 72,286 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.