code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_hops(self, start, end=None, forward=True):
    """Return hop distances from ``start`` as a list of ``(node, hop)``
    tuples, walking edges forward or backward per ``forward``.

    :param start: the starting node
    :param end: optional ending node; when omitted the whole graph is
        searched.
    :param forward: use forward BFS when True (default), backward
        otherwise.
    :return: list of (node, hop) tuples.
    """
    # both branches of the original differed only in the flag value
    return list(self._iterbfs(start=start, end=end, forward=bool(forward)))
Computes the hop distance to all nodes centered around a specified node. First order neighbours are at hop 1, their neigbours are at hop 2 etc. Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value of the forward parameter. If the distance between all neighbouring nodes is 1 the hop number corresponds to the shortest distance between the nodes. :param start: the starting node :param end: ending node (optional). When not specified will search the whole graph. :param forward: directionality parameter (optional). If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}. :return: returns a list of tuples where each tuple contains the node and the hop. Typical usage:: >>> print graph.get_hops(1, 8) >>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)] # node 1 is at 0 hops # node 2 is at 1 hop # ... # node 8 is at 5 hops
def _include_exclude(file_path, include=None, exclude=None): if exclude is not None and exclude: for pattern in exclude: if file_path.match(pattern): return False if include is not None and include: for pattern in include: if file_path.match(pattern): return True return False return True
Check if the file matches one of the include filters and none of the exclude filters. :param file_path: Path to the file. :param include: Tuple of patterns to include in the result. :param exclude: Tuple of patterns to exclude from the result.
def newCDataBlock(self, content, len):
    """Create a new node containing a CDATA block.

    Wraps libxml2's ``xmlNewCDataBlock``; raises ``treeError`` when the
    underlying call fails.
    """
    raw = libxml2mod.xmlNewCDataBlock(self._o, content, len)
    if raw is None:
        raise treeError('xmlNewCDataBlock() failed')
    return xmlNode(_obj=raw)
Creation of a new node containing a CDATA block.
def base62_encode(cls, num,
                  alphabet="0123456789abcdefghijklmnopqrstuvwxyz"
                           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Encode a non-negative integer in base ``len(alphabet)``.

    The docstring of the original advertised an ``alphabet`` argument
    that did not exist; it is now a real, backward-compatible parameter
    (base 62 by default).

    :param num: the non-negative integer to encode.
    :param alphabet: the digit alphabet to use, least digit first.
    :return: the encoded string.
    """
    if num == 0:
        return alphabet[0]
    base = len(alphabet)
    digits = []
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
Encode a number in Base X. `num`: The number to encode `alphabet`: The alphabet to use for encoding Stolen from: http://stackoverflow.com/a/1119769/1144479
def image_id_from_k8s():
    """Ping the Kubernetes API for this pod's container image id.

    Returns the image id string, or None when not running under k8s or
    on any request/parse failure.
    """
    token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
    if not os.path.exists(token_path):
        return None
    k8s_server = "https://{}:{}/api/v1/namespaces/default/pods/{}".format(
        os.getenv("KUBERNETES_SERVICE_HOST"),
        os.getenv("KUBERNETES_PORT_443_TCP_PORT"),
        os.getenv("HOSTNAME")
    )
    try:
        # close the token file instead of leaking the handle
        with open(token_path) as token_file:
            token = token_file.read()
        res = requests.get(
            k8s_server,
            verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
            timeout=3,
            headers={"Authorization": "Bearer {}".format(token)})
        res.raise_for_status()
    except requests.RequestException:
        return None
    try:
        image_id = res.json()["status"]["containerStatuses"][0]["imageID"]
        # BUG FIX: str.strip() removes any of the given *characters* from
        # both ends (it could eat trailing digest characters); remove the
        # literal prefix instead.
        prefix = "docker-pullable://"
        if image_id.startswith(prefix):
            image_id = image_id[len(prefix):]
        return image_id
    except (ValueError, KeyError, IndexError):
        logger.exception("Error checking kubernetes for image id")
        return None
Pings the k8s metadata service for the image id
def nocomment(astr, com='!'):
    """Strip comments from ``astr``, like ``#`` comments in Python.

    Everything from the first occurrence of ``com`` to the end of each
    line is removed.
    """
    def _strip_one(line):
        cut = line.find(com)
        return line if cut == -1 else line[:cut]
    return '\n'.join(_strip_one(line) for line in astr.splitlines())
just like the comment in python. removes any text after the phrase 'com'
def resize_bytes(fobj, old_size, new_size, offset):
    """Resize the area of ``fobj`` at ``offset`` from ``old_size`` to
    ``new_size`` bytes, adding/deleting at the end of the area.

    Does nothing when no resizing is needed. May raise IOError.
    """
    delta = new_size - old_size
    if delta < 0:
        # shrink: drop the tail of the area
        delete_bytes(fobj, -delta, offset + new_size)
    elif delta > 0:
        # grow: pad after the old area
        insert_bytes(fobj, delta, offset + old_size)
Resize an area in a file adding and deleting at the end of it. Does nothing if no resizing is needed. Args: fobj (fileobj) old_size (int): The area starting at offset new_size (int): The new size of the area offset (int): The start of the area Raises: IOError
def upload(self, response, file):
    """Upload ``file`` using the pre-flight data in ``response``.

    :param response: the response from the upload pre-flight request.
    :param file: a file handler pointing to the file to upload.
    :returns: tuple (True if the upload succeeded, JSON response dict).
    :raises ValueError: when upload_url or upload_params is missing.
    """
    response = response.json()
    if not response.get('upload_url'):
        raise ValueError('Bad API response. No upload_url.')
    if not response.get('upload_params'):
        raise ValueError('Bad API response. No upload_params.')
    kwargs = response.get('upload_params')
    response = self._requester.request(
        'POST',
        use_auth=False,
        _url=response.get('upload_url'),
        file=file,
        _kwargs=combine_kwargs(**kwargs)
    )
    # BUG FIX: lstrip() removes any of the listed *characters*; strip the
    # exact anti-CSRF "while(1);" prefix instead.
    text = response.text
    prefix = 'while(1);'
    if text.startswith(prefix):
        text = text[len(prefix):]
    response_json = json.loads(text)
    return ('url' in response_json, response_json)
Upload the file. :param response: The response from the upload request. :type response: dict :param file: A file handler pointing to the file to upload. :returns: True if the file uploaded successfully, False otherwise, \ and the JSON response from the API. :rtype: tuple
def sub(self, key):
    """Return a new Vyper instance rooted at the sub-tree under ``key``,
    or None when the value at ``key`` is not a dict."""
    data = self.get(key)
    if not isinstance(data, dict):
        return None
    child = Vyper()
    child._config = data
    return child
Returns new Vyper instance representing a sub tree of this instance.
def _read_opt_lio(self, code, *, desc):
    """Read a HOPOPT Line-Identification option (RFC 6788).

    Reads, in wire order: option type, 1-byte option data length,
    1-byte line-ID length, then the line ID itself; any trailing bytes
    of the option data are consumed and discarded.
    NOTE(review): the reads are order-dependent - do not reorder.
    """
    _type = self._read_opt_type(code)
    _size = self._read_unpack(1)      # option data length
    _llen = self._read_unpack(1)      # LineIDLen
    _line = self._read_fileng(_llen)  # Line ID bytes
    opt = dict(
        desc=desc,
        type=_type,
        length=_size + 2,             # +2 for the type/length octets
        lid_len=_llen,
        lid=_line,
    )
    _plen = _size - _llen
    if _plen:
        # consume remaining option-data bytes beyond the line ID
        self._read_fileng(_plen)
    return opt
Read HOPOPT Line-Identification option. Structure of HOPOPT Line-Identification option [RFC 6788]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Option Type | Option Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | LineIDLen | Line ID... +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hopopt.lio.type Option Type 0 0 hopopt.lio.type.value Option Number 0 0 hopopt.lio.type.action Action (10) 0 2 hopopt.lio.type.change Change Flag (0) 1 8 hopopt.lio.length Length of Option Data 2 16 hopopt.lio.lid_len Line ID Length 3 24 hopopt.lio.lid Line ID
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
    """Return a copy of this bipartite graph with ``edge`` removed."""
    remaining = ((other, value)
                 for other, value in self._edges.items()
                 if edge != other)
    return BipartiteGraph(remaining)
Returns a copy of this bipartite graph with the given edge removed.
def setValues(self, rows, *values):
    """Set this column's value for each row in ``rows``, cycling through
    ``values``, then recalculate and report status."""
    for row, value in zip(rows, itertools.cycle(values)):
        self.setValueSafe(row, value)
    self.recalc()
    return status('set %d cells to %d values' % (len(rows), len(values)))
Set our column value for given list of rows to `value`.
def lookup_prefix(self, prefix, timestamp=timestamp_now):
    """Return lookup data of an amateur-radio prefix.

    Args:
        prefix (string): prefix of an Amateur Radio callsign.
        timestamp (datetime, optional): datetime in UTC
            (tzinfo=pytz.UTC).

    Returns:
        dict: country-specific data for the prefix.

    Raises:
        KeyError: no matching prefix found, or the configured lookup
            type is unsupported (message added; the original raised a
            bare ``KeyError``).
    """
    prefix = prefix.strip().upper()
    if self._lookuptype in ("clublogxml", "countryfile"):
        return self._check_data_for_date(
            prefix, timestamp, self._prefixes, self._prefixes_index)
    if self._lookuptype == "redis":
        data_dict, index = self._get_dicts_from_redis(
            "_prefix_", "_prefix_index_", self._redis_prefix, prefix)
        return self._check_data_for_date(prefix, timestamp, data_dict, index)
    # keep KeyError for caller compatibility, but say why
    raise KeyError("unsupported lookup type: %s" % self._lookuptype)
Returns lookup data of a Prefix Args: prefix (string): Prefix of a Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: dict: Dictionary containing the country specific data of the Prefix Raises: KeyError: No matching Prefix found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code shows how to obtain the information for the prefix "DH" from the countryfile.com database (default database). >>> from pyhamtools import LookupLib >>> myLookupLib = LookupLib() >>> print myLookupLib.lookup_prefix("DH") { 'adif': 230, 'country': u'Fed. Rep. of Germany', 'longitude': 10.0, 'cqz': 14, 'ituz': 28, 'latitude': 51.0, 'continent': u'EU' } Note: This method is available for - clublogxml - countryfile - redis
def query_target(target_chembl_id):
    """Query the ChEMBL API for a single target by id.

    :param target_chembl_id: the target's ChEMBL identifier (str).
    :returns: dict parsed from JSON describing the target.
    """
    query = {
        'query': 'target',
        'params': {'target_chembl_id': target_chembl_id, 'limit': 1},
    }
    return send_query(query)['targets'][0]
Query ChEMBL API target by id Parameters ---------- target_chembl_id : str Returns ------- target : dict dict parsed from json that is unique for the target
def sadd(self, key, *values):
    """Emulate redis SADD: add ``values`` to the set at ``key`` and
    return the number of newly added members."""
    if not values:
        raise ResponseError("wrong number of arguments for 'sadd' command")
    target = self._get_set(key, 'SADD', create=True)
    before = len(target)
    target.update(self._encode(v) for v in values)
    return len(target) - before
Emulate sadd.
def visualize_model(X, y, estimator, path, **kwargs):
    """Fit and score ``estimator`` on (X, y) inside a one-hot pipeline
    and write a classification-report visualization to ``path``."""
    labels = LabelEncoder().fit_transform(y)
    pipeline = Pipeline([
        ('one_hot_encoder', OneHotEncoder()),
        ('estimator', estimator),
    ])
    _, ax = plt.subplots()
    report = ClassificationReport(
        pipeline,
        classes=['edible', 'poisonous'],
        cmap="YlGn",
        size=(600, 360),
        ax=ax,
        **kwargs
    )
    report.fit(X, labels)
    report.score(X, labels)
    report.poof(outpath=path)
Test various estimators.
def add(self, data_source, module, package=None):
    """Add ``data_source`` to the model, registering its module/package
    so it can be imported later.

    :param data_source: name of the data source to add (str).
    :param module: module in which the data source resides; absolute or
        relative, see :func:`importlib.import_module`.
    :param package: required when ``module`` is relative.
    """
    super(Data, self).add(data_source, module, package)
    if data_source in self.layer:
        return
    self.layer[data_source] = {'module': module, 'package': package}
    self.objects[data_source] = None
Add data_source to model. Tries to import module, then looks for data source class definition. :param data_source: Name of data source to add. :type data_source: str :param module: Module in which data source resides. Can be absolute or relative. See :func:`importlib.import_module` :type module: str :param package: Optional, but must be used if module is relative. :type package: str .. seealso:: :func:`importlib.import_module`
def validate_signature(self, filename):
    """Return True when a valid GPG signature (``filename + '.sig'``)
    exists for ``filename``; False when GPG is unavailable or the
    signature file cannot be opened."""
    if not GPG_PRESENT:
        return False
    sigfilename = filename + '.sig'
    # EAFP existence/readability probe, as in the original
    try:
        open(sigfilename).close()
    except IOError:
        return False
    return verify(sigfilename, filename)
Returns True if a valid signature is present for filename
def get_parent_path(index=2):
    """Return the parent directory of the caller's path.

    Falls back to the current working directory when the caller path
    cannot be determined (e.g. CLI input through stdin).
    """
    try:
        base = _caller_path(index)
    except RuntimeError:
        base = os.getcwd()
    return os.path.abspath(os.path.join(base, os.pardir))
Get the caller's parent path to sys.path If the caller is a CLI through stdin, the parent of the current working directory is used
def _assertIndex(self, index): if type(index) is not int: raise TypeError('list indices must be integers') if index < 0 or index >= self.nelems: raise IndexError('list index out of range')
Raise TypeError or IndexError if index is not an integer or out of range for the number of elements in this array, respectively.
def get_pattern_additional_cycles(self, patternnumber):
    """Return the number of additional cycles (int) for the given
    pattern.

    Args:
        patternnumber (integer): 0-7
    """
    _checkPatternNumber(patternnumber)
    return self.read_register(
        _calculateRegisterAddress('cycles', patternnumber))
Get the number of additional cycles for a given pattern. Args: patternnumber (integer): 0-7 Returns: The number of additional cycles (int).
def cardinal(self, to):
    """Return the number of non-external dependencies of this module
    whose target is in ``to``.

    Args:
        to (Package/Module): the target node.

    Returns:
        int: number of matching dependencies.
    """
    count = 0
    for dep in self.dependencies:
        if not dep.external and dep.target in to:
            count += 1
    return count
Return the number of dependencies of this module to the given node. Args: to (Package/Module): the target node. Returns: int: number of dependencies.
def _restore_group(self, group_id): meta = self.TaskSetModel._default_manager.restore_taskset(group_id) if meta: return meta.to_dict()
Get group metadata for a group by id.
def get_all_hosted_routers(self, context):
    """Make a remote process call for the sync data of all routers
    scheduled to a hosting device.

    :param context: session context
    """
    prepared = self.client.prepare()
    return prepared.call(context, 'cfg_sync_all_hosted_routers',
                         host=self.host)
Make a remote process call to retrieve the sync data for routers that have been scheduled to a hosting device. :param context: session context
def _db(self):
    """Lazily create and return the storage client.

    :returns: :class:`livebridge.storages.base.BaseStorage`
    """
    # the original's hasattr + getattr pair collapses into one
    # defaulted lookup
    if getattr(self, "_db_client", None) is None:
        self._db_client = get_db_client()
    return self._db_client
Database client for accessing storage. :returns: :class:`livebridge.storages.base.BaseStorage`
def _check_available(name):
    """Return a boolean telling whether the named service is available.

    On systemd >= 231 the ``systemctl status`` return code is
    authoritative (0-3 means the unit exists); on older versions the
    status output text is parsed instead.
    """
    _status = _systemctl_status(name)
    sd_version = salt.utils.systemd.version(__context__)
    if sd_version is not None and sd_version >= 231:
        # systemd 231+: retcode 4 means "no such unit"
        return 0 <= _status['retcode'] < 4
    out = _status['stdout'].lower()
    if 'could not be found' in out:
        # unit file definitely absent
        return False
    # older systemd: look for the "Loaded:" line in the status output
    for line in salt.utils.itertools.split(out, '\n'):
        match = re.match(r'\s+loaded:\s+(\S+)', line)
        if match:
            ret = match.group(1) != 'not-found'
            break
    else:
        # no "loaded:" line found at all
        raise CommandExecutionError(
            'Failed to get information on unit \'%s\'' % name
        )
    return ret
Returns boolean telling whether or not the named service is available
def get_help_text(self):
    """Return the help output for all registered commands in plain text,
    including each command's attributes and elements."""
    parts = ['\n']
    for name, info in self.commands.items():
        parts.append("\t{0: <22} {1}\n".format(name, info['description']))
        if info['attributes']:
            parts.append("\t Attributes:\n")
            for attrname, attrdesc in info['attributes'].items():
                parts.append("\t {0: <22} {1}\n".format(attrname, attrdesc))
        if info['elements']:
            parts.append("\t Elements:\n")
            parts.append(self.elements_as_text(info['elements']))
    return ''.join(parts)
Returns the help output in plain text format.
def MetatagDistinctValuesGet(self, metatag_name, namespace=None):
    """Find the distinct values of a metatag name in a namespace.

    @param metatag_name (string) - Name of the metatag to query
    @param namespace (string) - Namespace to search (default "default")
    @return (bool) - whether the API call was successful
    """
    ns = namespace if namespace is not None else "default"
    ok = self.__SenseApiCall__("/metatag_name/{0}/distinct_values.json",
                               "GET", parameters={'namespace': ns})
    if not ok:
        self.__error__ = "api call unsuccessful"
    return bool(ok)
Find the distinct values of a metatag name in a certain namespace @param metatag_name (string) - Name of the metatag for which to find the distinct values @param namespace (string) - Namespace in which to find the distinct values @return (bool) - Boolean indicating whether MetatagDistinctValuesGet was successful
def check_platforms(platforms):
    """Return True when every entry in ``platforms`` is a valid platform
    code (an empty sequence is trivially valid)."""
    # all() over an empty generator is True, so no length check is needed
    return all(platform in PLATFORM_IDS for platform in platforms)
Checks if the platforms have a valid platform code
def encipher(self, string):
    """Encipher ``string`` with the Autokey cipher using the initialised
    key; punctuation and whitespace are removed from the input first.

    Example::

        ciphertext = Autokey('HELLO').encipher(plaintext)

    :param string: The string to encipher.
    :returns: The enciphered string.
    """
    text = self.remove_punctuation(string)
    keylen = len(self.key)
    out = []
    for i, ch in enumerate(text):
        # key stream: the key itself, then the plaintext (autokey)
        source = self.key[i] if i < keylen else text[i - keylen]
        out.append(self.i2a(self.a2i(ch) + self.a2i(source)))
    return ''.join(out)
Encipher string using Autokey cipher according to initialised key. Punctuation and whitespace are removed from the input. Example:: ciphertext = Autokey('HELLO').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string.
def get_requested_quarter_data(self,
                               zero_qtr_data,
                               zeroth_quarter_idx,
                               stacked_last_per_qtr,
                               num_announcements,
                               dates):
    """Select the requested quarter's estimate data for each date.

    Shifts the 'time zero' normalized-quarter index by
    ``num_announcements``, pulls those rows from
    ``stacked_last_per_qtr``, recovers fiscal year/quarter columns, and
    returns the result unstacked by sid and reindexed to ``dates``.
    """
    zero_qtr_data_idx = zero_qtr_data.index
    # build the shifted (date, sid, quarter) index for the requested
    # number of announcements out
    requested_qtr_idx = pd.MultiIndex.from_arrays(
        [
            zero_qtr_data_idx.get_level_values(0),
            zero_qtr_data_idx.get_level_values(1),
            self.get_shifted_qtrs(
                zeroth_quarter_idx.get_level_values(
                    NORMALIZED_QUARTERS,
                ),
                num_announcements,
            ),
        ],
        names=[
            zero_qtr_data_idx.names[0],
            zero_qtr_data_idx.names[1],
            SHIFTED_NORMALIZED_QTRS,
        ],
    )
    requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx]
    requested_qtr_data = requested_qtr_data.reset_index(
        SHIFTED_NORMALIZED_QTRS,
    )
    # split the shifted normalized quarter back into fiscal year and
    # fiscal quarter columns
    (requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
     requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \
        split_normalized_quarters(
            requested_qtr_data[SHIFTED_NORMALIZED_QTRS]
        )
    return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates)
Selects the requested data for each date. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. zeroth_quarter_idx : pd.Index An index of calendar dates, sid, and normalized quarters, for only the rows that have a next or previous earnings estimate. stacked_last_per_qtr : pd.DataFrame The latest estimate known with the dates, normalized quarter, and sid as the index. num_announcements : int The number of annoucements out the user requested relative to each date in the calendar dates. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. Returns -------- requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns; `dates` are the index and columns are a MultiIndex with sids at the top level and the dataset columns on the bottom.
def parse_startup_message(self):
    """Parse an OmapiStartupMessage from two chained net32 ints.

    >>> d = b"\\0\\0\\0\\x64\\0\\0\\0\\x18"
    >>> next(InBuffer(d).parse_startup_message()).validate()
    """
    pair_parser = parse_chain(self.parse_net32int,
                              lambda _: self.parse_net32int())
    return parse_map(lambda args: OmapiStartupMessage(*args), pair_parser)
results in an OmapiStartupMessage >>> d = b"\\0\\0\\0\\x64\\0\\0\\0\\x18" >>> next(InBuffer(d).parse_startup_message()).validate()
def cal_frame_according_boundaries(left, right, top, bottom, parent_size,
                                   gaphas_editor=True, group=True):
    """Return (margin, relative position, size) for a frame around the
    given boundaries within ``parent_size``.

    With ``group`` set, a margin is added and the frame is clamped
    inside the parent; otherwise the raw boundary box is returned.
    """
    margin = cal_margin(parent_size)
    if not group:
        return margin, (left, top), (right - left, bottom - top)
    rel_pos = (max(left - margin, 0), max(top - margin, 0))
    width = min(right - left + 2 * margin, parent_size[0] - rel_pos[0])
    height = min(bottom - top + 2 * margin, parent_size[1] - rel_pos[1])
    return margin, rel_pos, (width, height)
Generate margin and relative position and size handed boundary parameter and parent size
def create_permissions_from_tuples(model, codename_tpls):
    """Create custom permissions on ``model`` from codename tuples,
    skipping permissions that already exist."""
    if not codename_tpls:
        return
    model_cls = django_apps.get_model(model)
    content_type = ContentType.objects.get_for_model(model_cls)
    for codename_tpl in codename_tpls:
        app_label, codename, name = get_from_codename_tuple(
            codename_tpl, model_cls._meta.app_label)
        try:
            Permission.objects.get(codename=codename,
                                   content_type=content_type)
        except ObjectDoesNotExist:
            Permission.objects.create(name=name, codename=codename,
                                      content_type=content_type)
        verify_codename_exists(f"{app_label}.{codename}")
Creates custom permissions on model "model".
async def apply_command(self, cmd):
    """Apply a command.

    Calls the pre and post hooks attached to the command as well as
    :meth:`cmd.apply` (awaited when it is a coroutine function); errors
    from apply are routed to the UI error handler and suppress the
    posthook.

    :param cmd: an applicable command
    :type cmd: :class:`~alot.commands.Command`
    """
    if not cmd:
        return
    if cmd.prehook:
        await cmd.prehook(ui=self, dbm=self.dbman, cmd=cmd)
    try:
        if asyncio.iscoroutinefunction(cmd.apply):
            await cmd.apply(self)
        else:
            cmd.apply(self)
    except Exception as e:
        self._error_handler(e)
    else:
        if cmd.posthook:
            logging.info('calling post-hook')
            await cmd.posthook(ui=self, dbm=self.dbman, cmd=cmd)
applies a command This calls the pre and post hooks attached to the command, as well as :meth:`cmd.apply`. :param cmd: an applicable command :type cmd: :class:`~alot.commands.Command`
def add_record(post_id, catalog_id, order=0):
    """Create or update the post-to-tag record linking ``post_id`` to
    ``catalog_id``, then refresh the tag's usage count.

    NOTE(review): ``MPost2Catalog.__get_by_info`` relies on name
    mangling, so this function presumably lives inside that class -
    confirm.
    """
    rec = MPost2Catalog.__get_by_info(post_id, catalog_id)
    if rec:
        # existing link: update the ordering and parent-id prefix
        entry = TabPost2Tag.update(
            order=order,
            par_id=rec.tag_id[:2] + '00',
        ).where(TabPost2Tag.uid == rec.uid)
        entry.execute()
    else:
        # no link yet: create a fresh record
        TabPost2Tag.create(
            uid=tools.get_uuid(),
            par_id=catalog_id[:2] + '00',
            post_id=post_id,
            tag_id=catalog_id,
            order=order,
        )
    MCategory.update_count(catalog_id)
Create the record of post 2 tag, and update the count in g_tag.
def register_piece(self, from_address, to_address, hash, password,
                   min_confirmations=6, sync=False, ownership=True):
    """Register a piece on the blockchain.

    :param from_address: tuple unpacked as (path, federation address).
    :param to_address: address registering the edition.
    :param hash: tuple (file_hash, file_hash_metadata) of the piece.
    :param password: federation wallet password used for signing.
    :param min_confirmations: confirmations required when choosing the
        transaction inputs (default 6).
    :param sync: NOTE(review): accepted but unused in this body - confirm.
    :param ownership: NOTE(review): accepted but unused in this body - confirm.
    :returns: transaction id (str).
    """
    file_hash, file_hash_metadata = hash
    path, from_address = from_address
    verb = Spoolverb()
    unsigned_tx = self.simple_spool_transaction(
        from_address,
        [file_hash, file_hash_metadata, to_address],
        op_return=verb.piece,
        min_confirmations=min_confirmations)
    signed_tx = self._t.sign_transaction(unsigned_tx, password)
    txid = self._t.push(signed_tx)
    return txid
Register a piece Args: from_address (Tuple[str]): Federation address. All register transactions originate from the the Federation wallet to_address (str): Address registering the edition hash (Tuple[str]): Hash of the piece. (file_hash, file_hash_metadata) password (str): Federation wallet password. For signing the transaction edition_num (int): The number of the edition to register. User edition_num=0 to register the master edition min_confirmations (int): Override the number of confirmations when chosing the inputs of the transaction. Defaults to 6 sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at least on confirmation on the blockchain. Defaults to False ownership (bool): Check ownsership in the blockchain before pushing the transaction. Defaults to True Returns: str: transaction id
def debug(*args):
    """Send debug messages to the Maltego console (stderr)."""
    for message in args:
        click.echo('D:%s' % str(message), err=True)
Send debug messages to the Maltego console.
def amount(self, amount):
    """Set the amount of this Money, in the smallest denomination of
    its currency (e.g. cents when the currency is USD).

    :param amount: non-negative int amount.
    :raises ValueError: when ``amount`` is None or negative.
    """
    if amount is None:
        raise ValueError("Invalid value for `amount`, must not be `None`")
    if amount < 0:
        raise ValueError(
            "Invalid value for `amount`, must be a value greater than or equal to `0`")
    self._amount = amount
Sets the amount of this Money. The amount of money, in the smallest denomination of the currency indicated by `currency`. For example, when `currency` is `USD`, `amount` is in cents. :param amount: The amount of this Money. :type: int
def item_options(self, **kwargs):
    """Handle an OPTIONS request on an item route.

    Singular resources also advertise POST, since singular views may
    handle create despite being registered as item routes.
    """
    actions = self._item_actions.copy()
    if self._resource.is_singular:
        actions['create'] = ('POST',)
    return self._set_options_headers(self._get_handled_methods(actions))
Handle collection OPTIONS request. Singular route requests are handled a bit differently because singular views may handle POST requests despite being registered as item routes.
def prepare(cls):
    """Prepare a NApp for upload by generating an openapi.yml skeleton,
    then exit so the user can edit it."""
    if not cls._ask_openapi():
        return
    tpl_path = SKEL_PATH / 'napp-structure/username/napp'
    OpenAPI(Path(), tpl_path).render_template()
    print('Please, update your openapi.yml file.')
    sys.exit()
Prepare NApp to be uploaded by creating openAPI skeleton.
def get_diff_endpoints_from_commit_range(repo, commit_range):
    """Resolve ``commit_range`` ('a..b' or 'a...b') to a diffable pair
    of commits; for 'a...b' the start is the merge base of a and b.

    Args:
        repo (git.Repo): repo initialized with the project root.
        commit_range (str): commit range in ``a..b``/``a...b`` form.

    Returns:
        Tuple[git.Commit, git.Commit]: starting and ending commits.

    Raises:
        ValueError: commit_range is empty or ill-formed.
    """
    if not commit_range:
        raise ValueError('commit_range cannot be empty')
    result = re_find(COMMIT_RANGE_REGEX, commit_range)
    if not result:
        raise ValueError(
            'Expected diff str of the form \'a..b\' or \'a...b\' (got {})'
            .format(commit_range))
    start = repo.rev_parse(result['a'])
    end = repo.rev_parse(result['b'])
    if result['thirddot']:
        start = one_or_raise(repo.merge_base(start, end))
    return start, end
Get endpoints of a diff given a commit range The resulting endpoints can be diffed directly:: a, b = get_diff_endpoints_from_commit_range(repo, commit_range) a.diff(b) For details on specifying git diffs, see ``git diff --help``. For details on specifying revisions, see ``git help revisions``. Args: repo (git.Repo): Repo object initialized with project root commit_range (str): commit range as would be interpreted by ``git diff`` command. Unfortunately only patterns of the form ``a..b`` and ``a...b`` are accepted. Note that the latter pattern finds the merge-base of a and b and uses it as the starting point for the diff. Returns: Tuple[git.Commit, git.Commit]: starting commit, ending commit ( inclusive) Raises: ValueError: commit_range is empty or ill-formed See also: <https://stackoverflow.com/q/7251477>
def authenticate(self, user, password):
    """Authenticate ``user`` against ``password``.

    Raises AssertionError when the password hash does not match. The
    exception type is kept for caller compatibility, but the explicit
    raise (unlike a bare ``assert``) survives ``python -O``.
    """
    if user['password_hash'] != '_'.join((password, 'hash')):
        raise AssertionError('invalid password')
    self.logger.debug('User %s has been successfully authenticated',
                      user['uid'])
Authenticate user.
def add_arguments(self, parser):
    """Register the list/drop/create subcommands and the --database /
    --trigger filter options on ``parser``."""
    subparsers = parser.add_subparsers(help='sub-command help',
                                       dest='command')
    add_parser = partial(_add_subparser, subparsers, parser)
    for name, help_text in (('list', "list concurrency triggers"),
                            ('drop', "drop concurrency triggers"),
                            ('create', "create concurrency triggers")):
        add_parser(name, help=help_text)
    parser.add_argument('-d', '--database', action='store', dest='database',
                        default=None, help='limit to this database')
    parser.add_argument('-t', '--trigger', action='store', dest='trigger',
                        default=None, help='limit to this trigger name')
Entry point for subclassed commands to add custom arguments.
def toggle_concatenate(self):
    """Enable and disable concatenation options based on the current
    chunk and selection state, then refresh the segment count."""
    if not (self.chunk['epoch'].isChecked() and
            self.lock_to_staging.get_value()):
        # each category checkbox is only meaningful when more than one
        # item of the matching selector is selected
        for i, j in zip([self.idx_chan, self.idx_cycle, self.idx_stage,
                         self.idx_evt_type],
                        [self.cat['chan'], self.cat['cycle'],
                         self.cat['stage'], self.cat['evt_type']]):
            if len(i.selectedItems()) > 1:
                j.setEnabled(True)
            else:
                j.setEnabled(False)
                j.setChecked(False)
        if not self.chunk['event'].isChecked():
            # event-type concatenation only applies to event chunks
            self.cat['evt_type'].setEnabled(False)
        if not self.cat['discontinuous'].get_value():
            # channel concatenation requires discontinuous mode
            self.cat['chan'].setEnabled(False)
            self.cat['chan'].setChecked(False)
    self.update_nseg()
Enable and disable concatenation options.
def convert_machine_list_time_val(text: str) -> datetime.datetime:
    """Convert an RFC 3659 time-val ('YYYYMMDDHHMMSS', any fractional
    part ignored) to an aware UTC datetime.

    :raises ValueError: when fewer than 14 digits are supplied.
    """
    text = text[:14]
    if len(text) != 14:
        raise ValueError('Time value not 14 chars')
    year, month, day = int(text[0:4]), int(text[4:6]), int(text[6:8])
    hour, minute, second = (int(text[8:10]), int(text[10:12]),
                            int(text[12:14]))
    return datetime.datetime(year, month, day, hour, minute, second,
                             tzinfo=datetime.timezone.utc)
Convert RFC 3659 time-val to datetime objects.
def _zoom_rows(self, zoom):
    """Zoom grid rows: scale the default row size, row label size, and
    every explicitly-sized row of the current table by ``zoom``."""
    self.grid.SetDefaultRowSize(self.grid.std_row_size * zoom,
                                resizeExistingRows=True)
    self.grid.SetRowLabelSize(self.grid.row_label_size * zoom)
    # keys of row_heights are presumably (row, table) pairs - confirm
    for row, tab in self.code_array.row_heights:
        if tab == self.grid.current_table and \
           row < self.grid.code_array.shape[0]:
            base_row_width = self.code_array.row_heights[(row, tab)]
            if base_row_width is None:
                # no explicit height stored: scale from the default
                base_row_width = self.grid.GetDefaultRowSize()
            zoomed_row_size = base_row_width * zoom
            self.grid.SetRowSize(row, zoomed_row_size)
Zooms grid rows
def bulk_copy(self, ids):
    """Bulk copy a set of devices.

    :param ids: Int list of device IDs.
    :return: :class:`devices.Device <devices.Device>` list
    """
    return self.service.bulk_copy(self.base, self.RESOURCE, ids,
                                  DeviceSchema())
Bulk copy a set of devices. :param ids: Int list of device IDs. :return: :class:`devices.Device <devices.Device>` list
def configure(self, options, conf):
    """Configure the plugin from parsed options; the plugin is enabled
    by default."""
    self.conf = conf
    self.when = options.browser_closer_when
Configure plugin. Plugin is enabled by default.
def _make(c):
    """Collect annotation strings and valid feature names from cluster
    ``c`` for the html template.

    Returns (valid, ann_list) where ann_list holds one comma-joined,
    de-duplicated annotation string per valid feature.
    """
    ann = defaultdict(list)
    for pos in c['ann']:
        for db in pos:
            ann[db] += list(pos[db])
    logger.debug(ann)
    # was ``[l for l in c['valid']]`` - a copy-comprehension with a
    # shadowing single-letter name; list() is the idiomatic copy
    valid = list(c['valid'])
    ann_list = [", ".join(set(ann[feature]))
                for feature in ann if feature in valid]
    return valid, ann_list
create html from template, adding figure, annotation and sequences counts
def get_objects(self):
    """Return the list of objects passed as this parameter."""
    passed = rope.base.oi.soi.get_passed_objects
    return passed(self.pyfunction, self.index)
Returns the list of objects passed as this parameter
def edit(filename, identifier, data):
    """Update entry ``identifier`` in BibTeX file ``filename`` with the
    first entry of ``data`` and rewrite the file.

    :param filename: name of the BibTeX file to edit.
    :param identifier: id of the entry to update.
    :param data: parsed BibTeX data whose first entry replaces the old
        one; fields not present are not preserved per-field.
    """
    with open(filename, 'r') as fh:
        bibtex = bibtexparser.load(fh)
    bibtex.entries_dict[identifier] = data.entries[0]
    write(filename, bibtex)
Update an entry in a BibTeX file. :param filename: The name of the BibTeX file to edit. :param identifier: The id of the entry to update, in the BibTeX file. :param data: A dict associating fields and updated values. Fields present \ in the BibTeX file but not in this dict will be kept as is.
def pretty_print(self, indent=0):
    """Return an indented string rendering of this element: its tag,
    then its value, recursing into child elements.

    NOTE(review): uses ``basestring``, so this is Python 2 code.
    """
    # 'tab' is assigned but unused in this body
    s = tab = ' '*indent
    s += '%s: ' %self.tag
    if isinstance(self.value, basestring):
        # leaf value: print inline
        s += self.value
    else:
        # container value: recurse with extra indentation
        s += '\n'
        for e in self.value:
            s += e.pretty_print(indent+4)
        s += '\n'
    return s
Print the document without tags using indentation
def basic_auth(self, username, password):
    """Set Basic Auth credentials on this Session; no-op when either
    credential is empty.

    :param str username: Your GitHub username
    :param str password: Your GitHub password
    """
    if username and password:
        self.auth = (username, password)
        # basic auth replaces any token-based Authorization header
        self.headers.pop('Authorization', None)
Set the Basic Auth credentials on this Session. :param str username: Your GitHub username :param str password: Your GitHub password
def searchEnterpriseGroups(self, searchFilter="", maxCount=100):
    """Search groups in the configured enterprise group store.

    Parameters:
        searchFilter - text value to narrow the search down
        maxCount - maximum number of records to return
    """
    url = self._url + "/groups/searchEnterpriseGroups"
    params = {
        "f": "json",
        "filter": searchFilter,
        "maxCount": maxCount
    }
    return self._post(url=url, param_dict=params,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
This operation searches groups in the configured enterprise group store. You can narrow down the search using the search filter parameter. Parameters: searchFilter - text value to narrow the search down maxCount - maximum number of records to return
def validation_statuses(self, area_uuid):
    """Return per-state counts of validation statuses for all files in
    upload area ``area_uuid``.

    :param str area_uuid: RFC4122-compliant upload area id.
    :rtype: dict
    """
    path = "/area/{uuid}/validations".format(uuid=area_uuid)
    return self._make_request('get', path).json()
Get count of validation statuses for all files in upload_area :param str area_uuid: A RFC4122-compliant ID for the upload area :return: a dict with key for each state and value being the count of files in that state :rtype: dict :raises UploadApiException: if information could not be obtained
def get_callproc_signature(self, name, param_types):
    """Return a procedure's signature from its name and parameter types.

    :name: the name of the procedure.
    :param_types: list of plain type strings or (name, db_type) 2-tuples.
    :return: the procedure's signature string.
    """
    if isinstance(param_types[0], (list, tuple)):
        placeholders = [self.sql_writer.to_placeholder(*pt)
                        for pt in param_types]
    else:
        placeholders = [self.sql_writer.to_placeholder(None, pt)
                        for pt in param_types]
    return name + self.sql_writer.to_tuple(placeholders)
Returns a procedure's signature from the name and list of types. :name: the name of the procedure :params: can be either strings, or 2-tuples. 2-tuples must be of the form (name, db_type). :return: the procedure's signature
def diff(x, lag=1, differences=1):
    """Difference an array like R's ``diff``: apply a ``lag`` difference
    ``differences`` times. Returns an empty array when the lag exceeds
    the remaining length.

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array to difference.
    lag : int, optional (default=1)
        Which lag to use (> 0).
    differences : int, optional (default=1)
        Order of the difference (> 0).
    """
    if lag < 1 or differences < 1:
        raise ValueError('lag and differences must be positive (> 0) integers')
    x = check_array(x, ensure_2d=False, dtype=np.float32)
    differencer = _diff_vector if x.ndim == 1 else _diff_matrix
    res = x
    for _ in range(differences):
        res = differencer(res, lag)
        if not res.shape[0]:
            break
    return res
Difference an array. A python implementation of the R ``diff`` function [1]. This computes lag differences from an array given a ``lag`` and ``differencing`` term. If ``x`` is a vector of length :math:`n`, ``lag=1`` and ``differences=1``, then the computed result is equal to the successive differences ``x[lag:n] - x[:n-lag]``. Examples -------- Where ``lag=1`` and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 1, 1) array([ -6., -2., 7., 25.], dtype=float32) Where ``lag=1`` and ``differences=2``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 1, 2) array([ 4., 9., 18.], dtype=float32) Where ``lag=3`` and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 3, 1) array([ -1., 30.], dtype=float32) Where ``lag=6`` (larger than the array is) and ``differences=1``: >>> x = c(10, 4, 2, 9, 34) >>> diff(x, 6, 1) array([], dtype=float32) For a 2d array with ``lag=1`` and ``differences=1``: >>> import numpy as np >>> >>> x = np.arange(1, 10).reshape((3, 3)).T >>> diff(x, 1, 1) array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array to difference. lag : int, optional (default=1) An integer > 0 indicating which lag to use. differences : int, optional (default=1) An integer > 0 indicating the order of the difference. Returns ------- res : np.ndarray, shape=(n_samples, [n_features]) The result of the differenced arrays. References ---------- .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/diff.html
def parse_macro_params(token):
    """Parse a template tag token for use_macro / macro_block.

    Returns (tag_name, macro_name, args, kwargs), where args/kwargs hold
    ``template.Variable`` instances. Raises TemplateSyntaxError when the
    macro name is missing or an argument is malformed.
    """
    try:
        bits = token.split_contents()
        tag_name, macro_name, values = bits[0], bits[1], bits[2:]
    except IndexError:
        raise template.TemplateSyntaxError(
            "{0} tag requires at least one argument (macro name)".format(
                token.contents.split()[0]))
    args = []
    kwargs = {}
    # name=value pair: value may be double/single quoted or a bare name
    kwarg_regex = (
        r'^([A-Za-z_][\w_]*)=(".*"|{0}.*{0}|[A-Za-z_][\w_]*)$'.format(
            "'"))
    # positional arg: bare name, quoted string, or integer literal
    arg_regex = r'^([A-Za-z_][\w_]*|".*"|{0}.*{0}|(\d+))$'.format(
        "'")
    for value in values:
        kwarg_match = regex_match(
            kwarg_regex, value)
        if kwarg_match:
            kwargs[kwarg_match.groups()[0]] = template.Variable(
                kwarg_match.groups()[1])
        else:
            arg_match = regex_match(
                arg_regex, value)
            if arg_match:
                args.append(template.Variable(arg_match.groups()[0]))
            else:
                raise template.TemplateSyntaxError(
                    "Malformed arguments to the {0} tag.".format(
                        tag_name))
    return tag_name, macro_name, args, kwargs
Common parsing logic for both use_macro and macro_block
def get_local_user():
    """Return the local executing username, or ``None`` if one can't be
    found.

    .. versionadded:: 2.0
    """
    import getpass
    username = None
    try:
        username = getpass.getuser()
    except KeyError:
        # no passwd entry for the current uid
        pass
    except ImportError:
        # getpass could not import pwd (e.g. on Windows); fall back to
        # the win32 API when available.
        # NOTE(review): ``win32`` is presumably a module-level platform
        # flag defined elsewhere in this file - confirm.
        if win32:
            import win32api
            import win32security
            import win32profile
            username = win32api.GetUserName()
    return username
Return the local executing username, or ``None`` if one can't be found. .. versionadded:: 2.0
def visit_ImportFrom(self, node):
    """Check validity of imported functions.

    Rejects relative imports, missing module names, modules/submodules
    absent from MODULES, and imported names absent from the resolved
    module.
    """
    if node.level:
        raise PythranSyntaxError("Relative import not supported", node)
    if not node.module:
        raise PythranSyntaxError("import from without module", node)
    module = node.module
    current_module = MODULES
    for part in module.split('.'):
        if part not in current_module:
            raise PythranSyntaxError(
                "Module '{0}' unknown.".format(module), node)
        current_module = current_module[part]
    for alias in node.names:
        # star imports are accepted wholesale
        if alias.name != '*' and alias.name not in current_module:
            raise PythranSyntaxError(
                "identifier '{0}' not found in module '{1}'".format(
                    alias.name, module), node)
Check validity of imported functions. Check: - no level specific value are provided. - a module is provided - module/submodule exists in MODULES - imported function exists in the given module/submodule
def _gen_keys_from_multicol_key(key_multicol, n_keys): keys = [('{}{:03}of{:03}') .format(key_multicol, i+1, n_keys) for i in range(n_keys)] return keys
Generates single-column keys from multicolumn key.
def _set_categories(self, categories, fastpath=False): if fastpath: new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) else: new_dtype = CategoricalDtype(categories, ordered=self.ordered) if (not fastpath and self.dtype.categories is not None and len(new_dtype.categories) != len(self.dtype.categories)): raise ValueError("new categories need to have the same number of " "items than the old categories!") self._dtype = new_dtype
Sets new categories inplace Parameters ---------- fastpath : bool, default False Don't perform validation of the categories for uniqueness or nulls Examples -------- >>> c = pd.Categorical(['a', 'b']) >>> c [a, b] Categories (2, object): [a, b] >>> c._set_categories(pd.Index(['a', 'c'])) >>> c [a, c] Categories (2, object): [a, c]
def current_time(self) -> datetime:
    """Combine the StartT date and time attributes into one datetime."""
    start = self.obj.SBRes.SBReq.StartT
    day = datetime.strptime(start.get("date"), "%Y%m%d").date()
    clock = datetime.strptime(start.get("time"), "%H:%M").time()
    return datetime.combine(day, clock)
Extract current time.
def pdf_doc_info(instance):
    """Yield JSONErrors for invalid PDF Document Information Dictionary keys.

    Walks file objects carrying the pdf-ext extension and flags every
    ``document_info_dict`` key not listed in ``enums.PDF_DID``.
    """
    for key, obj in instance['objects'].items():
        if 'type' not in obj or obj['type'] != 'file':
            continue
        try:
            did = obj['extensions']['pdf-ext']['document_info_dict']
        except KeyError:
            # No pdf-ext extension or no document_info_dict: nothing to check.
            continue
        for elem in did:
            if elem not in enums.PDF_DID:
                yield JSONError("The 'document_info_dict' property of "
                                "object '%s' contains a key ('%s') that is"
                                " not a valid PDF Document Information "
                                "Dictionary key." % (key, elem),
                                instance['id'], 'pdf-doc-info')
Ensure the keys of the 'document_info_dict' property of the pdf-ext extension of file objects are only valid PDF Document Information Dictionary Keys.
def update_source(self, **kwargs):
    """Set the BGP update-source property for a neighbor.

    Currently only supports loopback interfaces.

    Keyword Args:
        neighbor (str): IPv4 or IPv6 address of the BGP neighbor (required).
        int_type (str): Interface type; only loopback is supported.
        int_name (str): Interface identifier (1, 5, 7, etc).
        rbridge_id (str): Rbridge ID of the target device. Default: '1'.
        vrf (str): VRF for this BGP process. Default: 'default'.
        get (bool): Get config instead of editing it. Default: False.
        delete (bool): Remove the update-source configuration.
        callback (function): Invoked with the generated ``ElementTree``
            config; defaults to ``self._callback``.

    Returns:
        Return value of `callback`.

    Raises:
        AttributeError: when `neighbor` is not a valid IPv4/IPv6 address.
        KeyError: when `int_type` or `int_name` is not specified.
    """
    callback = kwargs.pop('callback', self._callback)
    # NOTE(review): `unicode` implies this module targets Python 2.
    ip_addr = ip_interface(unicode(kwargs.pop('neighbor')))
    config = self._update_source_xml(neighbor=ip_addr,
                                     int_type=kwargs.pop('int_type'),
                                     int_name=kwargs.pop('int_name'),
                                     rbridge_id=kwargs.pop('rbridge_id', '1'),
                                     vrf=kwargs.pop('vrf', 'default'))
    if kwargs.pop('get', False):
        return callback(config, handler='get_config')
    if kwargs.pop('delete', False):
        # Mark the existing update-source node for deletion in place.
        config.find('.//*update-source').set('operation', 'delete')
    return callback(config)
Set BGP update source property for a neighbor. This method currently only supports loopback interfaces. Args: vrf (str): The VRF for this BGP process. rbridge_id (str): The rbridge ID of the device on which BGP will be configured in a VCS fabric. neighbor (str): Address family to configure. (ipv4, ipv6) int_type (str): Interface type (loopback) int_name (str): Interface identifier (1, 5, 7, etc) get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: ``AttributeError``: When `neighbor` is not a valid IPv4 or IPv6 address. ``KeyError``: When `int_type` or `int_name` are not specified. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.230'] >>> for switch in switches: ... conn = (switch, '22') ... auth = ('admin', 'password') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... dev.interface.ip_address(int_type='loopback', name='6', ... rbridge_id='225', ip_addr='6.6.6.6/32') ... dev.interface.ip_address(int_type='loopback', name='6', ... ip_addr='0:0:0:0:0:ffff:606:606/128', rbridge_id='225') ... dev.bgp.local_asn(local_as='65535', rbridge_id='225') ... dev.bgp.neighbor(ip_addr='10.10.10.10', ... remote_as='65535', rbridge_id='225') ... dev.bgp.neighbor(remote_as='65535', rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.bgp.update_source(neighbor='10.10.10.10', ... rbridge_id='225', int_type='loopback', int_name='6') ... dev.bgp.update_source(get=True, neighbor='10.10.10.10', ... rbridge_id='225', int_type='loopback', int_name='6') ... dev.bgp.update_source(rbridge_id='225', int_name='6', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1', ... int_type='loopback') ... dev.bgp.update_source(get=True, rbridge_id='225', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1', ... 
int_type='loopback', int_name='6') ... dev.bgp.update_source(neighbor='10.10.10.10', ... rbridge_id='225', delete=True, int_type='loopback', ... int_name='6') ... dev.bgp.update_source(delete=True, int_type='loopback', ... rbridge_id='225', int_name='6', ... neighbor='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.bgp.neighbor(ip_addr='10.10.10.10', delete=True, ... rbridge_id='225') ... dev.bgp.neighbor(delete=True, rbridge_id='225', ... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1') ... dev.interface.ip_address(int_type='loopback', name='6', ... rbridge_id='225', ip_addr='6.6.6.6/32', delete=True) ... dev.interface.ip_address(int_type='loopback', name='6', ... ip_addr='0:0:0:0:0:ffff:606:606/128', rbridge_id='225', ... delete=True) ... output = dev.bgp.update_source(rbridge_id='225', ... int_type='loopback') ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError KeyError
def set_wsgi_params(self, module=None, callable_name=None, env_strategy=None):
    """Set WSGI-related uWSGI parameters.

    :param str|unicode module: a ``.wsgi`` file path (detected by a ``/``
        in the value) or a dotted WSGI module name, optionally with a
        ``:callable`` suffix.
    :param str|unicode callable_name: WSGI callable name. Default: application.
    :param str|unicode env_strategy: WSGI env allocation strategy
        (``cheat`` or ``holy``).
    """
    module = module or ''
    option = 'wsgi-file' if '/' in module else 'wsgi'
    self._set(option, module, condition=module)
    self._set('callable', callable_name)
    self._set('wsgi-env-behaviour', env_strategy)
    return self._section
Set wsgi related parameters. :param str|unicode module: * load .wsgi file as the Python application * load a WSGI module as the application. .. note:: The module (sans ``.py``) must be importable, ie. be in ``PYTHONPATH``. Examples: * mypackage.my_wsgi_module -- read from `application` attr of mypackage/my_wsgi_module.py * mypackage.my_wsgi_module:my_app -- read from `my_app` attr of mypackage/my_wsgi_module.py :param str|unicode callable_name: Set WSGI callable name. Default: application. :param str|unicode env_strategy: Strategy for allocating/deallocating the WSGI env, can be: * ``cheat`` - preallocates the env dictionary on uWSGI startup and clears it after each request. Default behaviour for uWSGI <= 2.0.x * ``holy`` - creates and destroys the environ dictionary at each request. Default behaviour for uWSGI >= 2.1
def _exec_cleanup(self, cursor, fd):
    """Close the cursor, drop internal references to the fd, and schedule
    an idle-pool cleanup on the IOLoop.

    :param psycopg2.extensions.cursor cursor: The cursor to close
    :param int fd: The connection file descriptor
    """
    LOGGER.debug('Closing cursor and cleaning %s', fd)
    try:
        cursor.close()
    except (psycopg2.Error, psycopg2.Warning) as error:
        # Best-effort close: a failure here must not abort the cleanup.
        LOGGER.debug('Error closing the cursor: %s', error)
    self._cleanup_fd(fd)
    # Reschedule the single pending pool-clean timeout so it fires once,
    # pool_idle_ttl (+1s) after the most recent cleanup.
    if self._cleanup_callback:
        self._ioloop.remove_timeout(self._cleanup_callback)
    self._cleanup_callback = self._ioloop.add_timeout(
        self._ioloop.time() + self._pool_idle_ttl + 1,
        self._pool_manager.clean, self.pid)
Close the cursor, remove any references to the fd in internal state and remove the fd from the ioloop. :param psycopg2.extensions.cursor cursor: The cursor to close :param int fd: The connection file descriptor
def _on_connect(self, sequence, topic, message):
    """Process a request to connect to an IOTile device.

    A connect message triggers an attempt to connect to a device; error
    checking is done by the DeviceManager that actually manages devices.

    Args:
        sequence (int): The sequence number of the packet received
        topic (string): The topic this message was received on
        message (dict): The message itself
    """
    try:
        slug = None
        parts = topic.split('/')
        slug = parts[-3]
        uuid = self._extract_device_uuid(slug)
    except Exception:
        self._logger.exception("Error parsing slug from connection request (slug=%s, topic=%s)", slug, topic)
        return
    if messages.ConnectCommand.matches(message):
        key = message['key']
        client = message['client']
        self._loop.add_callback(self._connect_to_device, uuid, key, client)
    else:
        # Logger.warn is a deprecated alias; use warning() instead.
        self._logger.warning("Unknown message received on connect topic=%s, message=%s", topic, message)
Process a request to connect to an IOTile device A connection message triggers an attempt to connect to a device, any error checking is done by the DeviceManager that is actually managing the devices. A disconnection message is checked to make sure its key matches what we expect for this device and is either discarded or forwarded on to the DeviceManager. Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself
def create_build_configuration_set_raw(**kwargs):
    """Create a new BuildConfigurationSet and return its content, if any."""
    config_set = _create_build_config_set_object(**kwargs)
    response = utils.checked_api_call(
        pnc_api.build_group_configs, 'create_new', body=config_set)
    return response.content if response else None
Create a new BuildConfigurationSet.
def _extract_methods(self):
    """Obtain the methods used in the service from its HTTP rules.

    Registers each valid (http method, url) binding, synthesizes CORS
    OPTIONS selectors for urls that lack one, then refreshes usage and
    system-parameter bookkeeping.
    """
    service = self._service
    all_urls = set()
    urls_with_options = set()
    if not service.http:
        return
    for rule in service.http.rules:
        http_method, url = _detect_pattern_option(rule)
        if not url or not http_method or not rule.selector:
            # Skip malformed bindings rather than failing the whole service.
            _logger.error(u'invalid HTTP binding encountered')
            continue
        method_info = self._get_or_create_method_info(rule.selector)
        if rule.body:
            method_info.body_field_path = rule.body
        if not self._register(http_method, url, method_info):
            continue
        all_urls.add(url)
        if http_method == self._OPTIONS:
            urls_with_options.add(url)
    # Add CORS preflight handlers only where none was declared explicitly.
    self._add_cors_options_selectors(all_urls - urls_with_options)
    self._update_usage()
    self._update_system_parameters()
Obtains the methods used in the service.
def send(self, text, thread_ts=None):
    """Send a reply via the RTM API.

    (This path does not support formatted messages when using a bot
    integration.)
    """
    channel = self._body['channel']
    self._client.rtm_send_message(channel, text, thread_ts=thread_ts)
Send a reply using the RTM API (this function doesn't support formatted messages when using a bot integration)
def valid_header_waiting(self):
    """Check if a valid Velbus header is waiting in the buffer.

    Validates, in order: minimum length (4 bytes), start byte, priority
    byte, and the size nibble of the fourth byte.

    :return: True when a full, valid header is present.
    """
    if len(self.buffer) < 4:
        # Too short to judge: return early instead of cascading through
        # the checks below, which would log misleading "not recognized"
        # warnings for a header that simply hasn't fully arrived yet.
        self.logger.debug("Buffer does not yet contain full header")
        self.logger.debug("Valid Header Waiting: False(%s)", str(self.buffer))
        return False
    result = self.buffer[0] == velbus.START_BYTE
    if not result:
        self.logger.warning("Start byte not recognized")
    result = result and (self.buffer[1] in velbus.PRIORITY)
    if not result:
        self.logger.warning("Priority not recognized")
    # Low nibble of byte 3 is the payload size; at most 8 data bytes.
    result = result and (self.buffer[3] & 0x0F <= 8)
    if not result:
        self.logger.warning("Message size not recognized")
    self.logger.debug("Valid Header Waiting: %s(%s)", result, str(self.buffer))
    return result
Check if a valid header is waiting in buffer
def transaction(self, compare, success=None, failure=None):
    """Perform an etcd transaction.

    :param compare: A list of comparisons to make
    :param success: Operations to perform if all comparisons are true
    :param failure: Operations to perform if any comparison is false
    :return: A tuple of (operation status, responses); range responses
        are unpacked into lists of (value, KVMetadata) pairs.
    """
    compare = [c.build_message() for c in compare]
    success_ops = self._ops_to_requests(success)
    failure_ops = self._ops_to_requests(failure)
    transaction_request = etcdrpc.TxnRequest(compare=compare,
                                             success=success_ops,
                                             failure=failure_ops)
    txn_response = self.kvstub.Txn(
        transaction_request,
        self.timeout,
        credentials=self.call_credentials,
        metadata=self.metadata
    )
    responses = []
    for response in txn_response.responses:
        response_type = response.WhichOneof('response')
        if response_type in ['response_put', 'response_delete_range',
                             'response_txn']:
            responses.append(response)
        elif response_type == 'response_range':
            # Flatten range results into (value, metadata) pairs.
            range_kvs = []
            for kv in response.response_range.kvs:
                range_kvs.append((kv.value,
                                  KVMetadata(kv, txn_response.header)))
            responses.append(range_kvs)
    return txn_response.succeeded, responses
Perform a transaction. Example usage: .. code-block:: python etcd.transaction( compare=[ etcd.transactions.value('/doot/testing') == 'doot', etcd.transactions.version('/doot/testing') > 0, ], success=[ etcd.transactions.put('/doot/testing', 'success'), ], failure=[ etcd.transactions.put('/doot/testing', 'failure'), ] ) :param compare: A list of comparisons to make :param success: A list of operations to perform if all the comparisons are true :param failure: A list of operations to perform if any of the comparisons are false :return: A tuple of (operation status, responses)
def go_offline(self, comment=None):
    """Execute a Go-Offline operation on this node.

    :param str comment: optional comment to audit
    :raises NodeCommandFailed: offline not available
    :return: None
    """
    params = {'comment': comment}
    self.make_request(
        NodeCommandFailed,
        method='update',
        resource='go_offline',
        params=params)
Executes a Go-Offline operation on the specified node :param str comment: optional comment to audit :raises NodeCommandFailed: offline not available :return: None
def _parse(reactor, directory, pemdir, *args, **kwargs):
    """Parse a txacme endpoint description into an AutoTLSEndpoint.

    :param reactor: The Twisted reactor.
    :param directory: ``twisted.python.url.URL`` for the ACME directory
        to use for issuing certs.
    :param str pemdir: The path to the certificate directory to use.

    Remaining positional and keyword args are re-joined into a
    colon-separated description for the wrapped sub-endpoint.
    """
    def colon_join(items):
        # Escape literal colons so they survive serverFromString parsing.
        return ':'.join([item.replace(':', '\\:') for item in items])
    sub = colon_join(list(args) + ['='.join(item) for item in kwargs.items()])
    pem_path = FilePath(pemdir).asTextMode()
    acme_key = load_or_create_client_key(pem_path)
    return AutoTLSEndpoint(
        reactor=reactor,
        directory=directory,
        client_creator=partial(Client.from_url, key=acme_key, alg=RS256),
        cert_store=DirectoryStore(pem_path),
        cert_mapping=HostDirectoryMap(pem_path),
        sub_endpoint=serverFromString(reactor, sub))
Parse a txacme endpoint description. :param reactor: The Twisted reactor. :param directory: ``twisted.python.url.URL`` for the ACME directory to use for issuing certs. :param str pemdir: The path to the certificate directory to use.
def send_post(config, urlpath, post_data):
    """POST data to an OpenSubmit server path taken from the configuration.

    Errors are logged rather than raised (best-effort delivery).
    """
    server = config.get("Server", "url")
    logger.debug("Sending executor payload to " + server)
    encoded = urlencode(post_data).encode("utf-8", errors="ignore")
    url = server + urlpath
    try:
        urlopen(url, encoded)
    except Exception as e:
        logger.error('Error while sending data to server: ' + str(e))
Send POST data to an OpenSubmit server url path, according to the configuration.
def insort_event_right(self, event, lo=0, hi=None):
    """Insert *event* into the queue, keeping it sorted by event time.

    Equal-time events are inserted to the right of existing ones so that
    FIFO order is preserved. Optional ``lo`` (default 0) and ``hi``
    (default queue length) bound the searched slice.

    Args:
        event: a (time in sec since unix epoch, callback, args, kwds) tuple.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    queue = self.queue
    upper = len(queue) if hi is None else hi
    lower = lo
    # Binary search on the timestamp only (not the whole tuple).
    while lower < upper:
        middle = (lower + upper) // 2
        if event[0] < queue[middle][0]:
            upper = middle
        else:
            lower = middle + 1
    queue.insert(lower, event)
Insert event in queue, and keep it sorted assuming queue is sorted. If event is already in queue, insert it to the right of the rightmost event (to keep FIFO order). Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. Args: event: a (time in sec since unix epoch, callback, args, kwds) tuple.
def fullqualname_py2(obj):
    """Return the fully qualified dotted name for *obj* (Python 2 semantics)."""
    kind = type(obj).__name__
    if kind == 'builtin_function_or_method':
        return _fullqualname_builtin_py2(obj)
    if kind == 'function':
        return obj.__module__ + '.' + obj.__name__
    if kind in ('member_descriptor', 'method_descriptor',
                'wrapper_descriptor'):
        owner = obj.__objclass__
        return '.'.join([owner.__module__, owner.__name__, obj.__name__])
    if kind == 'instancemethod':
        return _fullqualname_method_py2(obj)
    if kind == 'method-wrapper':
        # Qualify by the bound object, then append the wrapper's name.
        return fullqualname_py2(obj.__self__) + '.' + obj.__name__
    if kind == 'module':
        return obj.__name__
    if inspect.isclass(obj):
        return obj.__module__ + '.' + obj.__name__
    # Fallback: an instance -- qualify by its class.
    return obj.__class__.__module__ + '.' + obj.__class__.__name__
Fully qualified name for objects in Python 2.
def run(self):
    """Parse the script file.

    :rtype: :py:class:`~turberfield.dialogue.model.Model`
    """
    visitor = Model(self.fP, self.doc)
    self.doc.walkabout(visitor)
    return visitor
Parse the script file. :rtype: :py:class:`~turberfield.dialogue.model.Model`
def int(self, *args):
    """Return a random int, bounded by optional args.

    %{INT}       -> full range [-sys.maxint, sys.maxint]
    %{INT:10}    -> lower bound 10
    %{INT:10,20} -> in [10, 20]

    NOTE(review): ``sys.maxint`` only exists on Python 2.
    """
    bounds = self._arg_defaults(args, [-sys.maxint, sys.maxint], int)
    return self.random.randint(*bounds)
Returns a random int between -sys.maxint and sys.maxint INT %{INT} -> '1245123' %{INT:10} -> '10000000' %{INT:10,20} -> '19'
def _check_subnet(self, name):
    """Resolve *name* (tag Name or subnet id) to a subnet id in this VPC.

    :param str name: name of the subnet
    :return: str - subnet id of the subnet
    :raises SubnetError: when no subnet, or more than one, matches
    """
    subnets = self._vpc_connection.get_all_subnets(
        filters={'vpcId': self._vpc_id})
    matches = [subnet for subnet in subnets
               if name in (subnet.tags.get('Name'), subnet.id)]
    if not matches:
        raise SubnetError(
            "the specified subnet %s does not exist" % name)
    if len(matches) > 1:
        raise SubnetError(
            "the specified subnet name %s matches more than "
            "one subnet" % name)
    return matches[0].id
Checks if the subnet exists. :param str name: name of the subnet :return: str - subnet id of the subnet :raises: `SubnetError` if group does not exist
def get_and_subtract(self, delta):
    """Subtract *delta* from the counter and return the previous value.

    :param delta: (int), the value to subtract.
    :return: (int), the previous value.
    """
    return self._invoke_internal(
        pn_counter_add_codec, delta=-delta, get_before_update=True)
Subtracts the given value from the current value and returns the previous value. :raises NoDataMemberInClusterError: if the cluster does not contain any data members. :raises UnsupportedOperationError: if the cluster version is less than 3.10. :raises ConsistencyLostError: if the session guarantees have been lost. :param delta: (int), the value to subtract. :return: (int), the previous value.
def prepare_content_length(self, body):
    """Prepare the Content-Length header from the method and body.

    A truthy body length sets the header; a missing body on non-GET/HEAD
    requests defaults it to '0' when not already present.
    """
    if body is None:
        no_header = self.headers.get('Content-Length') is None
        if self.method not in ('GET', 'HEAD') and no_header:
            self.headers['Content-Length'] = '0'
        return
    length = super_len(body)
    if length:
        self.headers['Content-Length'] = builtin_str(length)
Prepare Content-Length header based on request method and body
def _calculate_new_overlap(stride, traj_len, skip): overlap = stride * ((traj_len - skip - 1) // stride + 1) - traj_len + skip return overlap
Given two trajectories T_1 and T_2, this function calculates for the first trajectory an overlap, i.e., a skip parameter for T_2 such that the trajectory fragments T_1 and T_2 appear as one under the given stride. Idea for deriving the formula: It is K = ((traj_len - skip - 1) // stride + 1) = #(data points in trajectory of length (traj_len - skip)). Therefore, the first point's position that is not contained in T_1 anymore is given by pos = skip + s * K. Thus the needed skip of T_2 such that the same stride parameter makes T_1 and T_2 "look as one" is overlap = pos - traj_len. :param stride: the (global) stride parameter :param traj_len: length of T_1 :param skip: skip of T_1 :return: skip of T_2
def _get_elevation(self, location): url = self._elevation_query_base % (location.latitude, location.longitude) if self.api_key != "": url += "&key=%s" % self.api_key data = self._read_from_url(url) response = json.loads(data) if response["status"] == "OK": location.elevation = int(float(response["results"][0]["elevation"])) else: location.elevation = 0
Query the elevation information with the latitude and longitude of the specified `location`.
def readerForFd(fd, URL, encoding, options):
    """Create an xmlTextReader parsing XML from an open file descriptor.

    The parsing flags @options are a combination of xmlParserOption. The
    file descriptor is NOT closed when the reader is closed or reset.
    """
    reader = libxml2mod.xmlReaderForFd(fd, URL, encoding, options)
    if reader is None:
        raise treeError('xmlReaderForFd() failed')
    return xmlTextReader(_obj=reader)
Create an xmltextReader for an XML from a file descriptor. The parsing flags @options are a combination of xmlParserOption. NOTE that the file descriptor will not be closed when the reader is closed or reset.
def parse_pagination(headers):
    """Build a pagination _Navigation tuple from the HTTP ``Link`` header.

    :param headers: HTTP Headers
    :type headers: dict
    :return: Navigation object for pagination
    :rtype: _Navigation
    """
    pages = {}
    for link in link_header.parse(headers.get("Link", "")).links:
        pages[link.rel] = parse_qs(link.href).get("page", None)

    def _page(rel):
        return pages.get(rel, [None])[0]

    return _Navigation(
        _page("previous"),
        _page("next"),
        _page("last"),
        _page("current"),
        _page("first"),
    )
Parses headers to create a pagination objects :param headers: HTTP Headers :type headers: dict :return: Navigation object for pagination :rtype: _Navigation
def collect_consequences(self):
    """Recursively collect keys dropped by a "drop ... cascade" of self.

    :return Set[_ReferenceKey]: this relation's key plus the keys of all
        relations that reference it, transitively.
    """
    dropped = {self.key()}
    for dependent in self.referenced_by.values():
        dropped |= dependent.collect_consequences()
    return dropped
Recursively collect a set of _ReferenceKeys that would consequentially get dropped if this were dropped via "drop ... cascade". :return Set[_ReferenceKey]: All the relations that would be dropped
def _cmd_down(self):
    """Downgrade the database to the target revision.

    Walks revisions newest-first down to the target and runs each one's
    ``*.down.sql`` files in reverse lexical order.
    """
    revision = self._get_revision()
    if not self._rev:
        self._log(0, "downgrading current revision")
    else:
        self._log(0, "downgrading to revision %s" % revision)
    # Revisions from the target onward, applied in reverse order.
    for rev in reversed(self._revisions[int(revision) - 1:]):
        sql_files = glob.glob(os.path.join(self._migration_path, rev, "*.down.sql"))
        # Undo scripts run in reverse of their creation/sort order.
        sql_files.sort(reverse=True)
        self._exec(sql_files, rev)
        self._log(0, "done: downgraded revision to %s" % rev)
Downgrade to a revision
def instant_articles(self, **kwargs):
    """QuerySet of all published content approved for instant articles.

    Approval is configured per FeatureType via
    ``FeatureType.instant_article = True``.
    """
    ordered = self.search(**kwargs).sort('-last_modified', '-published')
    return ordered.filter(InstantArticle())
QuerySet including all published content approved for instant articles. Instant articles are configured via FeatureType. FeatureType.instant_article = True.
def parse_substring(allele, pred, max_len=None):
    """Extract the leading substring of *allele* for which *pred* is True.

    Scans at most ``max_len`` characters (whole string when None) and
    returns ``(matched_prefix, remainder)``.

    >>> parse_substring("ABcd", str.isupper)
    ('AB', 'cd')
    """
    if max_len is None:
        max_len = len(allele)
    else:
        max_len = min(max_len, len(allele))
    pos = 0
    # Advance an index while the predicate holds, then slice once instead
    # of building the prefix one character at a time (avoids O(n^2) copies).
    while pos < max_len and pred(allele[pos]):
        pos += 1
    return allele[:pos], allele[pos:]
Extract substring of letters for which predicate is True
def set_cache_expiry(response):
    """Set the default Cache-Control max-age on *response* when unset."""
    unset = response.cache_control.max_age is None
    if unset and 'CACHE_DEFAULT_TIMEOUT' in config.cache:
        response.cache_control.max_age = config.cache['CACHE_DEFAULT_TIMEOUT']
    return response
Set the cache control headers
def contains_any(self, other):
    """Check if any of *other*'s flags are set on this value.

    (OsuMod.Hidden | OsuMod.HardRock) in flags  # either mod enabled
    OsuMod.keyMod in flags                      # any keymod enabled
    """
    if self.value == other.value:
        return True
    return self.value & other.value
Check if any flags are set. (OsuMod.Hidden | OsuMod.HardRock) in flags # Check if either hidden or hardrock are enabled. OsuMod.keyMod in flags # Check if any keymod is enabled.
def md5_for_file(f, block_size=2 ** 20):
    """Generate an MD5 hash for a possibly large file, read in chunks.

    *f* may be an open seekable file object or a path; paths are opened
    in binary mode and processed recursively.
    """
    # NOTE: the original allocated an unused hashlib.md5() here;
    # md5_for_stream creates its own hasher, so it was dead code.
    try:
        f.seek(0)
        return md5_for_stream(f, block_size=block_size)
    except AttributeError:
        # *f* has no seek(): treat it as a file path.
        file_name = f
        with open(file_name, 'rb') as f:
            return md5_for_file(f, block_size)
Generate an MD5 hash for a possibly large file by breaking it into chunks.
def runExperiment( self, e ):
    """Run experiment *e* across the parameter space on the cluster.

    Points are shuffled (via _mixup) so intermediate results sample the
    whole space; jobs are submitted asynchronously and recorded as
    pending in the notebook. Returns immediately.

    :param e: the experiment
    """
    space = self.parameterSpace()
    if len(space) > 0:
        nb = self.notebook()
        ps = self._mixup(space)
        try:
            self.open()
            view = self._client.load_balanced_view()
            jobs = []
            for p in ps:
                jobs.extend((view.apply_async((lambda p: e.set(p).run()), p)).msg_ids)
                # Brief pause to avoid flooding the controller with submissions.
                time.sleep(0.01)
            # Pair each parameter point with its job id for later retrieval.
            psjs = zip(ps, jobs)
            for (p, j) in psjs:
                nb.addPendingResult(p, j)
        finally:
            # Always persist pending results and release the cluster.
            nb.commit()
            self.close()
Run the experiment across the parameter space in parallel using all the engines in the cluster. This method returns immediately. The experiments are run asynchronously, with the points in the parameter space being explored randomly so that intermediate retrievals of results are more representative of the overall result. Put another way, for a lot of experiments the results available will converge towards a final answer, so we can plot them and see the answer emerge. :param e: the experiment
def get_project_build(account_project):
    """Return the details of the latest Appveyor build as parsed JSON."""
    url = make_url("/projects/{account_project}",
                   account_project=account_project)
    resp = requests.get(url, headers=make_auth_headers())
    return resp.json()
Get the details of the latest Appveyor build.
def do_local(self, host="localhost", port=8000):
    """Connect to a local DynamoDB instance. Use 'local off' to disable.

    > local
    > local host=localhost port=8001
    > local off
    """
    port = int(port)
    self._local_endpoint = None if host == "off" else (host, port)
    self.onecmd("use %s" % self.engine.region)
Connect to a local DynamoDB instance. Use 'local off' to disable. > local > local host=localhost port=8001 > local off
def from_vhost(cls, vhost):
    """Retrieve the paas instance id associated to a vhost (or None)."""
    paas_hosts = {entry['name']: entry['paas_id']
                  for entry in Vhost().list()}
    return paas_hosts.get(vhost)
Retrieve paas instance id associated to a vhost.
def _get_all_timers(self, dataframe): s = dataframe['custom_timers'].apply(json.loads) s.index = dataframe['epoch'] for index, value in s.iteritems(): if not value: continue for key, value in six.iteritems(value): self._timers_values[key].append((index, value)) self.total_timers += 1 del dataframe['custom_timers'] del s
Get all timers and set them in the _timers_values property :param pandas.DataFrame dataframe: the main dataframe with row results