code: string (lengths 51 – 2.38k)
docstring: string (lengths 4 – 15.2k)
def set_intermediates(self, intermediates, betas=None, transition_states=None):
    self.intermediates = intermediates
    self.betas = betas
    self.transition_states = transition_states
    if self.corrections is None:
        self.net_corrections = [0.0 for _ in intermediates]
    if not self.betas:
        self.betas = [0.0 for _ in intermediates]
    if not self.transition_states:
        self.transition_states = [False for _ in intermediates]
    props = [len(self.intermediates), len(self.net_corrections),
             len(self.transition_states), len(self.betas)]
    if not len(set(props)) <= 1:
        raise ValueError('intermediates, net_corrections, transition_states and '
                         'betas all have to have the same length')
    self.get_corrections()
    return True
Sets up intermediates and specifies whether each step is electrochemical. Either provide individual contributions or net contributions; if both are given, only the net contributions are used. intermediates: list of strings transition_states: list of True and False betas: list of charge transfer coefficients net_corrections: the sum of all contributions per intermediate.
def visit_ifexp(self, node, parent):
    newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
    newnode.postinit(
        self.visit(node.test, newnode),
        self.visit(node.body, newnode),
        self.visit(node.orelse, newnode),
    )
    return newnode
Visit an IfExp node by returning a fresh instance of it
def _redraw_screen(self, stdscr):
    with self._lock:
        stdscr.clear()
        stdscr.addstr(
            0, 0, "workflows service monitor -- quit with Ctrl+C", curses.A_BOLD
        )
        stdscr.refresh()
        self.message_box = self._boxwin(
            5, curses.COLS, 2, 0, title="last seen message", color_pair=1
        )
        self.message_box.scrollok(True)
        self.cards = []
Redraw screen. This could be to initialize, or to redraw after resizing.
def plugin(module, *args, **kwargs):
    def wrap(f):
        m = module(f, *args, **kwargs)
        if inspect.isclass(m):
            for k, v in m.__dict__.items():
                if not k.startswith("__"):
                    setattr(f, k, v)
        elif inspect.isfunction(m):
            # the original referenced an undefined name `kls`; attach the
            # function to the view under its own name instead
            setattr(f, m.__name__, m)
        return f
    return wrap
Decorator to extend a package to a view. The module can be a class or function. It will copy all the methods to the class ie: # Your module.py my_ext(view, **kwargs): class MyExtension(object): def my_view(self): return {} return MyExtension # Your view.py @plugin(my_ext) class Index(View): pass :param module: object :param args: :param kwargs: :return:
def insert_draft_child(self, child_pid):
    if child_pid.status != PIDStatus.RESERVED:
        raise PIDRelationConsistencyError(
            "Draft child should have status 'RESERVED'")
    if not self.draft_child:
        with db.session.begin_nested():
            super(PIDNodeVersioning, self).insert_child(child_pid, index=-1)
    else:
        raise PIDRelationConsistencyError(
            "Draft child already exists for this relation: {0}".format(
                self.draft_child))
Insert a draft child to versioning.
def connection_made(self):
    assert self.state == BGP_FSM_CONNECT
    open_msg = self._peer.create_open_msg()
    self._holdtime = open_msg.hold_time
    self.state = BGP_FSM_OPEN_SENT
    if not self.is_reactive:
        self._peer.state.bgp_state = self.state
    self.sent_open_msg = open_msg
    self.send(open_msg)
    self._peer.connection_made()
Connection to peer handler. We send bgp open message to peer and initialize related attributes.
def receive_trial_result(self, parameter_id, parameters, value):
    reward = extract_scalar_reward(value)
    if parameter_id not in self.total_data:
        raise RuntimeError('Received parameter_id not in total_data.')
    params = self.total_data[parameter_id]
    if self.optimize_mode == OptimizeMode.Minimize:
        reward = -reward
    indiv = Individual(config=params, result=reward)
    self.population.append(indiv)
Record the result from a trial. Parameters ---------- parameter_id : int parameters : dict value : dict/float If value is a dict, it should have a "default" key; value is the final metrics of the trial.
def level_order(self) -> Iterator["BSP"]:
    next = [self]
    while next:
        level = next
        next = []
        yield from level
        for node in level:
            next.extend(node.children)
Iterate over this BSP's hierarchy in level order. .. versionadded:: 8.3
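A rough usage sketch of the traversal pattern above, using a hypothetical node class with a `children` list rather than the real BSP type:

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def level_order(self):
        next_level = [self]
        while next_level:
            level, next_level = next_level, []
            yield from level
            for node in level:
                next_level.extend(node.children)

root = Node("root", [Node("a", [Node("a1")]), Node("b")])
print([n.name for n in root.level_order()])  # ['root', 'a', 'b', 'a1']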
def parse(self, value):
    result = {}
    rest = {}
    for k, v in value.iteritems():
        if k in self.fields:
            if (isinstance(v, dict)
                    and not self.fields[k].supports_multiple):
                if len(v) == 1:
                    v = v.values()[0]
                else:
                    raise InvalidParameterCombinationError(k)
            result[k] = self.fields[k].coerce(v)
        else:
            rest[k] = v
    for k, v in self.fields.iteritems():
        if k not in result:
            result[k] = v.coerce(None)
    if rest:
        raise UnknownParametersError(result, rest)
    return result
Convert a dictionary of raw values to a dictionary of processed values.
def setImportDataInterface(self, values):
    exims = self.getImportDataInterfacesList()
    new_values = [value for value in values if value in exims]
    if len(new_values) < len(values):
        logger.warn("Some Interfaces weren't added...")
    self.Schema().getField('ImportDataInterface').set(self, new_values)
Set the list of import data interfaces, keeping only the values available in the system
def house(self):
    house = self.chart.houses.getObjectHouse(self.obj)
    return house
Returns the object's house.
def lighting(im, b, c):
    if b == 0 and c == 1:
        return im
    mu = np.average(im)
    return np.clip((im - mu) * c + mu + b, 0., 1.).astype(np.float32)
Adjust image brightness and contrast
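A minimal usage sketch, assuming numpy is imported as np and the function is in scope; `b` acts as a brightness offset and `c` as a contrast factor:

im = np.random.rand(64, 64).astype(np.float32)
out = lighting(im, b=0.1, c=1.2)   # brighten slightly, increase contrast
assert out.dtype == np.float32 and out.min() >= 0.0 and out.max() <= 1.0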
def displayStatusMessage(self, msgObj):
    msg = msgObj.data
    if not msg.endswith('\n'):
        msg = msg + '\n'
    self.qteLabel.setText(msg)
Display the last status message and partially completed key sequences. |Args| * ``msgObj`` (**QtmacsMessage**): the data supplied by the hook. |Returns| * **None** |Raises| * **None**
def get_shifted_center_blocks(x, indices):
    center_x = gather_blocks_2d(x, indices)

    def shift_right_2d_blocks(x):
        shifted_targets = (
            tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :])
        return shifted_targets

    x_shifted = shift_right_2d_blocks(center_x)
    return x_shifted
Get right shifted blocks for masked local attention 2d. Args: x: A tensor with shape [batch, heads, height, width, depth] indices: The indices to gather blocks Returns: x_shifted: a tensor of extracted blocks, each block right shifted along length.
def read_scanimage_metadata(fh):
    fh.seek(0)
    try:
        byteorder, version = struct.unpack('<2sH', fh.read(4))
        if byteorder != b'II' or version != 43:
            raise Exception
        fh.seek(16)
        magic, version, size0, size1 = struct.unpack('<IIII', fh.read(16))
        if magic != 117637889 or version != 3:
            raise Exception
    except Exception:
        raise ValueError('not a ScanImage BigTIFF v3 file')
    frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
    roi_data = read_json(fh, '<', None, size1, None) if size1 > 1 else {}
    return frame_data, roi_data
Read ScanImage BigTIFF v3 static and ROI metadata from open file. Return non-varying frame data as dict and ROI group data as JSON. The settings can be used to read image data and metadata without parsing the TIFF file. Raise ValueError if file does not contain valid ScanImage v3 metadata.
def _install_signal_handler(self, signal_number, signal_name):
    old_signal_handler = None

    def handler(handled_signal_number, frame):
        signal.signal(signal_number, signal.SIG_DFL)
        sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
        if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
            old_signal_handler(handled_signal_number, frame)
        sys.exit(0)

    old_signal_handler = signal.signal(signal_number, handler)
Set a signal handler to gracefully exit on the given signal. When this process receives the given signal, it will run `atexit` handlers and then exit with `0`. Args: signal_number: The numeric code for the signal to handle, like `signal.SIGTERM`. signal_name: The human-readable signal name.
def _start(self):
    params = self._translate(self._options)
    self._resp = self._r_session.get(self._url, params=params, stream=True)
    self._resp.raise_for_status()
    self._lines = self._resp.iter_lines(self._chunk_size)
Starts streaming the feed using the provided session and feed options.
def _get_permission(self, authorizer_name, authorizer_lambda_function_arn):
    rest_api = ApiGatewayRestApi(self.logical_id, depends_on=self.depends_on,
                                 attributes=self.resource_attributes)
    api_id = rest_api.get_runtime_attr('rest_api_id')
    partition = ArnGenerator.get_partition_name()
    resource = '${__ApiId__}/authorizers/*'
    source_arn = fnSub(
        ArnGenerator.generate_arn(partition=partition, service='execute-api',
                                  resource=resource),
        {"__ApiId__": api_id})
    lambda_permission = LambdaPermission(
        self.logical_id + authorizer_name + 'AuthorizerPermission',
        attributes=self.passthrough_resource_attributes)
    lambda_permission.Action = 'lambda:invokeFunction'
    lambda_permission.FunctionName = authorizer_lambda_function_arn
    lambda_permission.Principal = 'apigateway.amazonaws.com'
    lambda_permission.SourceArn = source_arn
    return lambda_permission
Constructs and returns the Lambda Permission resource allowing the Authorizer to invoke the function. :returns: the permission resource :rtype: model.lambda_.LambdaPermission
def __get_segmentation_path(self, path):
    startpath, ext = os.path.splitext(path)
    segmentation_path = startpath + "_segmentation" + ext
    return segmentation_path
Create path with "_segmentation" suffix and keep extension. :param path: :return:
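A worked example of the suffix insertion (the path is illustrative):

self.__get_segmentation_path('scan.nii')   # -> 'scan_segmentation.nii'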
def run_configurations(callback, sections_reader):
    base = dict(OPTIONS)
    sections = sections_reader()
    if sections is None:
        logger.info("Configuration not found in .ini files. "
                    "Running with default settings")
        recompile()
    elif sections == []:
        logger.info("Configuration does not match current runtime. "
                    "Exiting")
    results = []
    for section, options in sections:
        OPTIONS.clear()
        OPTIONS.update(base)
        OPTIONS.update(options)
        logger.debug("Running configuration from section \"%s\". OPTIONS: %r",
                     section, OPTIONS)
        results.append(callback())
    return results
Parse configurations and execute callback for matching.
def items(sanitize=False):
    if salt.utils.data.is_true(sanitize):
        out = dict(__grains__)
        for key, func in six.iteritems(_SANITIZERS):
            if key in out:
                out[key] = func(out[key])
        return out
    else:
        return __grains__
Return all of the minion's grains CLI Example: .. code-block:: bash salt '*' grains.items Sanitized CLI Example: .. code-block:: bash salt '*' grains.items sanitize=True
def get_lb_pkgs(self):
    _filter = {'items': {'description': utils.query_filter('*Load Balancer*')}}
    packages = self.prod_pkg.getItems(id=0, filter=_filter)
    pkgs = []
    for package in packages:
        if not package['description'].startswith('Global'):
            pkgs.append(package)
    return pkgs
Retrieves the local load balancer packages. :returns: A list of load balancer package dictionaries
def construct_for(self, service_name, resource_name, base_class=None):
    details = self.details_class(
        self.session, service_name, resource_name, loader=self.loader
    )
    attrs = {
        '_details': details,
    }
    klass_name = self._build_class_name(resource_name)
    attrs.update(self._build_methods(details))
    if base_class is None:
        base_class = self.base_resource_class
    return type(
        klass_name, (base_class,), attrs
    )
Builds a new, specialized ``Resource`` subclass as part of a given service. This will load the ``ResourceJSON``, determine the correct mappings/methods & constructs a brand new class with those methods on it. :param service_name: The name of the service to construct a resource for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param resource_name: The name of the ``Resource``. Ex. ``Queue``, ``Notification``, ``Table``, etc. :type resource_name: string :returns: A new resource class for that service
def echo_event(data):
    return click.echo(json.dumps(data, sort_keys=True, indent=2))
Echo a json dump of an object using click
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
    try:
        custom_namespace = {}
        exec(open(filename, 'rb').read(), custom_namespace)
        if lexername not in custom_namespace:
            raise ClassNotFound('no valid %s class found in %s' %
                                (lexername, filename))
        lexer_class = custom_namespace[lexername]
        return lexer_class(**options)
    except IOError as err:
        raise ClassNotFound('cannot read %s' % filename)
    except ClassNotFound as err:
        raise
    except Exception as err:
        raise ClassNotFound('error when loading custom lexer: %s' % err)
Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be named CustomLexer; you can specify your own class name as the second argument to this function. Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2
def set_status(self, id, status, timeout, update_time, history=None):
    query = {'_id': {'$regex': '^' + id}}
    update = {
        '$set': {'status': status, 'timeout': timeout, 'updateTime': update_time},
        '$push': {
            'history': {
                '$each': [history.serialize],
                '$slice': -abs(current_app.config['HISTORY_LIMIT'])
            }
        }
    }
    return self.get_db().alerts.find_one_and_update(
        query,
        update=update,
        projection={'history': 0},
        return_document=ReturnDocument.AFTER
    )
Set status and update history.
def move_straight_to(self, x, y, steps):
    start_x = self._rec_x
    start_y = self._rec_y
    for i in range(1, steps + 1):
        self._add_step((
            int(start_x + (x - start_x) / float(steps) * i),
            int(start_y + (y - start_y) / float(steps) * i)))
Move straight to the newly specified location - i.e. create a straight line Path from the current location to the specified point. :param x: X coord for the end position. :param y: Y coord for the end position. :param steps: How many steps to take for the move.
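A standalone sketch of the same interpolation, with hypothetical names, to show which points the loop produces:

def straight_line_steps(start, end, steps):
    sx, sy = start
    ex, ey = end
    return [(int(sx + (ex - sx) / float(steps) * i),
             int(sy + (ey - sy) / float(steps) * i))
            for i in range(1, steps + 1)]

straight_line_steps((0, 0), (10, 4), 5)
# -> [(2, 0), (4, 1), (6, 2), (8, 3), (10, 4)]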
def sort_sections(self, order):
    order_lc = [e.lower() for e in order]
    sections = OrderedDict(
        (k, self.sections[k]) for k in order_lc if k in self.sections)
    sections.update(
        (k, self.sections[k]) for k in self.sections.keys()
        if k not in order_lc)
    assert len(self.sections) == len(sections)
    self.sections = sections
Sort sections according to the section names in the order list. All remaining sections are added to the end in their original order :param order: Iterable of section names :return:
def create_combination(list_of_sentences):
    num_sentences = len(list_of_sentences) - 1
    combinations = []
    for i, _ in enumerate(list_of_sentences):
        if i == num_sentences:
            break
        num_pairs = num_sentences - i
        populated = num_pairs * [list_of_sentences[i]]
        zipped = list(zip(populated, list_of_sentences[i + 1:]))
        combinations += zipped
    return combinations
Generates all possible pair combinations for the input list of sentences. For example: input = ["paraphrase1", "paraphrase2", "paraphrase3"] output = [("paraphrase1", "paraphrase2"), ("paraphrase1", "paraphrase3"), ("paraphrase2", "paraphrase3")] Args: list_of_sentences: the list of input sentences. Returns: the list of all possible sentence pairs.
def from_dict(cls, parm, pool=None):
    if pool is None:
        pool = Pool()
    pool.id = parm['id']
    pool.name = parm['name']
    pool.description = parm['description']
    pool.default_type = parm['default_type']
    pool.ipv4_default_prefix_length = parm['ipv4_default_prefix_length']
    pool.ipv6_default_prefix_length = parm['ipv6_default_prefix_length']
    for val in ('member_prefixes_v4', 'member_prefixes_v6',
                'used_prefixes_v4', 'used_prefixes_v6',
                'free_prefixes_v4', 'free_prefixes_v6',
                'total_prefixes_v4', 'total_prefixes_v6',
                'total_addresses_v4', 'total_addresses_v6',
                'used_addresses_v4', 'used_addresses_v6',
                'free_addresses_v4', 'free_addresses_v6'):
        if parm[val] is not None:
            setattr(pool, val, int(parm[val]))
    pool.tags = {}
    for tag_name in parm['tags']:
        tag = Tag.from_dict({'name': tag_name})
        pool.tags[tag_name] = tag
    pool.avps = parm['avps']
    if parm['vrf_id'] is not None:
        pool.vrf = VRF.get(parm['vrf_id'])
    return pool
Create new Pool-object from dict. Suitable for creating objects from XML-RPC data. All available keys must exist.
def _get_binary_from_ipv6(self, ip_addr):
    hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip_addr))
    return (hi << 64) | lo
Converts IPv6 address to binary form.
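A self-contained sketch of the same conversion as a free function (the function name is hypothetical):

import socket
import struct

def ipv6_to_int(ip_addr):
    # pack the address into two 64-bit halves, then recombine as one integer
    hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip_addr))
    return (hi << 64) | lo

print(hex(ipv6_to_int("::1")))          # 0x1
print(hex(ipv6_to_int("2001:db8::")))   # 0x20010db8000000000000000000000000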
def rebind(self, **params):
    new_params = self.__params.copy()
    new_params.update(params)
    return self.__class__(new_params, self.__action)
Rebind the parameters into the URI. :return: A new `CallAPI` instance with the new parameters.
def parse_task_declaration(self, declaration_subAST):
    var_name = self.parse_declaration_name(declaration_subAST.attr("name"))
    var_type = self.parse_declaration_type(declaration_subAST.attr("type"))
    var_expressn = self.parse_declaration_expressn(
        declaration_subAST.attr("expression"), es='')
    return (var_name, var_type, var_expressn)
Parses the declaration section of the WDL task AST subtree. Examples: String my_name String your_name Int two_chains_i_mean_names = 0 :param declaration_subAST: Some subAST representing a task declaration like: 'String file_name' :return: var_name, var_type, var_value Example: Input subAST representing: 'String file_name' Output: var_name='file_name', var_type='String', var_value=None
def filter_dirs(root, dirs, excl_paths):
    filtered_dirs = []
    for dirpath in dirs:
        abspath = os.path.abspath(os.path.join(root, dirpath))
        if os.path.basename(abspath) in _SKIP_DIRS:
            continue
        if abspath not in excl_paths:
            filtered_dirs.append(dirpath)
    return filtered_dirs
Filter directory paths based on the exclusion rules defined in 'excl_paths'.
def en_last(self):
    last_ens = dict()
    for (k, l) in self.en.items():
        last_ens.update({k: l[-1] if l != [] else None})
    return last_ens
Report the energies from the last SCF present in the output. Returns a |dict| providing the various energy values from the last SCF cycle performed in the output. Keys are those of :attr:`~opan.output.OrcaOutput.p_en`. Any energy value not relevant to the parsed output is assigned as |None|. Returns ------- last_ens |dict| of |npfloat_|-- Energies from the last SCF present in the output.
def glover_dispersion_derivative(tr, oversampling=50, time_length=32., onset=0.):
    dd = .01
    dhrf = 1. / dd * (
        - _gamma_difference_hrf(
            tr, oversampling, time_length, onset,
            delay=6, undershoot=12., dispersion=.9 + dd, ratio=.35)
        + _gamma_difference_hrf(
            tr, oversampling, time_length, onset,
            delay=6, undershoot=12., dispersion=.9, ratio=.35))
    return dhrf
Implementation of the Glover dispersion derivative hrf model Parameters ---------- tr: float scan repeat time, in seconds oversampling: int, optional temporal oversampling factor in seconds time_length: float, optional hrf kernel length, in seconds onset : float, optional onset of the response in seconds Returns ------- dhrf: array of shape(length / tr * oversampling), dtype=float dhrf sampling on the oversampled time grid
def context(names):
    import json
    contexts = [_context_json(name) for name in set(names)]
    if contexts:
        click.echo(
            json.dumps(
                contexts[0] if len(contexts) == 1 else contexts,
                indent=2,
            )
        )
Show JSON-LD context for repository objects.
def fromarray(A):
    subs = np.nonzero(A)
    vals = A[subs]
    return sptensor(subs, vals, shape=A.shape, dtype=A.dtype)
Create a sptensor from a dense numpy array
def force_log_type_file_flag(self, logType, flag):
    assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" % logType
    if flag is None:
        self.__forcedFileLevels.pop(logType, None)
        self.__update_file_flags()
    else:
        assert isinstance(flag, bool), "flag must be boolean"
        self.__logTypeFileFlags[logType] = flag
        self.__forcedFileLevels[logType] = flag
Force a logtype file logging flag despite minimum and maximum logging level boundaries. :Parameters: #. logType (string): A defined logging type. #. flag (None, boolean): The file logging flag. If None, logtype existing forced flag is released.
def get_sensor_descriptions(self):
    self.init_sdr()
    for sensor in self._sdr.get_sensor_numbers():
        yield {'name': self._sdr.sensors[sensor].name,
               'type': self._sdr.sensors[sensor].sensor_type}
    self.oem_init()
    for sensor in self._oem.get_sensor_descriptions():
        yield sensor
Get available sensor names Iterates over the available sensor descriptions :returns: Iterator of dicts describing each sensor
def sanitize_word(s):
    s = re.sub('[^\w-]+', '_', s)
    s = re.sub('__+', '_', s)
    return s.strip('_')
Remove non-alphanumeric characters from a metric word and trim excessive underscores.
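Two worked examples, following directly from the substitutions above:

sanitize_word('cpu load (5 min)')   # -> 'cpu_load_5_min'
sanitize_word('disk-usage %')       # -> 'disk-usage'  (hyphens are preserved)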
def _initialize():
    global _absl_logger, _absl_handler
    if _absl_logger:
        return
    original_logger_class = logging.getLoggerClass()
    logging.setLoggerClass(ABSLLogger)
    _absl_logger = logging.getLogger('absl')
    logging.setLoggerClass(original_logger_class)
    python_logging_formatter = PythonFormatter()
    _absl_handler = ABSLHandler(python_logging_formatter)
    handlers = [
        h for h in logging.root.handlers
        if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
    for h in handlers:
        logging.root.removeHandler(h)
    if not logging.root.handlers:
        logging.root.addHandler(_absl_handler)
Initializes loggers and handlers.
def as_dict(self, replace_value_names=True):
    old_children = self.children
    self.children = self.terms
    d = super(SectionTerm, self).as_dict(replace_value_names)
    self.children = old_children
    return d
Return the whole section as a dict
def request(self, hash_, quickkey, doc_type, page=None, output=None,
            size_id=None, metadata=None, request_conversion_only=None):
    if len(hash_) > 4:
        hash_ = hash_[:4]
    query = QueryParams({
        'quickkey': quickkey,
        'doc_type': doc_type,
        'page': page,
        'output': output,
        'size_id': size_id,
        'metadata': metadata,
        'request_conversion_only': request_conversion_only
    })
    url = API_ENDPOINT + '?' + hash_ + '&' + urlencode(query)
    response = self.http.get(url, stream=True)
    if response.status_code == 204:
        raise ConversionServerError("Unable to fulfill request. "
                                    "The document will not be converted.",
                                    response.status_code)
    response.raise_for_status()
    if response.headers['content-type'] == 'application/json':
        return response.json()
    return response
Query conversion server hash_: 4 characters of file hash quickkey: File quickkey doc_type: "i" for image, "d" for documents page: The page to convert. If page is set to 'initial', the first 10 pages of the document will be provided. (document) output: "pdf", "img", or "swf" (document) size_id: 0,1,2 (document) 0-9, a-f, z (image) metadata: Set to 1 to get metadata dict request_conversion_only: Request conversion w/o content
def _round_field(values, name, freq):
    if isinstance(values, dask_array_type):
        from dask.array import map_blocks
        return map_blocks(_round_series, values, name, freq=freq,
                          dtype=np.datetime64)
    else:
        return _round_series(values, name, freq)
Indirectly access pandas rounding functions by wrapping data as a Series and calling through `.dt` attribute. Parameters ---------- values : np.ndarray or dask.array-like Array-like container of datetime-like values name : str (ceil, floor, round) Name of rounding function freq : a freq string indicating the rounding resolution Returns ------- rounded timestamps : same type as values Array-like of datetime fields accessed for each element in values
def knapsack(items, maxweight, method='recursive'):
    if method == 'recursive':
        return knapsack_recursive(items, maxweight)
    elif method == 'iterative':
        return knapsack_iterative(items, maxweight)
    elif method == 'ilp':
        return knapsack_ilp(items, maxweight)
    else:
        raise NotImplementedError('[util_alg] knapsack method=%r' % (method,))
r""" Solve the knapsack problem by finding the most valuable subsequence of `items` subject that weighs no more than `maxweight`. Args: items (tuple): is a sequence of tuples `(value, weight, id_)`, where `value` is a number and `weight` is a non-negative integer, and `id_` is an item identifier. maxweight (scalar): is a non-negative integer. Returns: tuple: (total_value, items_subset) - a pair whose first element is the sum of values in the most valuable subsequence, and whose second element is the subsequence. Subset may be different depending on implementation (ie top-odwn recusrive vs bottom-up iterative) References: http://codereview.stackexchange.com/questions/20569/dynamic-programming-solution-to-knapsack-problem http://stackoverflow.com/questions/141779/solving-the-np-complete-problem-in-xkcd http://www.es.ele.tue.nl/education/5MC10/Solutions/knapsack.pdf CommandLine: python -m utool.util_alg --test-knapsack python -m utool.util_alg --test-knapsack:0 python -m utool.util_alg --exec-knapsack:1 Ignore: annots_per_view = 2 maxweight = 2 items = [ (0.7005208343554686, 0.7005208343554686, 0), (0.669270834329427, 0.669270834329427, 1), (0.669270834329427, 0.669270834329427, 2), (0.7005208343554686, 0.7005208343554686, 3), (0.7005208343554686, 0.7005208343554686, 4), (0.669270834329427, 0.669270834329427, 5), (0.669270834329427, 0.669270834329427, 6), (0.669270834329427, 0.669270834329427, 7), (0.669270834329427, 0.669270834329427, 8), (0.669270834329427, 0.669270834329427, 9), (0.669270834329427, 0.669270834329427, 10), (0.669270834329427, 0.669270834329427, 11), (0.669270834329427, 0.669270834329427, 12), (0.669270834329427, 0.669270834329427, 13), (0.669270834329427, 0.669270834329427, 14), (0.669270834329427, 0.669270834329427, 15), (0.669270834329427, 0.669270834329427, 16), (0.669270834329427, 0.669270834329427, 17), (0.7005208343554686, 0.7005208343554686, 18), (0.7005208343554686, 0.7005208343554686, 19), (0.669270834329427, 0.669270834329427, 20), (0.7005208343554686, 0.7005208343554686, 21), (0.669270834329427, 0.669270834329427, 22), (0.669270834329427, 0.669270834329427, 23), (0.669270834329427, 0.669270834329427, 24), (0.669270834329427, 0.669270834329427, 25), (0.669270834329427, 0.669270834329427, 26), (0.669270834329427, 0.669270834329427, 27), (0.669270834329427, 0.669270834329427, 28), (0.7005208343554686, 0.7005208343554686, 29), (0.669270834329427, 0.669270834329427, 30), (0.669270834329427, 0.669270834329427, 31), (0.669270834329427, 0.669270834329427, 32), (0.669270834329427, 0.669270834329427, 33), (0.7005208343554686, 0.7005208343554686, 34), (0.669270834329427, 0.669270834329427, 35), (0.669270834329427, 0.669270834329427, 36), (0.669270834329427, 0.669270834329427, 37), (0.7005208343554686, 0.7005208343554686, 38), (0.669270834329427, 0.669270834329427, 39), (0.669270834329427, 0.669270834329427, 40), (0.7005208343554686, 0.7005208343554686, 41), (0.669270834329427, 0.669270834329427, 42), (0.669270834329427, 0.669270834329427, 43), (0.669270834329427, 0.669270834329427, 44), ] values = ut.take_column(items, 0) weights = ut.take_column(items, 1) indices = ut.take_column(items, 2) Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> import utool as ut >>> items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)] >>> maxweight = 15 >>> total_value, items_subset = knapsack(items, maxweight, method='recursive') >>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative') >>> result = 'total_value = %.2f\n' % 
(total_value,) >>> result += 'items_subset = %r' % (items_subset,) >>> ut.assert_eq(total_value1, total_value) >>> ut.assert_eq(items_subset1, items_subset) >>> print(result) total_value = 11.00 items_subset = [(2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)] Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> import utool as ut >>> # Solve https://xkcd.com/287/ >>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8] * 2 >>> items = [(w, w, i) for i, w in enumerate(weights)] >>> maxweight = 15.05 >>> total_value, items_subset = knapsack(items, maxweight, method='recursive') >>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative') >>> total_weight = sum([t[1] for t in items_subset]) >>> print('total_weight = %r' % (total_weight,)) >>> result = 'total_value = %.2f' % (total_value,) >>> print('items_subset = %r' % (items_subset,)) >>> print('items_subset1 = %r' % (items_subset1,)) >>> #assert items_subset1 == items_subset, 'NOT EQ\n%r !=\n%r' % (items_subset1, items_subset) >>> print(result) total_value = 15.05 Timeit: >>> import utool as ut >>> setup = ut.codeblock( >>> ''' import utool as ut weights = [215, 275, 335, 355, 42, 58] * 40 items = [(w, w, i) for i, w in enumerate(weights)] maxweight = 2505 #import numba #knapsack_numba = numba.autojit(ut.knapsack_iterative) #knapsack_numba = numba.autojit(ut.knapsack_iterative_numpy) ''') >>> # Test load time >>> stmt_list1 = ut.codeblock( >>> ''' #ut.knapsack_recursive(items, maxweight) ut.knapsack_iterative(items, maxweight) ut.knapsack_ilp(items, maxweight) #knapsack_numba(items, maxweight) #ut.knapsack_iterative_numpy(items, maxweight) ''').split('\n') >>> ut.util_dev.timeit_compare(stmt_list1, setup, int(5))
def commit_pushdb(self, coordinates, postscript=None):
    self.scm.commit('pants build committing publish data for push of {coordinates}'
                    '{postscript}'.format(coordinates=coordinates,
                                          postscript=postscript or ''),
                    verify=self.get_options().verify_commit)
Commit changes to the pushdb with a message containing the provided coordinates.
def file_get_contents(self, path):
    with open(self.get_full_file_path(path), 'r') as f:
        return f.read()
Returns contents of file located at 'path', not changing FS so does not require journaling
def getUserInfo(self):
    userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
    result = json.loads(userJson, strict=False)
    self.userId = result['userId']
    return result
Returns a dictionary of user info that google stores.
def _learn(connections, rng, learningSegments, activeInput, potentialOverlaps,
           initialPermanence, sampleSize, permanenceIncrement,
           permanenceDecrement, maxSynapsesPerSegment):
    connections.adjustSynapses(learningSegments, activeInput,
                               permanenceIncrement, -permanenceDecrement)
    if sampleSize == -1:
        maxNew = len(activeInput)
    else:
        maxNew = sampleSize - potentialOverlaps[learningSegments]
    if maxSynapsesPerSegment != -1:
        synapseCounts = connections.mapSegmentsToSynapseCounts(
            learningSegments)
        numSynapsesToReachMax = maxSynapsesPerSegment - synapseCounts
        maxNew = np.where(maxNew <= numSynapsesToReachMax,
                          maxNew, numSynapsesToReachMax)
    connections.growSynapsesToSample(learningSegments, activeInput, maxNew,
                                     initialPermanence, rng)
Adjust synapse permanences and grow new synapses on the learning segments. @param learningSegments (numpy array) @param activeInput (numpy array) @param potentialOverlaps (numpy array)
def read_xml_file(self, xml_file):
    assert self.__config is not None
    ffname = self.__file_full_name(xml_file)
    self.logger.debug("Reading xml file: [%s]", xml_file)
    decls = self.__dcache.cached_value(ffname, self.__config)
    if not decls:
        self.logger.debug("File has not been found in cache, parsing...")
        decls, _ = self.__parse_xml_file(ffname)
        self.__dcache.update(ffname, self.__config, decls, [])
    else:
        self.logger.debug(
            "File has not been changed, reading declarations from cache.")
    return decls
Read generated XML file. :param xml_file: path to xml file :type xml_file: str :rtype: declarations tree
def public_dsn(dsn):
    m = RE_DSN.match(dsn)
    if not m:
        log.error('Unable to parse Sentry DSN')
    public = '{scheme}://{client_id}@{domain}/{site_id}'.format(
        **m.groupdict())
    return public
Transform a standard Sentry DSN into a public one
def events_log(self, details=False, count=0, timestamp=0):
    if not count:
        count = 1 + int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT',
                                       self.app.conf.events_log_count))
    count = int(count)
    timestamp = float(timestamp)
    logger.debug('Get max %d events, newer than %s out of %d',
                 count, timestamp, len(self.app.recent_events))
    res = []
    for log in reversed(self.app.recent_events):
        if timestamp and timestamp > log['timestamp']:
            break
        if not count:
            break
        if details:
            res.append(log)
        else:
            res.append("%s - %s - %s" % (log['date'], log['level'][0].upper(),
                                         log['message']))
    logger.debug('Got %d events', len(res))
    return res
Get the most recent Alignak events If count is specifies it is the maximum number of events to return. If timestamp is specified, events older than this timestamp will not be returned The arbiter maintains a list of the most recent Alignak events. This endpoint provides this list. The default format is: [ "2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: guest;host_0;dummy_random;CRITICAL;1; notify-service-by-log;Service internal check result: 2", "2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: admin;host_0;dummy_random;CRITICAL;1; notify-service-by-log;Service internal check result: 2", "2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_critical;CRITICAL;SOFT;1; host_0-dummy_critical-2", "2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_random;CRITICAL;HARD;2; Service internal check result: 2", "2018-07-23 15:14:42 - I - SERVICE ALERT: host_0;dummy_unknown;UNKNOWN;HARD;2; host_0-dummy_unknown-3" ] If you request on this endpoint with the *details* parameter (whatever its value...), you will get a detailed JSON output: [ { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:35", message: "SERVICE ALERT: host_11;dummy_echo;UNREACHABLE;HARD;2;", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE NOTIFICATION: guest;host_0;dummy_random;OK;0; notify-service-by-log;Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE NOTIFICATION: admin;host_0;dummy_random;OK;0; notify-service-by-log;Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE ALERT: host_0;dummy_random;OK;HARD;2; Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:19", message: "SERVICE ALERT: host_11;dummy_random;OK;HARD;2; Service internal check result: 0", level: "info" } ] In this example, only the 5 most recent events are provided whereas the default value is to provide the 100 last events. This default counter may be changed thanks to the ``events_log_count`` configuration variable or ``ALIGNAK_EVENTS_LOG_COUNT`` environment variable. The date format may also be changed thanks to the ``events_date_format`` configuration variable. :return: list of the most recent events :rtype: list
def gen_support_records(transaction_manager, min_support, **kwargs):
    max_length = kwargs.get('max_length')
    _create_next_candidates = kwargs.get(
        '_create_next_candidates', create_next_candidates)
    candidates = transaction_manager.initial_candidates()
    length = 1
    while candidates:
        relations = set()
        for relation_candidate in candidates:
            support = transaction_manager.calc_support(relation_candidate)
            if support < min_support:
                continue
            candidate_set = frozenset(relation_candidate)
            relations.add(candidate_set)
            yield SupportRecord(candidate_set, support)
        length += 1
        if max_length and length > max_length:
            break
        candidates = _create_next_candidates(relations, length)
Returns a generator of support records with given transactions. Arguments: transaction_manager -- Transactions as a TransactionManager instance. min_support -- A minimum support (float). Keyword arguments: max_length -- The maximum length of relations (integer).
def mark_failed(self, dispatch, error_log):
    dispatch.error_log = error_log
    self._st['failed'].append(dispatch)
Marks a dispatch as failed. Sitemessage won't try to deliver already failed messages. Should be used within send(). :param Dispatch dispatch: a Dispatch :param str error_log: str - error message
def save(self):
    try:
        response = requests.post(self._upload_url,
                                 auth=self.jss.session.auth,
                                 verify=self.jss.session.verify,
                                 files=self.resource)
    except JSSPostError as error:
        if error.status_code == 409:
            raise JSSPostError(error)
        else:
            raise JSSMethodNotAllowedError(self.__class__.__name__)
    if response.status_code == 201:
        if self.jss.verbose:
            print "POST: Success"
            print response.text.encode("utf-8")
    elif response.status_code >= 400:
        error_handler(JSSPostError, response)
POST the object to the JSS.
def time_to_sec(time_str: str) -> int:
    total_sec = 0
    if '-' in time_str:
        days, time_str = time_str.split('-')
        total_sec += (int(days) * 24 * 60 * 60)
    hours_min_raw = time_str.split(':')[:-1]
    time_parts = [int(round(float(val))) for val in hours_min_raw]
    total_sec += time_parts[-1] * 60
    if len(time_parts) > 1:
        total_sec += time_parts[-2] * 60 * 60
    return total_sec
Convert time in string format to seconds. Skipping seconds since sometimes the last column is truncated for entries where >10 days.
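Worked examples of the conversion above (values traced by hand from the code):

time_to_sec('1-03:20:15')   # -> 98400   (1 day + 3 h + 20 min; seconds dropped)
time_to_sec('03:20:15')     # -> 12000   (3 h + 20 min)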
def register_post_processor(func):
    global POST_PROCESSORS
    key = func.__name__
    POST_PROCESSORS[key] = func
    return func
Register a post processor function to be run as the final step in serialization. The data passed in will already have gone through the sideloading processor. Usage: @register_post_processor def my_post_processor(data): # do stuff with `data` return data
def ConsultarRemito(self, cod_remito=None, id_req=None, tipo_comprobante=None,
                    punto_emision=None, nro_comprobante=None):
    "Obtener los datos de un remito generado"
    print(self.client.help("consultarRemito"))
    response = self.client.consultarRemito(
        authRequest={'token': self.Token, 'sign': self.Sign,
                     'cuitRepresentada': self.Cuit},
        codRemito=cod_remito,
        idReq=id_req,
        tipoComprobante=tipo_comprobante,
        puntoEmision=punto_emision,
        nroComprobante=nro_comprobante)
    ret = response.get("consultarRemitoReturn", {})
    id_req = ret.get("idReq", 0)
    self.remito = rec = ret.get("remito", {})
    self.__analizar_errores(ret)
    self.__analizar_observaciones(ret)
    self.__analizar_evento(ret)
    self.AnalizarRemito(rec)
    return id_req
Retrieve the data of a generated remito (dispatch note)
def plot(config, image, file):
    image = np.squeeze(image)
    print(file, image.shape)
    imsave(file, image)
Plot a single CIFAR image.
def find_by_username(self, username):
    data = (db.select(self.table)
            .select('username', 'email', 'real_name', 'password',
                    'bio', 'status', 'role', 'uid')
            .condition('username', username)
            .execute())
    if data:
        return self.load(data[0], self.model)
Return the user with the given username if found in the database, otherwise None
def _mask_space(self, data):
    geomask = get_geostationary_mask(area=self.area)
    return data.where(geomask)
Mask space pixels
def template_name(self, path, base):
    if not base:
        path = os.path.basename(path)
    if path == base:
        base = os.path.dirname(path)
    name = re.sub(r"^%s[\/\\]?(.*)%s$" % (
        re.escape(base), re.escape(settings.TEMPLATE_EXT)
    ), r"\1", path)
    return re.sub(r"[\/\\]", settings.TEMPLATE_SEPARATOR, name)
Find out the name of a JS template
def _scrub_method_name(self, method_name):
    if method_name not in self._scrubbed_method_names:
        self._scrubbed_method_names[method_name] = (
            scrub_method_name(method_name))
    return self._scrubbed_method_names[method_name]
Scrubs a method name, returning result from local cache if available. This method wraps fitparse.utils.scrub_method_name and memoizes results, as scrubbing a method name is expensive. Args: method_name: Method name to scrub. Returns: Scrubbed method name.
def fit_cmd(argv=sys.argv[1:]):
    arguments = docopt(fit_cmd.__doc__, argv=argv)
    no_save = arguments['--no-save']
    no_activate = arguments['--no-activate']
    save_if_better_than = arguments['--save-if-better-than']
    evaluate = arguments['--evaluate'] or bool(save_if_better_than)
    if save_if_better_than is not None:
        save_if_better_than = float(save_if_better_than)
    initialize_config(__mode__='fit')
    fit(
        persist=not no_save,
        activate=not no_activate,
        evaluate=evaluate,
        persist_if_better_than=save_if_better_than,
    )
\ Fit a model and save it to the database. Will use 'dataset_loader_train', 'model', and 'model_persister' from the configuration file to load a dataset, train a model on it, and persist it. Usage: pld-fit [options] Options: -n --no-save Don't persist the fitted model to disk. --no-activate Don't activate the fitted model. --save-if-better-than=<k> Persist only if test score better than given value. -e --evaluate Evaluate fitted model on train and test set and print out results. -h --help Show this screen.
def wrapped_object(self, LayoutClass, fields, *args, **kwargs):
    if args:
        if isinstance(fields, list):
            fields = tuple(fields)
        else:
            fields = (fields,)
        if LayoutClass in self.args_first:
            arguments = args + fields
        else:
            arguments = fields + args
        return LayoutClass(*arguments, **kwargs)
    else:
        if isinstance(fields, list):
            return LayoutClass(*fields, **kwargs)
        else:
            return LayoutClass(fields, **kwargs)
Returns a layout object of type `LayoutClass` with `args` and `kwargs` that wraps `fields` inside.
def clean_history(self, widget, event=None):
    self.history_tree_store.clear()
    selected_sm_m = self.model.get_selected_state_machine_model()
    if selected_sm_m:
        if state_machine_execution_engine.finished_or_stopped():
            selected_sm_m.state_machine.destroy_execution_histories()
    self.update()
Triggered when the 'Clean History' button is clicked. Empties the execution history tree by adjusting the start index and updates tree store and view.
def initialize_simulation_from_model_specification(model_specification_file: str) -> InteractiveContext:
    model_specification = build_model_specification(model_specification_file)

    plugin_config = model_specification.plugins
    component_config = model_specification.components
    simulation_config = model_specification.configuration

    plugin_manager = PluginManager(plugin_config)
    component_config_parser = plugin_manager.get_plugin('component_configuration_parser')
    components = component_config_parser.get_components(component_config)

    return InteractiveContext(simulation_config, components, plugin_manager)
Construct a simulation from a model specification file. The simulation context returned by this method still needs to be setup by calling its setup method. It is mostly useful for testing and debugging. Parameters ---------- model_specification_file The path to a model specification file. Returns ------- An initialized (but not set up) simulation context.
def _listdir(pth, extensions):
    try:
        return [fname for fname in os.listdir(pth)
                if os.path.splitext(fname)[1] in extensions]
    except OSError:
        pass
Non-raising listdir.
def transformer_base_vq_ada_32ex_packed():
    hparams = transformer_base_v2()
    expert_utils.update_hparams_for_vq_gating(hparams)
    hparams.moe_num_experts = 32
    hparams.gating_type = "vq"
    hparams.batch_size = 5072
    hparams.ffn_layer = "local_moe"
    hparams.shared_embedding_and_softmax_weights = False
    hparams.learning_rate_warmup_steps = 10000
    hparams.learning_rate_decay_steps = 27200
    hparams.num_heads = 4
    hparams.num_blocks = 1
    hparams.moe_k = 1
    hparams.num_decoder_layers = 6
    hparams.label_smoothing = 0.
    hparams.layer_prepostprocess_dropout = 0.1
    hparams.layer_postprocess_sequence = "dan"
    hparams.layer_preprocess_sequence = "none"
    hparams.weight_decay = 1e-06
    hparams.attention_dropout = 0.1
    hparams.optimizer = "Adafactor"
    hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
    hparams.activation_dtype = "float32"
    hparams.learning_rate = 0.1
    hparams.learning_rate_constant = 1.0
    return hparams
Set of hyperparameters for lm1b packed following tpu params.
def jinja_block_as_fragment_extension(name, tagname=None, classname=None):
    if tagname is None:
        tagname = name
    if classname is None:
        classname = "%sBlockFragmentExtension" % name.capitalize()
    return type(classname, (BaseJinjaBlockAsFragmentExtension,), {
        "tags": set([tagname]),
        "end_tag": "end" + tagname,
        "block_name": name})
Creates a fragment extension which will just act as a replacement of the block statement.
def site_url(url):
    base_url = 'http://%s' % socket.gethostname()
    # the original used `is not 80`; identity comparison with an int is
    # unreliable, so compare by value instead
    if server.port != 80:
        base_url += ':%d' % server.port
    return urlparse.urljoin(base_url, url)
Determine the server URL.
def records(self):
    output = tempfile.NamedTemporaryFile(suffix='.json')
    try:
        log.info("Loading table from (%s)...", self._obj)
        shutil.copyfileobj(self.fh(), output)
        output.seek(0)
        for line in output.file:
            yield json.loads(line, object_hook=json_hook)
    finally:
        try:
            output.close()
        except:
            pass
Get each record that has been stored in the table.
def multipoint(self, points):
    shapeType = MULTIPOINT
    points = [points]
    self._shapeparts(parts=points, shapeType=shapeType)
Creates a MULTIPOINT shape. Points is a list of xy values.
def _canonicalize_name(prefix, qvm_type, noisy):
    if noisy:
        noise_suffix = '-noisy'
    else:
        noise_suffix = ''

    if qvm_type is None:
        qvm_suffix = ''
    elif qvm_type == 'qvm':
        qvm_suffix = '-qvm'
    elif qvm_type == 'pyqvm':
        qvm_suffix = '-pyqvm'
    else:
        raise ValueError(f"Unknown qvm_type {qvm_type}")

    name = f'{prefix}{noise_suffix}{qvm_suffix}'
    return name
Take the output of _parse_name to create a canonical name.
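Worked examples following directly from the branches above (the prefix string is illustrative):

_canonicalize_name('9q-square', 'qvm', noisy=True)    # -> '9q-square-noisy-qvm'
_canonicalize_name('9q-square', None, noisy=False)    # -> '9q-square'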
def main(self, args):
    if args.action:
        self.runcmd(args.action, args.arguments)
    else:
        self.cmdloop()
Run a single command, or else the main shell loop. `args` should be the :class:`argparse.Namespace` object after being set up via :meth:`add_arguments`.
def elementMaker(name, *children, **attrs):
    formattedAttrs = ' '.join('{}={}'.format(key, _gvquote(str(value)))
                              for key, value in sorted(attrs.items()))
    formattedChildren = ''.join(children)
    return u'<{name} {attrs}>{children}</{name}>'.format(
        name=name, attrs=formattedAttrs, children=formattedChildren)
Construct a string from the HTML element description.
def logger():
    scriptlogger = logging.getLogger(__program__)
    if not scriptlogger.hasHandlers():
        scriptlogger.setLevel(logging.INFO)
        fmt = '%(name)s:%(levelname)s: %(message)s'
        streamhandler = logging.StreamHandler()
        streamhandler.setFormatter(logging.Formatter(fmt))
        scriptlogger.addHandler(streamhandler)
Configure program logger.
def render(self, template_name, __data=None, **kw):
    return self.template.render(template_name, **self._vars(__data, **kw))
Given a template name and template data, renders the template and returns the result as a string
def replace(self, to_replace, value, inplace=False, filter=None,
            regex=False, convert=True):
    inplace = validate_bool_kwarg(inplace, 'inplace')
    original_to_replace = to_replace
    try:
        values, to_replace = self._try_coerce_args(self.values, to_replace)
        mask = missing.mask_missing(values, to_replace)
        if filter is not None:
            filtered_out = ~self.mgr_locs.isin(filter)
            mask[filtered_out.nonzero()[0]] = False
        blocks = self.putmask(mask, value, inplace=inplace)
        if convert:
            blocks = [b.convert(by_item=True, numeric=False, copy=not inplace)
                      for b in blocks]
        return blocks
    except (TypeError, ValueError):
        if is_object_dtype(self):
            raise
        block = self.astype(object)
        return block.replace(to_replace=original_to_replace, value=value,
                             inplace=inplace, filter=filter,
                             regex=regex, convert=convert)
replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. regex is not used here. It is used in ObjectBlocks. It is here for API compatibility.
def Platform(name=platform_default()):
    module = platform_module(name)
    spec = PlatformSpec(name, module.generate)
    return spec
Select a canned Platform specification.
def jamo_to_hangul(lead, vowel, tail=''):
    lead = hcj_to_jamo(lead, "lead")
    vowel = hcj_to_jamo(vowel, "vowel")
    if not tail or ord(tail) == 0:
        tail = None
    elif is_hcj(tail):
        tail = hcj_to_jamo(tail, "tail")
    if (is_jamo(lead) and get_jamo_class(lead) == "lead") and\
       (is_jamo(vowel) and get_jamo_class(vowel) == "vowel") and\
       ((not tail) or (is_jamo(tail) and get_jamo_class(tail) == "tail")):
        result = _jamo_to_hangul_char(lead, vowel, tail)
        if is_hangul_char(result):
            return result
    raise InvalidJamoError("Could not synthesize characters to Hangul.", '\x00')
Return the Hangul character for the given jamo input. Integers corresponding to U+11xx jamo codepoints, U+11xx jamo characters, or HCJ are valid inputs. Outputs a one-character Hangul string. This function is identical to j2h.
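A small usage sketch (the compositions were checked by hand; assumes the function is in scope):

jamo_to_hangul('ㅎ', 'ㅏ', 'ㄴ')   # -> '한'
jamo_to_hangul('ㄱ', 'ㅏ')         # -> '가'  (no tail)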
def sample_string(self, individual=-1):
    base = str(self)
    extra = self.get_sample_info(individual=individual)
    extra = [':'.join([str(j) for j in i]) for i in zip(*extra.values())]
    return '\t'.join([base, '\t'.join(extra)])
Returns the VCF entry as it appears in the vcf file
def get_ladders_metadata(session, parsed):
    ladders = {}
    for ladder in parsed.find_all('a', href=re.compile(LADDER_URL_REGEX)):
        ladders[ladder.text] = get_ladder_metadata(session, ladder['href'])
    return ladders
Get metadata for all ladders.
def find_pair(self, crypto="", fiat="", verbose=False):
    self.fetch_pairs()
    if not crypto and not fiat:
        raise Exception("Fiat or Crypto required")

    def is_matched(crypto, fiat, pair):
        if crypto and not fiat:
            return pair.startswith("%s-" % crypto)
        if crypto and fiat:
            return pair == "%s-%s" % (crypto, fiat)
        if not crypto:
            return pair.endswith("-%s" % fiat)

    matched_pairs = {}
    for Service, pairs in self._all_pairs.items():
        matched = [p for p in pairs if is_matched(crypto, fiat, p)]
        if matched:
            matched_pairs[Service] = matched
    return matched_pairs
This utility is used to find an exchange that supports a given exchange pair.
def _handle_key_value(t_dict, key, value):
    if key in t_dict:
        val = t_dict[key]
        if isinstance(val, str):
            val = [val]
        val.append(value)
        return val
    return value
Handle a key that has multiple values, returning the values as a list.
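A worked example of the multi-value handling (the caller is expected to store the return value back under the key):

t_dict = {'tag': 'a'}
t_dict['tag'] = _handle_key_value(t_dict, 'tag', 'b')   # -> ['a', 'b']
_handle_key_value(t_dict, 'other', 'c')                 # -> 'c'  (key not present yet)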
def payment(self, origin, destination, amount):
    if type(amount) != Decimal:
        amount = Decimal(amount)
    if amount <= 0:
        raise Exception("Amount must be a positive number")
    all_addresses = []
    accounts = self.listaccounts()
    if origin in accounts:
        if destination in accounts:
            with self.openwallet():
                result = self.move(origin, destination, amount)
                return self.record_tx(origin, None, amount, result, destination)
        for account in accounts:
            addresses = self.getaddressesbyaccount(account)
            if destination in addresses:
                with self.openwallet():
                    result = self.move(origin, account, amount)
                    return self.record_tx(origin, destination, amount, result,
                                          account)
        else:
            with self.openwallet():
                txhash = self.sendfrom(origin, destination, amount)
                return self.record_tx(origin, destination, amount, txhash)
Convenience method for sending Bitcoins. Send coins from origin to destination. Calls record_tx to log the transaction to database. Uses free, instant "move" transfers if addresses are both local (in the same wallet), and standard "sendfrom" transactions otherwise. The sender is required to be specified by user_id (account label); however, the recipient can be specified either by Bitcoin address (anyone) or user_id (if the user is local). Payment tries sending Bitcoins in this order: 1. "move" from account to account (local) 2. "move" from account to address (local) 3. "sendfrom" account to address (broadcast) Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send Returns: bool: True if successful, False otherwise
def get_mode(self, old_mode=None):
    if self.mode is not None:
        return self.mode
    assert self.can_write, "This format does not have a supported output mode."
    if old_mode is None:
        return self.output_modes[0]
    if old_mode in self.output_modes:
        return old_mode
    try:
        idx = PILLOW_MODES.index(old_mode)
    except ValueError:
        return self.output_modes[0]
    for mode in PILLOW_MODES[idx + 1:]:
        if mode in self.output_modes:
            return mode
    opposite = PILLOW_MODES[:idx]
    opposite.reverse()
    for mode in opposite:
        if mode in self.output_modes:
            return mode
Returns output mode. If `mode` not set it will try to guess best mode, or next best mode comparing to old mode
def match(uidentities, matcher, fastmode=False):
    if not isinstance(matcher, IdentityMatcher):
        raise TypeError("matcher is not an instance of IdentityMatcher")
    if fastmode:
        try:
            matcher.matching_criteria()
        except NotImplementedError:
            name = "'%s (fast mode)'" % matcher.__class__.__name__.lower()
            raise MatcherNotSupportedError(matcher=name)
    filtered, no_filtered, uuids = \
        _filter_unique_identities(uidentities, matcher)
    if not fastmode:
        matched = _match(filtered, matcher)
    else:
        matched = _match_with_pandas(filtered, matcher)
    matched = _build_matches(matched, uuids, no_filtered, fastmode)
    return matched
Find matches in a set of unique identities. This function looks for possible similar or equal identities from a set of unique identities. The result will be a list of subsets where each subset is a list of matching identities. When `fastmode` is set a new and experimental matching algorithm will be used. It consumes more resources (a big amount of memory) but it is, at least, two orders of maginute faster than the classic algorithm. :param uidentities: list of unique identities to match :param matcher: instance of the matcher :param fastmode: use a faster algorithm :returns: a list of subsets with the matched unique identities :raises MatcherNotSupportedError: when matcher does not support fast mode matching :raises TypeError: when matcher is not an instance of IdentityMatcher class
def seek(self, n):
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")
    if 0 <= n < self._nb_markers:
        self._n = n
        self._bed.seek(self._get_seek_position(n))
    else:
        raise ValueError("invalid position in BED: {}".format(n))
Gets to a certain marker position in the BED file. Args: n (int): The index of the marker to seek to.
def from_bytes_list(cls, function_descriptor_list):
    assert isinstance(function_descriptor_list, list)
    if len(function_descriptor_list) == 0:
        return FunctionDescriptor.for_driver_task()
    elif (len(function_descriptor_list) == 3
          or len(function_descriptor_list) == 4):
        module_name = ensure_str(function_descriptor_list[0])
        class_name = ensure_str(function_descriptor_list[1])
        function_name = ensure_str(function_descriptor_list[2])
        if len(function_descriptor_list) == 4:
            return cls(module_name, function_name, class_name,
                       function_descriptor_list[3])
        else:
            return cls(module_name, function_name, class_name)
    else:
        raise Exception(
            "Invalid input for FunctionDescriptor.from_bytes_list")
Create a FunctionDescriptor instance from list of bytes. This function is used to create the function descriptor from backend data. Args: cls: Current class which is required argument for classmethod. function_descriptor_list: list of bytes to represent the function descriptor. Returns: The FunctionDescriptor instance created from the bytes list.
def _parse_qualimap_coverage(table):
    out = {}
    for row in table.find_all("tr"):
        col, val = [x.text for x in row.find_all("td")]
        if col == "Mean":
            out["Coverage (Mean)"] = val
    return out
Parse summary qualimap coverage metrics.
def angle(self, deg=False):
    if self.dtype.str[1] != 'c':
        warnings.warn('angle() is intended for complex-valued timeseries',
                      RuntimeWarning, 1)
    da = distob.vectorize(np.angle)(self, deg)
    return _dts_from_da(da, self.tspan, self.labels)
Return the angle of a complex Timeseries Args: deg (bool, optional): Return angle in degrees if True, radians if False (default). Returns: angle (Timeseries): The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64.
def _leaf_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    node = LeafNode()
    node.type = DataType._resolve_type(
        stmt.find1("type", required=True), sctx)
    self._handle_child(node, stmt, sctx)
Handle leaf statement.
def _tupleCompare(tuple1, ineq, tuple2,
                  eq=lambda a, b: (a == b),
                  ander=AND,
                  orer=OR):
    orholder = []
    for limit in range(len(tuple1)):
        eqconstraint = [
            eq(elem1, elem2) for elem1, elem2 in zip(tuple1, tuple2)[:limit]]
        ineqconstraint = ineq(tuple1[limit], tuple2[limit])
        orholder.append(ander(*(eqconstraint + [ineqconstraint])))
    return orer(*orholder)
Compare two 'in-database tuples'. Useful when sorting by a compound key and slicing into the middle of that query.
def _body(self, paragraphs):
    body = []
    for i in range(paragraphs):
        paragraph = self._paragraph(random.randint(1, 10))
        body.append(paragraph)
    return '\n'.join(body)
Generate a body of text
def iter_content(self, chunk_size=1, decode_unicode=False):
    def generate():
        try:
            try:
                for chunk in self.raw.stream(chunk_size, decode_content=True):
                    yield chunk
            except ProtocolError as e:
                raise ChunkedEncodingError(e)
            except DecodeError as e:
                raise ContentDecodingError(e)
            except ReadTimeoutError as e:
                raise ConnectionError(e)
        except AttributeError:
            while True:
                chunk = self.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk
        self._content_consumed = True

    if self._content_consumed and isinstance(self._content, bool):
        raise StreamConsumedError()
    reused_chunks = iter_slices(self._content, chunk_size)
    stream_chunks = generate()
    chunks = reused_chunks if self._content_consumed else stream_chunks
    if decode_unicode:
        chunks = stream_decode_response_unicode(chunks, self)
    return chunks
Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. If decode_unicode is True, content will be decoded using the best available encoding based on the response.
def cleanup_temporary_directories(self):
    while self.build_directories:
        shutil.rmtree(self.build_directories.pop())
    for requirement in self.reported_requirements:
        requirement.remove_temporary_source()
    while self.eggs_links:
        symbolic_link = self.eggs_links.pop()
        if os.path.islink(symbolic_link):
            os.unlink(symbolic_link)
Delete the build directories and any temporary directories created by pip.
def _relay_data(self):
    "relay any data we have"
    if self._data:
        d = self._data
        self._data = b''
        self._sender.dataReceived(d)
relay any data we have
def get_key(self, section, key):
    LOGGER.debug("> Retrieving '{0}' in '{1}' section.".format(key, section))
    self.__settings.beginGroup(section)
    value = self.__settings.value(key)
    LOGGER.debug("> Key value: '{0}'.".format(value))
    self.__settings.endGroup()
    return value
Gets key value from settings file. :param section: Current section to retrieve key from. :type section: unicode :param key: Current key to retrieve. :type key: unicode :return: Current key value. :rtype: object