def hill_climbing_stochastic(problem, iterations_limit=0, viewer=None):
    return _local_search(problem,
                         _random_best_expander,
                         iterations_limit=iterations_limit,
                         fringe_size=1,
                         stop_when_no_better=iterations_limit == 0,
                         viewer=viewer)
Stochastic hill climbing. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value.
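A minimal usage sketch against a toy SearchProblem subclass; the problem class, its states, and the import path are illustrative assumptions (this looks like the simpleai API, but only the three required methods are taken from the docstring):

from simpleai.search import SearchProblem
from simpleai.search.local import hill_climbing_stochastic

class MaximizeX(SearchProblem):
    """Toy problem: walk an integer x toward 10."""
    def actions(self, state):
        return [-1, 1]

    def result(self, state, action):
        return state + action

    def value(self, state):
        return -abs(state - 10)  # best (zero) at x == 10

result = hill_climbing_stochastic(MaximizeX(initial_state=0))
print(result.state)  # expected to converge to 10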
def init_common(app):
    if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
        security_ext = app.extensions['security']
        security_ext.confirm_register_form = confirm_register_form_factory(
            security_ext.confirm_register_form)
        security_ext.register_form = register_form_factory(
            security_ext.register_form)
Post initialization.
def detect_current_filename():
    import inspect
    filename = None
    frame = inspect.currentframe()
    try:
        # Walk up the stack until the __main__ module's frame is reached.
        while frame.f_back and frame.f_globals.get('__name__') != '__main__':
            frame = frame.f_back
        filename = frame.f_globals.get('__file__')
    finally:
        del frame  # avoid reference cycles between the frame and its locals
    return filename
Attempt to return the filename of the currently running Python process. Returns None if the filename cannot be detected.
def _sd_of_runs(stats, mean, key='runs'):
    num_runs = len(stats[key])
    first = stats[key][0]
    standard_deviation = {}
    for stat_key in first:
        if isinstance(first[stat_key], numbers.Number):
            standard_deviation[stat_key] = math.sqrt(
                sum((run[stat_key] - mean[stat_key])**2
                    for run in stats[key]) / float(num_runs))
    return standard_deviation
Obtain the standard deviation of stats.

Args:
    stats: dict; A set of stats, structured as above.
    mean: dict; Mean for each key in stats.
    key: str; Optional key to determine where the list of runs is found
        in stats.
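A small worked example with hypothetical stat values (the helper itself only needs the standard-library math and numbers modules):

import math
import numbers

stats = {'runs': [{'time': 1.0}, {'time': 3.0}]}
mean = {'time': 2.0}

# Population standard deviation: sqrt(((1-2)**2 + (3-2)**2) / 2) == 1.0
print(_sd_of_runs(stats, mean))  # {'time': 1.0}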
def get_roots(self):
    id_list = []
    for r in self._rls.get_relationships_by_genus_type_for_source(
            self._phantom_root_id, self._relationship_type):
        id_list.append(r.get_destination_id())
    return IdList(id_list)
Gets the root nodes of this hierarchy.

return: (osid.id.IdList) - the root nodes
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
def use_plenary_vault_view(self):
    self._vault_view = PLENARY
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_vault_view()
        except AttributeError:
            pass
Pass through to provider AuthorizationVaultSession.use_plenary_vault_view
def tail(self) -> 'List':
    lambda_list = self._get_value()
    return List(lambda_list(lambda _, tail: tail))
Return tail of List.
def pprint(self, index=False, delimiter='-'):
    lines = _build_tree_string(self, 0, index, delimiter)[0]
    print('\n' + '\n'.join((line.rstrip() for line in lines)))
Pretty-print the binary tree.

:param index: If set to True (default: False), display level-order_
    indexes using the format: ``{index}{delimiter}{value}``.
:type index: bool
:param delimiter: Delimiter character between the node index and the node
    value (default: '-').
:type delimiter: str | unicode

**Example**:

.. doctest::

    >>> from binarytree import Node
    >>>
    >>> root = Node(1)              # index: 0, value: 1
    >>> root.left = Node(2)         # index: 1, value: 2
    >>> root.right = Node(3)        # index: 2, value: 3
    >>> root.left.right = Node(4)   # index: 4, value: 4
    >>>
    >>> root.pprint()
    <BLANKLINE>
      __1
     /   \\
    2     3
     \\
      4
    <BLANKLINE>
    >>> root.pprint(index=True)     # Format: {index}-{value}
    <BLANKLINE>
       _____0-1_
      /         \\
    1-2_        2-3
        \\
        4-4
    <BLANKLINE>

.. note::
    If you do not need level-order_ indexes in the output string, use
    :func:`binarytree.Node.__str__` instead.

.. _level-order:
    https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
def emit(self, record):
    try:
        self._callback(self.prepare(record))
    except Exception:
        self.handleError(record)
Send a LogRecord to the callback function, after preparing it for serialization.
def inspect(self):
    # NOTE: the template string literal passed to dedent() was lost in the
    # source and is left blank here rather than invented.
    return dedent(
    ).format(
        dtype=self.dtype.name,
        data=self.data,
        adjustments=self.adjustments,
    )
Return a string representation of the data stored in this array.
def usage(self):
    return u' '.join(u'<%s>' % pattern.usage for pattern in self.patterns)
A usage string that describes the signature.
def numpy_bins_with_mask(self) -> Tuple[np.ndarray, np.ndarray]:
    bwm = to_numpy_bins_with_mask(self.bins)
    if not self.includes_right_edge:
        bwm[0].append(np.inf)
    return bwm
Bins in the numpy format, including the gaps in inconsecutive binnings.

Returns
-------
edges, mask: np.ndarray

See Also
--------
bin_utils.to_numpy_bins_with_mask
def first_sunday(self, year, month):
    date = datetime(year, month, 1, 0)
    days_until_sunday = 6 - date.weekday()  # Monday == 0 ... Sunday == 6
    return date + timedelta(days=days_until_sunday)
Get the first Sunday of a month.
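Two quick worked examples, called on the owning object (datetime.weekday() returns Monday == 0 ... Sunday == 6):

# September 1, 2019 was a Sunday: weekday() == 6, so 6 - 6 == 0 days are
# added and the 1st itself is returned.
first_sunday(2019, 9)   # -> datetime(2019, 9, 1, 0, 0)

# October 1, 2019 was a Tuesday: weekday() == 1, so 5 days are added.
first_sunday(2019, 10)  # -> datetime(2019, 10, 6, 0, 0)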
def vproj(a, b):
    a = stypes.toDoubleVector(a)
    b = stypes.toDoubleVector(b)
    vout = stypes.emptyDoubleVector(3)
    libspice.vproj_c(a, b, vout)
    return stypes.cVectorToPython(vout)
Find the projection of one vector onto another vector.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vproj_c.html

:param a: The vector to be projected.
:type a: 3-Element Array of floats
:param b: The vector onto which a is to be projected.
:type b: 3-Element Array of floats
:return: The projection of a onto b.
:rtype: 3-Element Array of floats
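For reference, one case from the NAIF vproj_c documentation, expressed through this wrapper (the spiceypy import path is assumed from the stypes/libspice style of the code):

import spiceypy

# Project (6, 6, 6) onto the x-axis; only the x component survives:
# (a . b / |b|**2) * b == (12 / 4) * (2, 0, 0) == (6, 0, 0)
spiceypy.vproj([6.0, 6.0, 6.0], [2.0, 0.0, 0.0])  # -> array([6., 0., 0.])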
def replication_details(host=None, core_name=None):
    ret = _get_return_dict()
    if _get_none_or_value(core_name) is None:
        success = True
        for name in __opts__['solr.cores']:
            resp = _replication_request('details', host=host, core_name=name)
            data = {name: {'data': resp['data']}}
            ret = _update_return_dict(ret, success, data,
                                      resp['errors'], resp['warnings'])
    else:
        resp = _replication_request('details', host=host,
                                    core_name=core_name)
        if resp['success']:
            ret = _update_return_dict(ret, resp['success'], resp['data'],
                                      resp['errors'], resp['warnings'])
        else:
            return resp
    return ret
Get the full replication details.

host : str (None)
    The solr host to query. __opts__['host'] is default.
core_name : str (None)
    The name of the solr core if using cores. Leave this blank if you are
    not using cores or if you want to check all cores.

Return : dict<str,obj>::

    {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}

CLI Example:

.. code-block:: bash

    salt '*' solr.replication_details music
def site_occupation_statistics(self):
    if self.time == 0.0:
        return None
    occupation_stats = {label: 0.0 for label in self.site_labels}
    for site in self.sites:
        occupation_stats[site.label] += site.time_occupied
    for label in self.site_labels:
        occupation_stats[label] /= self.time
    return occupation_stats
Average site occupation for each site type.

Args:
    None

Returns:
    (Dict(Str:Float)): Dictionary of occupation statistics, e.g.::

        { 'A' : 2.5, 'B' : 25.3 }
def clear(self):
    super(Router, self).clear()
    for name in self.todo:
        filename = os.path.join(self.migrate_dir, name + '.py')
        os.remove(filename)
Remove migrations from fs.
def descendents(self):
    for c in self.children:
        yield c
        for d in c.descendents:
            yield d
Iterate over all descendent terms
def generate_api_for_source(self, source_fpath: str):
    content = self.convert_content(source_fpath)
    if content is None:
        return
    dest_fpath = self.dest_fpath(source_fpath)
    self.create_fpath_dir(dest_fpath)
    with open(dest_fpath, 'w+') as dest_f:
        json.dump(content, dest_f, cls=DateTimeJsonEncoder)
Generate the final JSON API file, mirroring the directory structure, for a single source file.
def get_context(self, name, value, attrs):
    context = super().get_context(name, value, attrs)
    context['widget']['attrs']['dp_config'] = json_dumps(self.config)
    return context
Return widget context dictionary.
def _write_abstract_named_entity(self):
    filename = "%sAbstractNamedEntity.js" % (self._class_prefix)
    superclass_name = "%sEntity" % (self._class_prefix)
    self.write(destination=self.abstract_directory,
               filename=filename,
               template_name="abstract_named_entity.js.tpl",
               class_prefix=self._class_prefix,
               superclass_name=superclass_name)
This method generates the AbstractNamedEntity class .js file.
def compress_flood_fill_regions(targets):
    t = RegionCoreTree()
    for (x, y), cores in iteritems(targets):
        for p in cores:
            t.add_core(x, y, p)
    return sorted(t.get_regions_and_coremasks())
Generate a reduced set of flood fill parameters.

Parameters
----------
targets : {(x, y) : set([c, ...]), ...}
    For each used chip a set of core numbers onto which an application
    should be loaded. E.g., the output of
    :py:func:`~rig.place_and_route.util.build_application_map` when
    indexed by an application.

Yields
------
(region, core mask)
    Pair of integers which represent a region of a SpiNNaker machine and
    a core mask of selected cores within that region for use in
    flood-filling an application. `region` and `core_mask` are both
    integer representations of bit fields that are understood by SCAMP.

    The pairs are yielded in an order suitable for direct use with
    SCAMP's flood-fill core select (FFCS) method of loading.
def clone(cls, name, vhost, directory, origin):
    paas_info = cls.info(name)
    if 'php' in paas_info['type'] and not vhost:
        cls.error('PHP instances require indicating the VHOST to clone '
                  'with --vhost <vhost>')
    paas_access = '%s@%s' % (paas_info['user'], paas_info['git_server'])
    remote_url = 'ssh+git://%s/%s.git' % (paas_access, vhost)
    command = 'git clone %s %s --origin %s' \
        % (remote_url, directory, origin)
    init_git = cls.execute(command)
    if init_git:
        cls.echo('Use `git push %s master` to push your code to the '
                 'instance.' % (origin))
        cls.echo('Then `$ gandi deploy` to build and deploy your '
                 'application.')
Clone a PaaS instance's vhost into a local git repository.
def findspans(self, type, set=None):
    if issubclass(type, AbstractAnnotationLayer):
        layerclass = type
    else:
        layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
    e = self
    while True:
        if not e.parent:
            break
        e = e.parent
        for layer in e.select(layerclass, set, False):
            for e2 in layer:
                if isinstance(e2, AbstractSpanAnnotation):
                    if self in e2.wrefs():
                        yield e2
Find span annotations of the specified type that include this word.
def get_vip_request_details(self, vip_request_id):
    uri = 'api/v3/vip-request/details/%s/' % vip_request_id
    return super(ApiVipRequest, self).get(uri)
Method to get details of vip request.

:param vip_request_id: vip_request id
def load_with_datetime(pairs):
    d = {}
    for k, v in pairs:
        if isinstance(v, basestring):
            try:
                d[k] = dateutil.parser.parse(v)
            except ValueError:
                d[k] = v
        else:
            d[k] = v
    return d
Deserialize JSON into python datetime objects.
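This is shaped to be passed to the JSON decoder as an object_pairs_hook; a minimal sketch (Python 2 era, given the basestring check, with an illustrative document):

import json
import dateutil.parser

doc = '{"created": "2019-05-01T12:00:00", "count": 3}'
obj = json.loads(doc, object_pairs_hook=load_with_datetime)
# obj['created'] is now a datetime.datetime; obj['count'] stays an int.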
def ReconcileShadow(self, store_type):
    for k, v in iteritems(self.entry):
        if v.pw_entry.store == store_type:
            shadow_entry = self.shadow.get(k)
            if shadow_entry is not None:
                v.pw_entry = shadow_entry
            else:
                v.pw_entry.store = "UNKNOWN"
Verify that entries that claim to use shadow files have a shadow entry.

If the entries of the non-shadowed file indicate that a shadow file is
used, check that there is actually an entry for that file in shadow.

Args:
    store_type: The type of password store that should be used (e.g.
        /etc/shadow or /etc/gshadow)
def download_file(url, local_filename):
    local_filename = os.path.abspath(local_filename)
    path = os.path.dirname(local_filename)
    if not os.path.isdir(path):
        os.makedirs(path)
    with tqdm(unit='B', unit_scale=True) as progress:
        def report(chunk, chunksize, total):
            progress.total = total
            progress.update(chunksize)
        return urlretrieve(url, local_filename, reporthook=report)
Simple wrapper around urlretrieve that uses tqdm to display a progress bar for the download.
def _confirm_overwrite(filename):
    message = '{}Would you like to overwrite the contents of {} (y/[n])? '.format(
        c.Fore.MAGENTA, filename)
    response = raw_input(message)
    response = response.lower()
    if response in ['y', 'yes']:
        return True
    return False
Confirm overwrite of template files.

Make sure the user would like to continue downloading a file which will
overwrite a file in the current directory.

Args:
    filename (str): The name of the file to overwrite.

Returns:
    bool: True if the user specifies a "yes" response.
def parse_style_rules(styles: str) -> CSSRuleList:
    rules = CSSRuleList()
    for m in _style_rule_re.finditer(styles):
        rules.append(CSSStyleRule(m.group(1), parse_style_decl(m.group(2))))
    return rules
Make CSSRuleList object from style string.
def error_for(response):
    klass = error_classes.get(response.status)
    if klass is None:
        if 400 <= response.status < 500:
            klass = ClientError
        if 500 <= response.status < 600:
            klass = ServerError
    return klass(response)
Return the appropriate initialized exception class for a response.
def fetch_message(self, msgnum):
    self.imap_connect()
    status, data = self.mailbox.fetch(msgnum, "(RFC822)")
    self.imap_disconnect()
    for response_part in data:
        if isinstance(response_part, tuple):
            return email.message_from_string(response_part[1])
Given a message number, return the Email.Message object.

@Params
msgnum - message number to find

@Returns
Email.Message object for the given message number
def keystroke_model():
    model = Pohmm(n_hidden_states=2,
                  init_spread=2,
                  emissions=['lognormal', 'lognormal'],
                  smoothing='freq',
                  init_method='obs',
                  thresh=1)
    return model
Generates a 2-state model with lognormal emissions and frequency smoothing
def get_runs():
    data = current_app.config["data"]
    draw = parse_int_arg("draw", 1)
    start = parse_int_arg("start", 0)
    length = parse_int_arg("length", -1)
    length = length if length >= 0 else None
    order_column = request.args.get("order[0][column]")
    order_dir = request.args.get("order[0][dir]")
    query = parse_query_filter()

    if order_column is not None:
        order_column = \
            request.args.get("columns[%d][name]" % int(order_column))
        if order_column == "hostname":
            order_column = "host.hostname"

    runs = data.get_run_dao().get_runs(
        start=start, limit=length,
        sort_by=order_column, sort_direction=order_dir,
        query=query)
    records_total = runs.count()
    records_filtered = runs.count()
    return Response(render_template(
        "api/runs.js",
        runs=runs,
        draw=draw,
        recordsTotal=records_total,
        recordsFiltered=records_filtered),
        mimetype="application/json")
Get all runs, sort them, and return a response.
def _driver_signing_reg_conversion(cls, val, **kwargs):
    log.debug('we have %s for the driver signing value', val)
    if val is not None:
        _val = val.split(',')
        if len(_val) == 2:
            if _val[1] == '0':
                return 'Silently Succeed'
            elif _val[1] == '1':
                return 'Warn but allow installation'
            elif _val[1] == '2':
                return 'Do not allow installation'
            elif _val[1] == 'Not Defined':
                return 'Not Defined'
            else:
                return 'Invalid Value'
        else:
            return 'Not Defined'
    else:
        return 'Not Defined'
Converts the binary value in the registry for driver signing into the correct string representation.
def inverse(self, vector, duration=None):
    ann = jams.Annotation(namespace=self.namespace, duration=duration)
    if duration is None:
        duration = 0
    ann.append(time=0, duration=duration, value=vector)
    return ann
Inverse vector transformer
def get_sequence_rule_admin_session_for_bank(self, bank_id):
    if not self.supports_sequence_rule_admin():
        raise errors.Unimplemented()
    return sessions.SequenceRuleAdminSession(bank_id, runtime=self._runtime)
Gets the ``OsidSession`` associated with the sequence rule administration
service for the given bank.

arg:    bank_id (osid.id.Id): the ``Id`` of the ``Bank``
return: (osid.assessment.authoring.SequenceRuleAdminSession) - a
        ``SequenceRuleAdminSession``
raise:  NotFound - no ``Bank`` found by the given ``Id``
raise:  NullArgument - ``bank_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  Unimplemented - ``supports_sequence_rule_admin()`` or
        ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_sequence_rule_admin()`` and ``supports_visible_federation()``
are ``true``.*
def imageMsg2Image(img, bridge):
    image = Image()
    image.width = img.width
    image.height = img.height
    image.format = "RGB8"
    image.timeStamp = img.header.stamp.secs + (img.header.stamp.nsecs * 1e-9)
    cv_image = 0
    if img.encoding == "32FC1":
        gray_img_buff = bridge.imgmsg_to_cv2(img, "32FC1")
        cv_image = depthToRGB8(gray_img_buff)
    else:
        cv_image = bridge.imgmsg_to_cv2(img, "rgb8")
    image.data = cv_image
    return image
Translates from ROS Image to JderobotTypes Image.

@param img: ROS Image to translate
@param bridge: bridge to do translation

@type img: sensor_msgs.msg.Image
@type bridge: CvBridge

@return a JderobotTypes.Image translated from img
def handle_err(self, exc):
    if self.reset_rx_on_error and isinstance(exc[1], (RxSyncError,
                                                      EightbTenbError)):
        self.fifo_readout.print_readout_status()
        self.fifo_readout.reset_rx()
    else:
        if not self.abort_run.is_set():
            self.abort(msg=exc[1].__class__.__name__ + ": " + str(exc[1]))
    self.err_queue.put(exc)
Handling of Exceptions.

Parameters
----------
exc : list, tuple
    Information of the exception of the format (type, value, traceback).
    Uses the return value of sys.exc_info().
def proxy_callback(request):
    pgtIou = request.GET.get('pgtIou')
    tgt = request.GET.get('pgtId')

    if not (pgtIou and tgt):
        logger.info('No pgtIou or tgt found in request.GET')
        return HttpResponse('No pgtIOO', content_type="text/plain")
    try:
        PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou,
                              created=datetime.datetime.now())
        request.session['pgt-TICKET'] = pgtIou
        return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou),
                            content_type="text/plain")
    except Exception as e:
        logger.warning('PGT storage failed. {message}'.format(message=e))
        return HttpResponse('PGT storage failed for {request}'.format(
            request=str(request.GET)), content_type="text/plain")
Handles CAS 2.0+ XML-based proxy callback call. Stores the proxy granting ticket in the database for future use. NB: `created` is set explicitly in Python in case the database has issues setting the default timestamp value.
def __set_method(self, value):
    if value not in [DELIVERY_METHOD_EMAIL, DELIVERY_METHOD_SMS,
                     DELIVERY_METHOD_SNAILMAIL]:
        raise ValueError("Invalid delivery method '%s'" % value)
    self.__method = value
Sets the method to use. @param value: str
def save(self):
    storage = get_media_storage()
    for storage_name in self.cleaned_data['selected_files']:
        full_path = storage.path(storage_name)
        try:
            storage.delete(storage_name)
            self.success_files.append(full_path)
        except OSError:
            self.error_files.append(full_path)
Deletes the selected files from storage
def protectbranch(self, project_id, branch):
    request = requests.put(
        '{0}/{1}/repository/branches/{2}/protect'.format(
            self.projects_url, project_id, branch),
        headers=self.headers, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    if request.status_code == 200:
        return True
    else:
        return False
Protect a branch from changes.

:param project_id: project id
:param branch: branch id
:return: True if success
def __remove_category(self, id):
    # NOTE: the SELECT text was lost in the source; the query below is
    # reconstructed from context (the ids of activities in the category).
    affected_query = "select id from activities where category_id = ?"
    affected_ids = [res[0] for res in self.fetchall(affected_query, (id,))]
    update = "update activities set category_id = -1 where category_id = ?"
    self.execute(update, (id, ))
    self.execute("delete from categories where id = ?", (id, ))
    self.__remove_index(affected_ids)
move all activities to unsorted and remove category
def _parse_arg(func, variables, arg_name, anno):
    if isinstance(anno, str):
        var = variables[anno]
        return var, var.read_latest
    elif (isinstance(anno, list) and len(anno) == 1 and
          isinstance(anno[0], str)):
        var = variables[anno[0]]
        return var, var.read_all
    raise StartupError(
        'cannot parse annotation %r of parameter %r for %r' %
        (anno, arg_name, func))
Parse an argument's annotation.
def qteActiveWindow(self):
    if len(self._qteWindowList) == 0:
        self.qteLogger.critical('The window list is empty.')
        return None
    elif len(self._qteWindowList) == 1:
        return self._qteWindowList[0]
    else:
        for win in self._qteWindowList:
            if win.isActiveWindow():
                return win
        return self._qteWindowList[0]
Return the currently active ``QtmacsWindow`` object.

If no Qtmacs window is currently active (for instance because the user is
working with another application at the moment) then the method returns
the first window in the window list. The method only returns **None** if
the window list is empty, which is definitively a bug.

|Args|

* **None**

|Returns|

* **QtmacsWindow**: the currently active window or **None** if no window
  is currently active.

|Raises|

* **None**
def actions(self, state):
    'actions are indexes where we can make a move'
    actions = []
    for index, char in enumerate(state):
        if char == '_':
            actions.append(index)
    return actions
actions are indexes where we can make a move
def get_aggregate_by_id(self, account_id: str) -> AccountAggregate:
    account = self.get_by_id(account_id)
    return self.get_account_aggregate(account)
Returns the aggregate for the given id
def calc_tkor_v1(self):
    con = self.parameters.control.fastaccess
    inp = self.sequences.inputs.fastaccess
    flu = self.sequences.fluxes.fastaccess
    for k in range(con.nhru):
        flu.tkor[k] = con.kt[k] + inp.teml
Adjust the given air temperature values.

Required control parameters:
    |NHRU|
    |KT|

Required input sequence:
    |TemL|

Calculated flux sequence:
    |TKor|

Basic equation:
    :math:`TKor = KT + TemL`

Example:

    >>> from hydpy.models.lland import *
    >>> parameterstep('1d')
    >>> nhru(3)
    >>> kt(-2.0, 0.0, 2.0)
    >>> inputs.teml(1.)
    >>> model.calc_tkor_v1()
    >>> fluxes.tkor
    tkor(-1.0, 1.0, 3.0)
def _EncodeString(self, string):
    try:
        encoded_string = string.encode(self._encoding, errors=self._errors)
    except UnicodeEncodeError:
        if self._errors == 'strict':
            logging.error(
                'Unable to properly write output due to encoding error. '
                'Switching to error tolerant encoding which can result in '
                'non Basic Latin (C0) characters to be replaced with "?" '
                'or "\\ufffd".')
            self._errors = 'replace'
        encoded_string = string.encode(self._encoding, errors=self._errors)
    return encoded_string
Encodes the string.

Args:
    string (str): string to encode.

Returns:
    bytes: encoded string.
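The fallback relies on the standard codecs error handlers; for example:

# 'strict' raises UnicodeEncodeError for characters outside the target
# encoding, while 'replace' substitutes '?' on encode:
u'n\xf3n-ascii'.encode('ascii', errors='replace')  # -> b'n?n-ascii'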
def log(name, data=None):
    data = data or {}
    data.update(core.get_default_values(data))

    event_cls = core.find_event(name)
    event = event_cls(name, data)
    event.validate()
    data = core.filter_data_values(data)
    data = ejson.dumps(data)

    if conf.getsetting('DEBUG'):
        core.process(name, data)
    else:
        tasks.process_task.delay(name, data)
Entry point for the event lib that starts the logging process.

This function uses the `name` param to find the event class that will be
processed to log stuff. This name must provide two pieces of information
separated by a dot: the app name and the event class name. Like this:

>>> name = 'deal.ActionLog'

The "ActionLog" is a class declared inside the 'deal.events' module and
this function will raise an `EventNotFoundError` error if it's not
possible to import the right event class.

The `data` param *must* be a dictionary, otherwise a `TypeError` will be
raised. All keys *must* be strings and all values *must* be serializable
by the `json.dumps` function. If you need to pass any unsupported object,
you will have to register a serializer function. Consult the
RFC-00003-serialize-registry for more information.
def write(self, text='', wrap=True):
    if not isinstance(text, string_types):
        raise TypeError('text must be a string')
    text = text.encode('utf-8').decode('ascii', errors='replace')
    self._pending_writes.append((text, wrap))
    self.update()
Write text and scroll.

Parameters
----------
text : str
    Text to write. ``''`` can be used for a blank line, as a newline is
    automatically added to the end of each line.
wrap : bool
    If True, long messages will be wrapped to span multiple lines.
def transformToNative(obj):
    if obj.isNative:
        return obj
    obj.isNative = True
    obj.value = Address(**dict(zip(ADDRESS_ORDER, splitFields(obj.value))))
    return obj
Turn obj.value into an Address.
def get_resource_admin_session_for_bin(self, bin_id, proxy):
    if not self.supports_resource_admin():
        raise errors.Unimplemented()
    return sessions.ResourceAdminSession(bin_id, proxy, self._runtime)
Gets a resource administration session for the given bin.

arg:    bin_id (osid.id.Id): the ``Id`` of the bin
arg:    proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.ResourceAdminSession) - ``a
        ResourceAdminSession``
raise:  NotFound - ``bin_id`` not found
raise:  NullArgument - ``bin_id`` or ``proxy`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  Unimplemented - ``supports_resource_admin()`` or
        ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_resource_admin()`` and ``supports_visible_federation()`` are
``true``.*
def _coerce_method(converter):
    def wrapper(self):
        if len(self) == 1:
            return converter(self.iloc[0])
        raise TypeError("cannot convert the series to "
                        "{0}".format(str(converter)))
    wrapper.__name__ = "__{name}__".format(name=converter.__name__)
    return wrapper
Install the scalar coercion methods.
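In pandas this factory builds the scalar-conversion dunders on Series; a hedged sketch of how it is wired up (the attachment site is assumed from context, not shown in the source):

# Series.__int__   = _coerce_method(int)
# Series.__float__ = _coerce_method(float)
#
# int(pd.Series([7]))     -> 7
# int(pd.Series([1, 2]))  -> TypeError: cannot convert the series to <class 'int'>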
def print_results_from_args(args: argparse.Namespace):
    path = args.path
    metrics_name = args.metrics_filename
    keys = args.keys

    results_dict = {}
    for root, _, files in os.walk(path):
        if metrics_name in files:
            full_name = os.path.join(root, metrics_name)
            metrics = json.load(open(full_name))
            results_dict[full_name] = metrics

    sorted_keys = sorted(list(results_dict.keys()))
    print(f"model_run, {', '.join(keys)}")
    for name in sorted_keys:
        results = results_dict[name]
        keys_to_print = [str(results.get(key, "N/A")) for key in keys]
        print(f"{name}, {', '.join(keys_to_print)}")
Prints results from an ``argparse.Namespace`` object.
def get_check_digit(unchecked):
    digits = digits_of(unchecked)
    checksum = sum(even_digits(unchecked)) + sum([
        sum(digits_of(2 * d)) for d in odd_digits(unchecked)])
    return 9 * checksum % 10
returns the check digit of the card number.
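A worked Luhn example, assuming odd_digits yields the digits to be doubled (every second digit starting from the rightmost) and even_digits the remaining ones, since those helpers are not shown here:

# Payload 7992739871: doubling 1, 8, 3, 2, 9 (from the right) gives
# 2, 16, 6, 4, 18, whose digit sums total 2 + 7 + 6 + 4 + 9 = 28; the
# untouched digits sum to 7 + 9 + 7 + 9 + 7 = 39. checksum == 67 and
# 9 * 67 % 10 == 3 -- the check digit completing 79927398713.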
def _fill_text(self, text, width, indent):
    parts = text.split('\n\n')
    for i, part in enumerate(parts):
        if part.startswith('* '):
            subparts = part.split('\n')
            for j, subpart in enumerate(subparts):
                subparts[j] = super(WrappedTextHelpFormatter, self)._fill_text(
                    subpart, width, indent)
            parts[i] = '\n'.join(subparts)
        else:
            parts[i] = super(WrappedTextHelpFormatter, self)._fill_text(
                part, width, indent)
    return '\n\n'.join(parts)
Wraps text like HelpFormatter, but doesn't squash lines. This makes it easier to do lists and paragraphs.
def clean():
    shutil.rmtree('{PROJECT_NAME}.egg-info'.format(PROJECT_NAME=PROJECT_NAME),
                  ignore_errors=True)
    shutil.rmtree('build', ignore_errors=True)
    shutil.rmtree('dist', ignore_errors=True)
    shutil.rmtree('htmlcov', ignore_errors=True)
    shutil.rmtree('__pycache__', ignore_errors=True)
remove build artifacts
def main():
    options = _parse_args()
    archive = download_setuptools(**_download_args(options))
    return _install(archive, _build_install_args(options))
Install or upgrade setuptools and EasyInstall.
def split_func_name_args_params_handle(tokens):
    internal_assert(len(tokens) == 2,
                    "invalid function definition splitting tokens", tokens)
    func_name = tokens[0]
    func_args = []
    func_params = []
    for arg in tokens[1]:
        if len(arg) > 1 and arg[0] in ("*", "**"):
            func_args.append(arg[1])
        elif arg[0] != "*":
            func_args.append(arg[0])
        func_params.append("".join(arg))
    return [
        func_name,
        ", ".join(func_args),
        "(" + ", ".join(func_params) + ")",
    ]
Process splitting a function into name, params, and args.
def _get_db_names(self):
    # NOTE: the query text was lost in the source; a standard catalog
    # query matching the 'datname' column read below is assumed here.
    query = "SELECT datname FROM pg_database;"
    conn = self._connect(self.config['dbname'])
    cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cursor.execute(query)
    datnames = [d['datname'] for d in cursor.fetchall()]
    conn.close()
    if not datnames:
        datnames = ['postgres']
    return datnames
Try to get a list of db names
def fixed(self):
    decision = False
    for record in self.history:
        if record["when"] < self.options.since.date:
            continue
        if not decision and record["when"] < self.options.until.date:
            for change in record["changes"]:
                if (change["field_name"] == "status"
                        and change["added"] == "MODIFIED"
                        and change["removed"] != "CLOSED"):
                    decision = True
        else:
            for change in record["changes"]:
                if (change["field_name"] == "status"
                        and change["added"] == "ASSIGNED"):
                    decision = False
    return decision
Moved to MODIFIED and not later moved to ASSIGNED
def setData(self, index: QModelIndex, value, role=None):
    if not (index.isValid() and role == Qt.CheckStateRole):
        return False
    c_id = self.get_item(index).Id
    self._set_id(c_id, value == Qt.Checked, index)
    return True
Update selected_ids on click on index cell.
def solve(self):
    with log_duration(self._print, "memcache get (resolve) took %s"):
        solver_dict = self._get_cached_solve()

    if solver_dict:
        self.from_cache = True
        self._set_result(solver_dict)
    else:
        self.from_cache = False
        solver = self._solve()
        solver_dict = self._solver_to_dict(solver)
        self._set_result(solver_dict)

        with log_duration(self._print, "memcache set (resolve) took %s"):
            self._set_cached_solve(solver_dict)
Perform the solve.
def SVD_2_stream(uvectors, stachans, k, sampling_rate):
    warnings.warn('Deprecated, use svd_to_stream instead.')
    return svd_to_stream(uvectors=uvectors, stachans=stachans, k=k,
                         sampling_rate=sampling_rate)
Deprecated. Use svd_to_stream.
def sha1(self):
    sha1 = hashlib.sha1(''.join(['%s:%s' % (k, v)
                                 for k, v in self.items()]))
    return str(sha1.hexdigest())
Return a sha1 hash of the model items.

:rtype: str
def create_volume(size, name, profile, location_id=None, **libcloud_kwargs):
    conn = _get_driver(profile=profile)
    libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    if location_id is not None:
        location = _get_by_id(conn.list_locations(), location_id)
    else:
        location = None
    volume = conn.create_volume(size, name, location,
                                snapshot=None, **libcloud_kwargs)
    return _simple_volume(volume)
Create a storage volume.

:param size: Size of volume in gigabytes (required)
:type size: ``int``

:param name: Name of the volume to be created
:type name: ``str``

:param location_id: Which data center to create a volume in. If empty,
    undefined behavior will be selected. (optional)
:type location_id: ``str``

:param profile: The profile key
:type profile: ``str``

:param libcloud_kwargs: Extra arguments for the driver's list_volumes method
:type libcloud_kwargs: ``dict``

CLI Example:

.. code-block:: bash

    salt myminion libcloud_compute.create_volume 1000 vol1 profile1
def gossip_connect_curve(self, public_key, format, *args):
    return lib.zyre_gossip_connect_curve(self._as_parameter_,
                                         public_key, format, *args)
Set-up gossip discovery with CURVE enabled.
def diabetes(display=False):
    d = sklearn.datasets.load_diabetes()
    df = pd.DataFrame(data=d.data, columns=d.feature_names)
    return df, d.target
Return the diabetes data in a nice package.
def locate(callback, root_frame=None, include_root=False, raw=False):
    def get_from(maybe_callable):
        if callable(maybe_callable):
            return maybe_callable()
        return maybe_callable

    new = lambda frame: frame if raw else Frame(frame)

    current_frame = get_from(root_frame or Frame.current_frame(raw=True))
    current_frame = new(current_frame)

    if not include_root:
        current_frame = new(current_frame.f_back)

    while current_frame:
        found = callback(current_frame)
        if found:
            return current_frame
        current_frame = new(current_frame.f_back)

    raise Frame.NotFound('No matching frame found')
Locates a frame by criteria.

:param callback: One argument function to check the frame against. The
    frame we are currently on is given as that argument.
:param root_frame: The root frame to start the search from. Can be a
    callback taking no arguments.
:param include_root: `True` if the search should start from the
    `root_frame` or the one beneath it. Defaults to `False`.
:param raw: whether to use raw frames or wrap them in our own object.
    Defaults to `False`.
:raises RuntimeError: When no matching frame is found.
:returns: The first frame which responds to the `callback`.
def force_disconnect_url(self, session_id, connection_id):
    url = (
        self.api_url + '/v2/project/' + self.api_key +
        '/session/' + session_id + '/connection/' + connection_id
    )
    return url
This method returns the force-disconnect URL endpoint.
def upload_vcl(self, service_id, version_number, name, content,
               main=None, comment=None):
    body = self._formdata({
        "name": name,
        "content": content,
        "comment": comment,
        "main": main,
    }, FastlyVCL.FIELDS)
    content = self._fetch("/service/%s/version/%d/vcl" %
                          (service_id, version_number),
                          method="POST", body=body)
    return FastlyVCL(self, content)
Upload a VCL for a particular service and version.
def _make_options(x):
    if isinstance(x, Mapping):
        import warnings
        warnings.warn("Support for mapping types has been deprecated and "
                      "will be dropped in a future release.",
                      DeprecationWarning)
        return tuple((unicode_type(k), v) for k, v in x.items())

    xlist = tuple(x)
    if all((isinstance(i, (list, tuple)) and len(i) == 2) for i in xlist):
        return tuple((unicode_type(k), v) for k, v in xlist)
    return tuple((unicode_type(i), i) for i in xlist)
Standardize the options tuple format.

The returned tuple should be in the format
(('label', value), ('label', value), ...).

The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated
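Both accepted input shapes, for illustration:

_make_options([('One', 1), ('Two', 2)])  # -> (('One', 1), ('Two', 2))
_make_options([1, 2])                    # -> (('1', 1), ('2', 2))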
def composition_prediction(self, composition, to_this_composition=True):
    preds = self.list_prediction(list(composition.keys()),
                                 to_this_composition)
    output = []
    for p in preds:
        if to_this_composition:
            subs = {v: k for k, v in p['substitutions'].items()}
        else:
            subs = p['substitutions']
        charge = 0
        for k, v in composition.items():
            charge += subs[k].oxi_state * v
        if abs(charge) < 1e-8:
            output.append(p)
    logging.info('{} charge balanced substitutions found'
                 .format(len(output)))
    return output
Returns charge-balanced substitutions from a starting or ending
composition.

Args:
    composition: starting or ending composition
    to_this_composition: If true, substitutions with this as a final
        composition will be found. If false, substitutions with this as
        a starting composition will be found (these are slightly
        different)

Returns:
    List of predictions in the form of dictionaries. If
    to_this_composition is true, the values of the dictionary will be
    from the list species. If false, the keys will be from that list.
def images(self):
    return [
        MediaImage(item.get('url'), item.get('height'), item.get('width'))
        for item in self.media_metadata.get('images', [])
    ]
Return a list of MediaImage objects for this media.
def run_aggregation_by_slug(request, slug):
    sa = get_object_or_404(Aggregation, slug=slug)
    sa.execute_now = True
    sa.save()
    messages.success(request, _("Saved aggregation executed."))
    return HttpResponseRedirect(
        reverse('djmongo_browse_saved_aggregations_w_params',
                args=(sa.database_name, sa.collection_name)))
Run Aggregation By Slug
def stats_evaluation(stats):
    statement = stats.get('statement')
    error = stats.get('error', 0)
    warning = stats.get('warning', 0)
    refactor = stats.get('refactor', 0)
    convention = stats.get('convention', 0)
    if not statement or statement <= 0:
        return None
    malus = float(5 * error + warning + refactor + convention)
    malus_ratio = malus / statement
    return 10.0 - (malus_ratio * 10)
Generate an evaluation for the given pylint ``stats``.
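The formula mirrors pylint's own 10-point score; a quick worked example with hypothetical counters:

stats = {'statement': 100, 'error': 1, 'warning': 2,
         'refactor': 0, 'convention': 3}
# malus = 5*1 + 2 + 0 + 3 = 10, ratio = 10 / 100 = 0.1
stats_evaluation(stats)  # -> 10.0 - 1.0 == 9.0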
def _sim_fill(r1, r2, imsize):
    bbsize = (
        (max(r1["max_x"], r2["max_x"]) - min(r1["min_x"], r2["min_x"])) *
        (max(r1["max_y"], r2["max_y"]) - min(r1["min_y"], r2["min_y"]))
    )
    return 1.0 - (bbsize - r1["size"] - r2["size"]) / imsize
calculate the fill similarity over the image
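A worked example with two hypothetical 4x4 regions in a 100-pixel image:

r1 = {"min_x": 0, "min_y": 0, "max_x": 4, "max_y": 4, "size": 16}
r2 = {"min_x": 4, "min_y": 4, "max_x": 8, "max_y": 8, "size": 16}
# Bounding box is 8 * 8 = 64; area covered by neither region is
# 64 - 16 - 16 = 32, so the fill similarity is 1.0 - 32/100 == 0.68.
_sim_fill(r1, r2, 100)  # -> 0.68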
def issue_link_types(self):
    r_json = self._get_json('issueLinkType')
    link_types = [IssueLinkType(self._options, self._session, raw_link_json)
                  for raw_link_json in r_json['issueLinkTypes']]
    return link_types
Get a list of issue link type Resources from the server.

:rtype: List[IssueLinkType]
def get_config(repo):
    files = get_files(repo)
    config = DEFAULT_CONFIG
    if "config.json" in files:
        config_file = repo.get_file_contents('/config.json', ref="gh-pages")
        try:
            repo_config = json.loads(
                config_file.decoded_content.decode("utf-8"))
            config.update(repo_config)
        except ValueError:
            click.secho("WARNING: Unable to parse config file. "
                        "Using defaults.", fg="yellow")
    return config
Get the config for the repo, merged with the default config. Returns the default config if no config file is found.
def ones(shape, dtype=None, **kwargs):
    data = np.ones(shape, dtype)
    return dc.array(data, **kwargs)
Create an array of given shape and type, filled with ones.

Args:
    shape (sequence of ints): 2D shape of the array.
    dtype (data-type, optional): Desired data-type for the array.
    kwargs (optional): Other arguments of the array (*coords, attrs,
        and name).

Returns:
    array (decode.array): Decode array filled with ones.
def update_warning_box(self):
    self.warning_box.Clear()
    if self.warning_text == "":
        self.warning_box.AppendText("No Problems")
    else:
        self.warning_box.AppendText(self.warning_text)
Updates the warning box with whatever the warning_text variable contains for this specimen.
def get(self, service, params=None):
    url = self._url_format(service)
    if params is None:
        params = {}
    return self.rest_action(self._session.get, url, params=params)
Generic GET operation for retrieving data from Learning Modules API.

.. code-block:: python

    gbk.get('students/{gradebookId}', params=params, gradebookId=gbid)

Args:
    service (str): The endpoint service to use, i.e. gradebook
    params (dict): additional parameters to add to the call

Raises:
    requests.RequestException: Exception connection error
    ValueError: Unable to decode response content

Returns:
    list: the json-encoded content of the response
def get_location(conn, vm_):
    locations = conn.list_locations()
    vm_location = config.get_cloud_config_value('location', vm_, __opts__)
    if not six.PY3:
        vm_location = vm_location.encode('ascii', 'salt-cloud-force-ascii')

    for img in locations:
        if isinstance(img.id, six.string_types) and not six.PY3:
            img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_id = str(img.id)

        if isinstance(img.name, six.string_types) and not six.PY3:
            img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_name = str(img.name)

        if vm_location and vm_location in (img_id, img_name):
            return img

    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            vm_location))
Return the location object to use
def get_resources_to_check(client_site_url, apikey):
    url = client_site_url + u"deadoralive/get_resources_to_check"
    response = requests.get(url, headers=dict(Authorization=apikey))
    if not response.ok:
        raise CouldNotGetResourceIDsError(
            u"Couldn't get resource IDs to check: {code} {reason}".format(
                code=response.status_code, reason=response.reason))
    return response.json()
Return a list of resource IDs to check for broken links.

Calls the client site's API to get a list of resource IDs.

:raises CouldNotGetResourceIDsError: if getting the resource IDs fails
    for any reason
def parse_buffer(self, s):
    m = self.parse_char(s)
    if m is None:
        return None
    ret = [m]
    while True:
        m = self.parse_char("")
        if m is None:
            return ret
        ret.append(m)
    return ret
input some data bytes, possibly returning a list of new messages
def add_hyperedges(self, hyperedges, attr_dict=None, **attr):
    attr_dict = self._combine_attribute_arguments(attr_dict, attr)

    hyperedge_ids = []
    for nodes in hyperedges:
        hyperedge_id = self.add_hyperedge(nodes, attr_dict.copy())
        hyperedge_ids.append(hyperedge_id)
    return hyperedge_ids
Adds multiple hyperedges to the graph, along with any related attributes
of the hyperedges. If any node of a hyperedge has not previously been
added to the hypergraph, it will automatically be added here. Hyperedges
without a "weight" attribute specified will be assigned the default value
of 1.

:param hyperedges: iterable container to references of the node sets
:param attr_dict: dictionary of attributes shared by all the hyperedges
    being added.
:param attr: keyword arguments of attributes of the hyperedges; attr's
    values will override attr_dict's values if both are provided.
:returns: list -- the IDs of the hyperedges added in the order specified
    by the hyperedges container's iterator.

See also: add_hyperedge

Examples:
::

    >>> H = UndirectedHypergraph()
    >>> hyperedge_list = (["A", "B", "C"], ("A", "D"), set(["B", "D"]))
    >>> hyperedge_ids = H.add_hyperedges(hyperedge_list)
def read_collection(self, filename):
    with open(filename, 'rb') as fd:
        lines = fd.read().decode('utf-8-sig').splitlines()
        collection = list(filter(bool, [line.strip() for line in lines]))
        return collection
Reads and returns a collection of stop words from a file.
def get_network_disconnect_kwargs(self, action, network_name, container_name,
                                  kwargs=None):
    c_kwargs = dict(
        container=container_name,
        net_id=network_name,
    )
    update_kwargs(c_kwargs, kwargs)
    return c_kwargs
Generates keyword arguments for the Docker client to remove a container
from a network.

:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param network_name: Network name or id.
:type network_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the
    configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
def _set_aws_environment(task: Task = None):
    current_zone = os.environ.get('NCLUSTER_ZONE', '')
    current_region = os.environ.get('AWS_DEFAULT_REGION', '')

    def log(*args):
        if task:
            task.log(*args)
        else:
            util.log(*args)

    if current_region and current_zone:
        assert current_zone.startswith(
            current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \
                             f'in current region "{current_region} ($AWS_DEFAULT_REGION)'
        assert u.get_session().region_name == current_region

    if current_zone and not current_region:
        current_region = current_zone[:-1]
        os.environ['AWS_DEFAULT_REGION'] = current_region

    if not current_region:
        current_region = u.get_session().region_name

    if not current_region:
        log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}")
        current_region = NCLUSTER_DEFAULT_REGION
        os.environ['AWS_DEFAULT_REGION'] = current_region

    log(f"Using account {u.get_account_number()}, region {current_region}, "
        f"zone {current_zone}")
Sets up AWS environment from NCLUSTER environment variables
def reconfigure(self, service_id, workers):
    try:
        sc = self._services[service_id]
    except KeyError:
        raise ValueError("%s service id doesn't exist" % service_id)
    else:
        _utils.check_workers(workers, minimum=(1 - sc.workers))
        sc.workers = workers
        self._forktimes = []
Reconfigure a service registered in ServiceManager.

:param service_id: the service id
:type service_id: uuid.uuid4
:param workers: number of processes/workers for this service
:type workers: int
:raises: ValueError
def find(cls, session, resource_id, include=None):
    url = session._build_url(cls._resource_path(), resource_id)
    params = build_request_include(include, None)
    process = cls._mk_one(session, include=include)
    return session.get(url, CB.json(200, process), params=params)
Retrieve a single resource. This should only be called from sub-classes. Args: session(Session): The session to find the resource in resource_id: The ``id`` for the resource to look up Keyword Args: include: Resource classes to include Returns: Resource: An instance of a resource, or throws a :class:`NotFoundError` if the resource can not be found.
def create(cls, *operands, **kwargs):
    converted_operands = []
    for op in operands:
        if not isinstance(op, Scalar):
            op = ScalarValue.create(op)
        converted_operands.append(op)
    return super().create(*converted_operands, **kwargs)
Instantiate the product while applying simplification rules
def data_request(self, payload, timeout=TIMEOUT):
    request_url = self.base_url + "/data_request"
    return requests.get(request_url, timeout=timeout, params=payload)
Perform a data_request and return the result.
def stop(self):
    with self.lock:
        self._message_received(ConnectionClosed(self._file, self))
Stop the communication with the shield.
def _create_part(self, action, data, **kwargs):
    if 'suppress_kevents' in kwargs:
        data['suppress_kevents'] = kwargs.pop('suppress_kevents')

    query_params = kwargs
    query_params['select_action'] = action

    response = self._request('POST', self._build_url('parts'),
                             params=query_params, data=data)
    if response.status_code != requests.codes.created:
        raise APIError("Could not create part, {}: {}".format(
            str(response), response.content))

    return Part(response.json()['results'][0], client=self)
Create a part internal core function.
def reset_codenames(self, dry_run=None, clear_existing=None):
    self.created_codenames = []
    self.updated_names = []
    actions = ["add", "change", "delete"]
    if django.VERSION >= (2, 1):
        actions.append("view")
    for app in django_apps.get_app_configs():
        for model in app.get_models():
            try:
                getattr(model, model._meta.simple_history_manager_attribute)
            except AttributeError:
                pass
            else:
                self.update_or_create(
                    model, dry_run=dry_run, clear_existing=clear_existing)
    if dry_run:
        print("This is a dry-run. No modifications were made.")
    if self.created_codenames:
        print("The following historical permission.codenames were added:")
        pprint(self.created_codenames)
    else:
        print("No historical permission.codenames were added.")
    if self.updated_names:
        print("The following historical permission.names were updated:")
        pprint(self.updated_names)
    else:
        print("No historical permission.names were updated.")
Ensures all historical model codenames exist in Django's Permission model.
def send_sticker(self, sticker, **options):
    return self.bot.api_call(
        "sendSticker", chat_id=str(self.id), sticker=sticker, **options)
Send a sticker to the chat.

:param sticker: Sticker to send (file or string)
:param options: Additional sendSticker options (see
    https://core.telegram.org/bots/api#sendsticker)
def get_detector_incidents(self, id, **kwargs):
    resp = self._get(
        self._u(self._DETECTOR_ENDPOINT_SUFFIX, id, 'incidents'),
        None,
        **kwargs
    )
    resp.raise_for_status()
    return resp.json()
Gets all incidents for a detector