{
  "being_migrated": {
    "torch.nn.intrinsic": "torch.ao.nn.intrinsic",
    "torch.nn.intrinsic.modules": "torch.ao.nn.intrinsic.modules",
    "torch.nn.intrinsic.modules.fused": "torch.ao.nn.intrinsic.modules.fused",
    "torch.nn.intrinsic.qat": "torch.ao.nn.intrinsic.qat",
    "torch.nn.intrinsic.qat.modules": "torch.ao.nn.intrinsic.qat.modules",
    "torch.nn.intrinsic.qat.modules.conv_fused": "torch.ao.nn.intrinsic.qat.modules.conv_fused",
    "torch.nn.intrinsic.qat.modules.linear_fused": "torch.ao.nn.intrinsic.qat.modules.linear_fused",
    "torch.nn.intrinsic.qat.modules.linear_relu": "torch.ao.nn.intrinsic.qat.modules.linear_relu",
    "torch.nn.intrinsic.quantized": "torch.ao.nn.intrinsic.quantized",
    "torch.nn.intrinsic.quantized.modules": "torch.ao.nn.intrinsic.quantized.modules",
    "torch.nn.intrinsic.quantized.modules.bn_relu": "torch.ao.nn.intrinsic.quantized.modules.bn_relu",
    "torch.nn.intrinsic.quantized.modules.conv_relu": "torch.ao.nn.intrinsic.quantized.modules.conv_relu",
    "torch.nn.intrinsic.quantized.modules.linear_relu": "torch.ao.nn.intrinsic.quantized.modules.linear_relu",
    "torch.nn.intrinsic.quantized.dynamic": "torch.ao.nn.intrinsic.quantized.dynamic",
    "torch.nn.intrinsic.quantized.dynamic.modules": "torch.ao.nn.intrinsic.quantized.dynamic.modules",
    "torch.nn.intrinsic.quantized.dynamic.modules.linear_relu": "torch.ao.nn.intrinsic.quantized.dynamic.modules.linear_relu",
    "torch.nn.qat": "torch.ao.nn.qat",
    "torch.nn.qat.dynamic": "torch.ao.nn.qat.dynamic",
    "torch.nn.qat.dynamic.modules": "torch.ao.nn.qat.dynamic.modules",
    "torch.nn.qat.dynamic.modules.linear": "torch.ao.nn.qat.dynamic.modules.linear",
    "torch.nn.qat.modules": "torch.ao.nn.qat.modules",
    "torch.nn.qat.modules.conv": "torch.ao.nn.qat.modules.conv",
    "torch.nn.qat.modules.embedding_ops": "torch.ao.nn.qat.modules.embedding_ops",
    "torch.nn.qat.modules.linear": "torch.ao.nn.qat.modules.linear",
    "torch.nn.quantized.functional": "torch.ao.nn.quantized.functional",
    "torch.nn.quantized": "torch.ao.nn.quantized",
    "torch.nn.quantized.modules": "torch.ao.nn.quantized.modules",
    "torch.nn.quantized.dynamic": "torch.ao.nn.quantized.dynamic",
    "torch.nn.quantized.dynamic.modules": "torch.ao.nn.quantized.dynamic.modules",
    "torch.nn.quantized.dynamic.modules.rnn": "torch.ao.nn.quantized.dynamic.modules.rnn",
    "torch.nn.quantizable": "torch.ao.nn.quantizable",
    "torch.nn.quantizable.modules": "torch.ao.nn.quantizable.modules",
    "torch.nn.quantizable.modules.activation": "torch.ao.nn.quantizable.modules.activation",
    "torch.nn.quantizable.modules.rnn": "torch.ao.nn.quantizable.modules.rnn"
  },
"torch.autograd": [
|
|
"NestedIOFunction",
|
|
"detect_anomaly",
|
|
"enable_grad",
|
|
"grad",
|
|
"gradcheck",
|
|
"gradgradcheck",
|
|
"inference_mode",
|
|
"no_grad",
|
|
"set_detect_anomaly",
|
|
"set_grad_enabled",
|
|
"set_multithreading_enabled",
|
|
"variable"
|
|
],
|
|
"torch.backends": [
|
|
"contextmanager"
|
|
],
|
|
"torch.cuda.comm": [
|
|
"broadcast",
|
|
"broadcast_coalesced",
|
|
"reduce_add",
|
|
"reduce_add_coalesced",
|
|
"scatter",
|
|
"gather"
|
|
],
|
|
"torch.cuda.nccl": [
|
|
"init_rank",
|
|
"is_available",
|
|
"unique_id",
|
|
"version"
|
|
],
|
|
"torch.distributed": [
|
|
"AllToAllOptions",
|
|
"AllreduceCoalescedOptions",
|
|
"AllreduceOptions",
|
|
"BarrierOptions",
|
|
"BroadcastOptions",
|
|
"BuiltinCommHookType",
|
|
"Callable",
|
|
"DebugLevel",
|
|
"Dict",
|
|
"Enum",
|
|
"FileStore",
|
|
"GatherOptions",
|
|
"GradBucket",
|
|
"HashStore",
|
|
"Logger",
|
|
"namedtuple",
|
|
"Optional",
|
|
"PrefixStore",
|
|
"ProcessGroup",
|
|
"ProcessGroupGloo",
|
|
"ReduceOp",
|
|
"ReduceOptions",
|
|
"ReduceScatterOptions",
|
|
"Reducer",
|
|
"ScatterOptions",
|
|
"Store",
|
|
"TCPStore",
|
|
"Tuple",
|
|
"Union",
|
|
"get_debug_level",
|
|
"set_debug_level",
|
|
"set_debug_level_from_env",
|
|
"timedelta",
|
|
"ProcessGroupMPI",
|
|
"ProcessGroupNCCL"
|
|
],
|
|
"torch.distributed.autograd": [
|
|
"DistAutogradContext",
|
|
"backward",
|
|
"get_gradients"
|
|
],
|
|
"torch.distributed.elastic.events": [
|
|
"Dict",
|
|
"Enum",
|
|
"EventMetadataValue",
|
|
"Optional"
|
|
],
|
|
"torch.distributed.elastic.events.handlers": [
|
|
"Dict"
|
|
],
|
|
"torch.distributed.elastic.metrics": [
|
|
"Optional"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing": [
|
|
"Callable",
|
|
"Dict",
|
|
"Tuple",
|
|
"Union",
|
|
"get_logger"
|
|
],
|
|
"torch.distributed.elastic.multiprocessing.redirects": [
|
|
"contextmanager",
|
|
"partial",
|
|
"redirect_stderr",
|
|
"redirect_stdout"
|
|
],
|
|
"torch.distributed.elastic.rendezvous": [
|
|
"RendezvousHandlerCreator"
|
|
],
|
|
"torch.distributed.elastic.rendezvous.api": [
|
|
"ABC",
|
|
"Any",
|
|
"Callable",
|
|
"Dict",
|
|
"Optional",
|
|
"RendezvousHandlerCreator",
|
|
"Store",
|
|
"Tuple",
|
|
"abstractmethod"
|
|
],
|
|
"torch.distributed.elastic.rendezvous.dynamic_rendezvous": [
|
|
"get_method_name"
|
|
],
|
|
"torch.distributed.elastic.utils.api": [
|
|
"Any",
|
|
"List",
|
|
"Template"
|
|
],
|
|
"torch.distributed.elastic.utils.data.elastic_distributed_sampler": [
|
|
"DistributedSampler"
|
|
],
|
|
"torch.distributed.elastic.utils.logging": [
|
|
"Optional",
|
|
"get_log_level"
|
|
],
|
|
"torch.distributed.elastic.utils.store": [
|
|
"List",
|
|
"timedelta"
|
|
],
|
|
"torch.distributed.nn": [
|
|
"Function",
|
|
"ReduceOp",
|
|
"group"
|
|
],
|
|
"torch.distributed.nn.functional": [
|
|
"Function",
|
|
"ReduceOp",
|
|
"group"
|
|
],
|
|
"torch.distributed.nn.jit.instantiator": [
|
|
"Optional",
|
|
"get_remote_module_template"
|
|
],
|
|
"torch.distributed.optim.utils": [
|
|
"Type"
|
|
],
|
|
"torch.distributed.pipeline.sync.pipe": [
|
|
"Pipeline"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.layout": [
|
|
"SkipLayout",
|
|
"inspect_skip_layout"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.portal": [
|
|
"Context",
|
|
"Portal",
|
|
"PortalBlue",
|
|
"PortalCopy",
|
|
"PortalOrange"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.skippable": [
|
|
"Skippable"
|
|
],
|
|
"torch.distributed.pipeline.sync.skip.tracker": [
|
|
"SkipTracker",
|
|
"SkipTrackerThroughPotals",
|
|
"ThreadLocal",
|
|
"current_skip_tracker",
|
|
"use_skip_tracker"
|
|
],
|
|
"torch.distributed.remote_device": [
|
|
"Optional",
|
|
"Union"
|
|
],
|
|
"torch.distributed.rendezvous": [
|
|
"Dict",
|
|
"FileStore",
|
|
"Iterable",
|
|
"Optional",
|
|
"PrefixStore",
|
|
"Store",
|
|
"TCPStore",
|
|
"Tuple",
|
|
"Union",
|
|
"cast",
|
|
"timedelta",
|
|
"urlparse",
|
|
"urlunparse"
|
|
],
|
|
"torch.distributed.rpc": [
|
|
],
|
|
"torch.fft": [
|
|
"Tensor",
|
|
"fft",
|
|
"fft2",
|
|
"fftfreq",
|
|
"fftn",
|
|
"fftshift",
|
|
"hfft",
|
|
"ifft",
|
|
"ifft2",
|
|
"ifftn",
|
|
"ifftshift",
|
|
"ihfft",
|
|
"irfft",
|
|
"irfft2",
|
|
"irfftn",
|
|
"rfft",
|
|
"rfft2",
|
|
"rfftfreq",
|
|
"rfftn"
|
|
],
|
|
"torch.functional": [
|
|
"istft",
|
|
"pca_lowrank",
|
|
"svd_lowrank"
|
|
],
|
|
"torch.futures": [
|
|
"Future"
|
|
],
|
|
"torch.fx": [
|
|
"ProxyableClassMeta",
|
|
"Tracer",
|
|
"symbolic_trace",
|
|
"wrap"
|
|
],
|
|
"torch.fx.experimental.unification.core": [
|
|
"Iterator",
|
|
"assoc",
|
|
"dispatch",
|
|
"isvar",
|
|
"partial",
|
|
"unify",
|
|
"walk"
|
|
],
|
|
"torch.fx.experimental.unification.dispatch": [
|
|
"dispatch",
|
|
"partial"
|
|
],
|
|
"torch.fx.experimental.unification.more": [
|
|
"dispatch",
|
|
"reify",
|
|
"unify"
|
|
],
|
|
"torch.fx.experimental.unification.unification_tools": [
|
|
"first",
|
|
"getter",
|
|
"groupby"
|
|
],
|
|
"torch.fx.experimental.unification.variable": [
|
|
"contextmanager",
|
|
"dispatch",
|
|
"hashable",
|
|
"isvar"
|
|
],
|
|
"torch.fx.proxy": [
|
|
"assert_fn"
|
|
],
|
|
"torch.hub": [
|
|
"HTTPError",
|
|
"Path",
|
|
"Request",
|
|
"tqdm",
|
|
"urlopen",
|
|
"urlparse"
|
|
],
|
|
"torch.jit": [
|
|
"Attribute",
|
|
"Final",
|
|
"Iterator",
|
|
"ONNXTracedModule",
|
|
"RecursiveScriptClass",
|
|
"RecursiveScriptModule",
|
|
"ScriptModule",
|
|
"ScriptWarning",
|
|
"TopLevelTracedModule",
|
|
"TracedModule",
|
|
"TracerWarning",
|
|
"TracingCheckError",
|
|
"contextmanager",
|
|
"export",
|
|
"fork",
|
|
"freeze",
|
|
"fuser",
|
|
"ignore",
|
|
"interface",
|
|
"is_scripting",
|
|
"is_tracing",
|
|
"jit_module_from_flatbuffer",
|
|
"last_executed_optimized_graph",
|
|
"load",
|
|
"optimize_for_inference",
|
|
"optimized_execution",
|
|
"run_frozen_optimizations",
|
|
"save",
|
|
"save_jit_module_to_flatbuffer",
|
|
"script",
|
|
"script_method",
|
|
"set_fusion_strategy",
|
|
"set_module",
|
|
"trace",
|
|
"trace_module",
|
|
"unused",
|
|
"wait"
|
|
],
|
|
"torch.jit.annotations": [
|
|
"Any",
|
|
"AnyType",
|
|
"ComplexType",
|
|
"Dict",
|
|
"DictType",
|
|
"EvalEnv",
|
|
"FloatType",
|
|
"IntType",
|
|
"List",
|
|
"ListType",
|
|
"StringType",
|
|
"TensorType",
|
|
"Tuple",
|
|
"TupleType",
|
|
"get_enum_value_type",
|
|
"is_dict",
|
|
"is_function_or_method",
|
|
"is_list",
|
|
"is_optional",
|
|
"is_tensor",
|
|
"is_tuple",
|
|
"is_union",
|
|
"is_vararg"
|
|
],
|
|
"torch.jit.frontend": [
|
|
"Apply",
|
|
"Assert",
|
|
"Assign",
|
|
"Attribute",
|
|
"AugAssign",
|
|
"BinOp",
|
|
"Break",
|
|
"ClassDef",
|
|
"Const",
|
|
"Continue",
|
|
"Decl",
|
|
"Def",
|
|
"Delete",
|
|
"DictComp",
|
|
"DictLiteral",
|
|
"Dots",
|
|
"EmptyTypeAnnotation",
|
|
"ExprStmt",
|
|
"FalseLiteral",
|
|
"For",
|
|
"FunctionModifiers",
|
|
"Ident",
|
|
"If",
|
|
"List",
|
|
"ListComp",
|
|
"ListLiteral",
|
|
"NoneLiteral",
|
|
"Param",
|
|
"Pass",
|
|
"Property",
|
|
"Raise",
|
|
"Return",
|
|
"Select",
|
|
"SliceExpr",
|
|
"Starred",
|
|
"Stmt",
|
|
"StringLiteral",
|
|
"Subscript",
|
|
"TernaryIf",
|
|
"TrueLiteral",
|
|
"Tuple",
|
|
"TupleLiteral",
|
|
"UnaryOp",
|
|
"Var",
|
|
"While",
|
|
"With",
|
|
"WithItem",
|
|
"dedent",
|
|
"get_qualified_name",
|
|
"get_source_lines_and_file",
|
|
"is_static_fn",
|
|
"make_source_context",
|
|
"namedtuple",
|
|
"parse_def",
|
|
"should_drop",
|
|
"monkeytype_trace"
|
|
],
|
|
"torch.linalg": [
|
|
"LinAlgError",
|
|
"Tensor",
|
|
"cholesky",
|
|
"cholesky_ex",
|
|
"cond",
|
|
"cross",
|
|
"det",
|
|
"diagonal",
|
|
"eig",
|
|
"eigh",
|
|
"eigvals",
|
|
"eigvalsh",
|
|
"householder_product",
|
|
"inv",
|
|
"inv_ex",
|
|
"ldl_factor",
|
|
"ldl_factor_ex",
|
|
"ldl_solve",
|
|
"lstsq",
|
|
"lu",
|
|
"lu_factor",
|
|
"lu_factor_ex",
|
|
"lu_solve",
|
|
"matmul",
|
|
"matrix_exp",
|
|
"matrix_norm",
|
|
"matrix_power",
|
|
"matrix_rank",
|
|
"multi_dot",
|
|
"norm",
|
|
"pinv",
|
|
"qr",
|
|
"slogdet",
|
|
"solve",
|
|
"solve_ex",
|
|
"solve_triangular",
|
|
"svd",
|
|
"svdvals",
|
|
"tensorinv",
|
|
"tensorsolve",
|
|
"vander",
|
|
"vecdot",
|
|
"vector_norm"
|
|
],
|
|
"torch.multiprocessing": [
|
|
"Array",
|
|
"AuthenticationError",
|
|
"Barrier",
|
|
"BoundedSemaphore",
|
|
"BufferTooShort",
|
|
"Condition",
|
|
"Event",
|
|
"JoinableQueue",
|
|
"Lock",
|
|
"Manager",
|
|
"Pipe",
|
|
"Pool",
|
|
"Process",
|
|
"ProcessContext",
|
|
"ProcessError",
|
|
"ProcessExitedException",
|
|
"ProcessRaisedException",
|
|
"Queue",
|
|
"RLock",
|
|
"RawArray",
|
|
"RawValue",
|
|
"Semaphore",
|
|
"SimpleQueue",
|
|
"SpawnContext",
|
|
"TimeoutError",
|
|
"Value",
|
|
"active_children",
|
|
"allow_connection_pickling",
|
|
"cpu_count",
|
|
"current_process",
|
|
"freeze_support",
|
|
"get_all_start_methods",
|
|
"get_context",
|
|
"get_logger",
|
|
"get_start_method",
|
|
"init_reductions",
|
|
"log_to_stderr",
|
|
"set_executable",
|
|
"set_forkserver_preload",
|
|
"set_start_method",
|
|
"spawn",
|
|
"start_processes",
|
|
"parent_process"
|
|
],
|
|
"torch.multiprocessing.reductions": [
|
|
"ForkingPickler",
|
|
"Union",
|
|
"check_serializing_named_tensor",
|
|
"register_after_fork"
|
|
],
|
|
"torch.multiprocessing.spawn": [
|
|
"Optional"
|
|
],
|
|
"torch.nested": [
|
|
"nested_tensor",
|
|
"to_padded_tensor"
|
|
],
|
|
"torch.nn.common_types": [
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple",
|
|
"TypeVar",
|
|
"Union"
|
|
],
|
|
"torch.nn.functional": [
|
|
"Callable",
|
|
"DType",
|
|
"List",
|
|
"Optional",
|
|
"Tensor",
|
|
"Tuple",
|
|
"Union",
|
|
"adaptive_avg_pool1d",
|
|
"avg_pool1d",
|
|
"avg_pool2d",
|
|
"avg_pool3d",
|
|
"bilinear",
|
|
"boolean_dispatch",
|
|
"celu_",
|
|
"channel_shuffle",
|
|
"conv1d",
|
|
"conv2d",
|
|
"conv3d",
|
|
"conv_tbc",
|
|
"conv_transpose1d",
|
|
"conv_transpose2d",
|
|
"conv_transpose3d",
|
|
"cosine_similarity",
|
|
"elu_",
|
|
"gelu",
|
|
"handle_torch_function",
|
|
"hardshrink",
|
|
"hardtanh_",
|
|
"has_torch_function",
|
|
"has_torch_function_unary",
|
|
"has_torch_function_variadic",
|
|
"leaky_relu_",
|
|
"linear",
|
|
"logsigmoid",
|
|
"native_channel_shuffle",
|
|
"one_hot",
|
|
"pairwise_distance",
|
|
"pdist",
|
|
"pixel_shuffle",
|
|
"pixel_unshuffle",
|
|
"prelu",
|
|
"relu_",
|
|
"rrelu_",
|
|
"scaled_dot_product_attention",
|
|
"selu_",
|
|
"softplus",
|
|
"softshrink",
|
|
"threshold_"
|
|
],
|
|
"torch.nn.init": [
|
|
"Tensor"
|
|
],
|
|
"torch.nn.intrinsic.modules": [
|
|
"_FusedModule"
|
|
],
|
|
"torch.nn.modules.linear": [
|
|
"NonDynamicallyQuantizableLinear"
|
|
],
|
|
"torch.nn.modules.rnn": [
|
|
"apply_permutation"
|
|
],
|
|
"torch.nn.parallel": [
|
|
"DistributedDataParallelCPU"
|
|
],
|
|
"torch.nn.parallel.comm": [
|
|
"List"
|
|
],
|
|
"torch.nn.parallel.parallel_apply": [
|
|
"ExceptionWrapper",
|
|
"autocast"
|
|
],
|
|
"torch.nn.parallel.replicate": [
|
|
"OrderedDict"
|
|
],
|
|
"torch.nn.parallel.scatter_gather": [
|
|
"is_namedtuple"
|
|
],
|
|
"torch.nn.parameter": [
|
|
"OrderedDict"
|
|
],
|
|
"torch.nn.utils.rnn": [
|
|
"bind",
|
|
"PackedSequence_"
|
|
],
|
|
"torch.nn.utils.convert_parameters": [
|
|
"Iterable",
|
|
"Optional"
|
|
],
|
|
"torch.onnx": [
|
|
"Dict",
|
|
"OperatorExportTypes",
|
|
"Optional",
|
|
"TensorProtoDataType",
|
|
"TrainingMode"
|
|
],
|
|
"torch.overrides": [
|
|
"BaseTorchFunctionMode",
|
|
"TorchFunctionMode",
|
|
"TorchFunctionModeMeta",
|
|
"enable_torch_function_mode",
|
|
"get_default_nowrap_functions",
|
|
"has_torch_function"
|
|
],
|
|
"torch.package.analyze.is_from_package": [
|
|
"Any",
|
|
"ModuleType",
|
|
"is_mangled"
|
|
],
|
|
"torch.package.find_file_dependencies": [
|
|
"List",
|
|
"Optional",
|
|
"Tuple"
|
|
],
|
|
"torch.package.glob_group": [
|
|
"GlobPattern",
|
|
"Iterable",
|
|
"Union"
|
|
],
|
|
"torch.profiler": [
|
|
"DeviceType",
|
|
"ProfilerActivity",
|
|
"kineto_available",
|
|
"record_function"
|
|
],
|
|
"torch.quantization": [
|
|
"ABC",
|
|
"DeQuantStub",
|
|
"FakeQuantize",
|
|
"FakeQuantizeBase",
|
|
"FixedQParamsFakeQuantize",
|
|
"FusedMovingAvgObsFakeQuantize",
|
|
"HistogramObserver",
|
|
"MinMaxObserver",
|
|
"MovingAverageMinMaxObserver",
|
|
"MovingAveragePerChannelMinMaxObserver",
|
|
"NoopObserver",
|
|
"ObserverBase",
|
|
"PerChannelMinMaxObserver",
|
|
"PlaceholderObserver",
|
|
"QConfig",
|
|
"QConfigAny",
|
|
"QConfigDynamic",
|
|
"QuantStub",
|
|
"QuantType",
|
|
"QuantWrapper",
|
|
"RecordingObserver",
|
|
"_add_module_to_qconfig_obs_ctr",
|
|
"add_quant_dequant",
|
|
"_assert_valid_qconfig",
|
|
"convert",
|
|
"convert_dynamic_jit",
|
|
"convert_jit",
|
|
"default_fixed_qparams_range_0to1_fake_quant",
|
|
"default_affine_fixed_qparams_fake_quant",
|
|
"default_debug_observer",
|
|
"default_dynamic_quant_observer",
|
|
"default_fake_quant",
|
|
"default_float_qparams_observer",
|
|
"default_fused_act_fake_quant",
|
|
"default_fused_per_channel_wt_fake_quant",
|
|
"default_fused_wt_fake_quant",
|
|
"default_histogram_fake_quant",
|
|
"default_histogram_observer",
|
|
"default_observer",
|
|
"default_per_channel_weight_fake_quant",
|
|
"default_per_channel_weight_observer",
|
|
"default_placeholder_observer",
|
|
"default_fixed_qparams_range_neg1to1_fake_quant",
|
|
"default_symmetric_fixed_qparams_fake_quant",
|
|
"default_weight_fake_quant",
|
|
"default_weight_observer",
|
|
"disable_fake_quant",
|
|
"disable_observer",
|
|
"enable_fake_quant",
|
|
"enable_observer",
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_jit",
|
|
"fuse_conv_bn_relu",
|
|
"fuse_linear_bn",
|
|
"fuse_modules",
|
|
"get_default_compare_output_module_list",
|
|
"get_default_dynamic_quant_module_mappings",
|
|
"get_default_float_to_quantized_operator_mappings",
|
|
"get_default_qat_module_mappings",
|
|
"get_default_qat_qconfig",
|
|
"get_default_qconfig",
|
|
"get_default_qconfig_propagation_list",
|
|
"get_default_static_quant_module_mappings",
|
|
"get_dynamic_quant_module_class",
|
|
"get_fuser_method",
|
|
"get_observer_state_dict",
|
|
"get_quantized_operator",
|
|
"get_static_quant_module_class",
|
|
"load_observer_state_dict",
|
|
"no_observer_set",
|
|
"prepare",
|
|
"prepare_dynamic_jit",
|
|
"prepare_jit",
|
|
"prepare_qat",
|
|
"propagate_qconfig_",
|
|
"qconfig_equals",
|
|
"_get_quant_type_to_str",
|
|
"quantize",
|
|
"quantize_dynamic",
|
|
"quantize_dynamic_jit",
|
|
"quantize_jit",
|
|
"quantize_qat",
|
|
"script_qconfig",
|
|
"script_qconfig_dict",
|
|
"swap_module"
|
|
],
|
|
"torch.quantization.fake_quantize": [
|
|
"FakeQuantize",
|
|
"FakeQuantizeBase",
|
|
"FixedQParamsFakeQuantize",
|
|
"FusedMovingAvgObsFakeQuantize",
|
|
"default_fixed_qparams_range_0to1_fake_quant",
|
|
"default_affine_fixed_qparams_fake_quant",
|
|
"default_fake_quant",
|
|
"default_fused_act_fake_quant",
|
|
"default_fused_per_channel_wt_fake_quant",
|
|
"default_fused_wt_fake_quant",
|
|
"default_histogram_fake_quant",
|
|
"default_per_channel_weight_fake_quant",
|
|
"default_fixed_qparams_range_neg1to1_fake_quant",
|
|
"default_symmetric_fixed_qparams_fake_quant",
|
|
"default_weight_fake_quant",
|
|
"disable_fake_quant",
|
|
"disable_observer",
|
|
"enable_fake_quant",
|
|
"enable_observer"
|
|
],
|
|
"torch.quantization.fuse_modules": [
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_relu",
|
|
"fuse_known_modules",
|
|
"fuse_modules",
|
|
"get_fuser_method"
|
|
],
|
|
"torch.quantization.fuser_method_mappings": [
|
|
"fuse_conv_bn",
|
|
"fuse_conv_bn_relu",
|
|
"fuse_linear_bn",
|
|
"get_fuser_method"
|
|
],
|
|
"torch.quantization.observer": [
|
|
"ABC",
|
|
"HistogramObserver",
|
|
"MinMaxObserver",
|
|
"MovingAverageMinMaxObserver",
|
|
"MovingAveragePerChannelMinMaxObserver",
|
|
"NoopObserver",
|
|
"ObserverBase",
|
|
"PerChannelMinMaxObserver",
|
|
"PlaceholderObserver",
|
|
"RecordingObserver",
|
|
"default_debug_observer",
|
|
"default_dynamic_quant_observer",
|
|
"default_float_qparams_observer",
|
|
"default_histogram_observer",
|
|
"default_observer",
|
|
"default_per_channel_weight_observer",
|
|
"default_placeholder_observer",
|
|
"default_weight_observer",
|
|
"get_observer_state_dict",
|
|
"load_observer_state_dict"
|
|
],
|
|
"torch.quantization.qconfig": [
|
|
"QConfig",
|
|
"QConfigAny",
|
|
"QConfigDynamic",
|
|
"_add_module_to_qconfig_obs_ctr",
|
|
"_assert_valid_qconfig",
|
|
"get_default_qat_qconfig",
|
|
"get_default_qconfig",
|
|
"qconfig_equals"
|
|
],
|
|
"torch.quantization.quant_type": [
|
|
"QuantType",
|
|
"_get_quant_type_to_str"
|
|
],
|
|
"torch.quantization.quantization_mappings": [
|
|
"get_default_compare_output_module_list",
|
|
"get_default_dynamic_quant_module_mappings",
|
|
"get_default_float_to_quantized_operator_mappings",
|
|
"get_default_qat_module_mappings",
|
|
"get_default_qconfig_propagation_list",
|
|
"get_default_static_quant_module_mappings",
|
|
"get_dynamic_quant_module_class",
|
|
"get_quantized_operator",
|
|
"get_static_quant_module_class",
|
|
"no_observer_set"
|
|
],
|
|
"torch.quantization.quantize": [
|
|
"add_quant_dequant",
|
|
"convert",
|
|
"prepare",
|
|
"prepare_qat",
|
|
"propagate_qconfig_",
|
|
"quantize",
|
|
"quantize_dynamic",
|
|
"quantize_qat",
|
|
"swap_module"
|
|
],
|
|
"torch.quantization.quantize_jit": [
|
|
"convert_dynamic_jit",
|
|
"convert_jit",
|
|
"fuse_conv_bn_jit",
|
|
"prepare_dynamic_jit",
|
|
"prepare_jit",
|
|
"quantize_dynamic_jit",
|
|
"quantize_jit",
|
|
"script_qconfig",
|
|
"script_qconfig_dict"
|
|
],
|
|
"torch.quantization.stubs": [
|
|
"DeQuantStub",
|
|
"QuantStub",
|
|
"QuantWrapper"
|
|
],
|
|
"torch.quasirandom": [
|
|
"Optional"
|
|
],
|
|
"torch.random": [
|
|
"Generator"
|
|
],
|
|
"torch.serialization": [
|
|
"Any",
|
|
"BinaryIO",
|
|
"Dict",
|
|
"IO",
|
|
"Optional",
|
|
"Storage",
|
|
"Tuple",
|
|
"Type",
|
|
"Union",
|
|
"cast",
|
|
"closing",
|
|
"contextmanager",
|
|
"get_source_lines_and_file"
|
|
],
|
|
"torch.sparse": [
|
|
"BFloat16Tensor",
|
|
"ByteTensor",
|
|
"CharTensor",
|
|
"DoubleTensor",
|
|
"FloatTensor",
|
|
"HalfTensor",
|
|
"IntTensor",
|
|
"LongTensor",
|
|
"ShortTensor",
|
|
"addmm",
|
|
"log_softmax",
|
|
"mm",
|
|
"softmax"
|
|
],
|
|
"torch.special": [
|
|
"airy_ai",
|
|
"bessel_j0",
|
|
"bessel_j1",
|
|
"bessel_y0",
|
|
"bessel_y1",
|
|
"chebyshev_polynomial_t",
|
|
"chebyshev_polynomial_u",
|
|
"chebyshev_polynomial_v",
|
|
"chebyshev_polynomial_w",
|
|
"digamma",
|
|
"entr",
|
|
"erf",
|
|
"erfc",
|
|
"erfcx",
|
|
"erfinv",
|
|
"exp2",
|
|
"expit",
|
|
"expm1",
|
|
"gammainc",
|
|
"gammaincc",
|
|
"gammaln",
|
|
"hermite_polynomial_h",
|
|
"hermite_polynomial_he",
|
|
"i0",
|
|
"i0e",
|
|
"i1",
|
|
"i1e",
|
|
"laguerre_polynomial_l",
|
|
"legendre_polynomial_p",
|
|
"log1p",
|
|
"log_ndtr",
|
|
"log_softmax",
|
|
"logit",
|
|
"logsumexp",
|
|
"modified_bessel_i0",
|
|
"modified_bessel_i1",
|
|
"modified_bessel_k0",
|
|
"modified_bessel_k1",
|
|
"multigammaln",
|
|
"ndtr",
|
|
"ndtri",
|
|
"polygamma",
|
|
"psi",
|
|
"round",
|
|
"scaled_modified_bessel_k0",
|
|
"scaled_modified_bessel_k1",
|
|
"shifted_chebyshev_polynomial_t",
|
|
"shifted_chebyshev_polynomial_u",
|
|
"shifted_chebyshev_polynomial_v",
|
|
"shifted_chebyshev_polynomial_w",
|
|
"sinc",
|
|
"softmax",
|
|
"spherical_bessel_j0",
|
|
"xlog1py",
|
|
"xlogy",
|
|
"zeta"
|
|
],
|
|
"torch.storage": [
|
|
"Any",
|
|
"Storage",
|
|
"Type",
|
|
"TypeVar",
|
|
"Union",
|
|
"cast",
|
|
"lru_cache"
|
|
],
|
|
"torch.testing": [
|
|
"FileCheck",
|
|
"all_types",
|
|
"all_types_and",
|
|
"all_types_and_complex",
|
|
"all_types_and_complex_and",
|
|
"all_types_and_half",
|
|
"assert_allclose",
|
|
"assert_close",
|
|
"complex_types",
|
|
"double_types",
|
|
"empty_types",
|
|
"floating_and_complex_types",
|
|
"floating_and_complex_types_and",
|
|
"floating_types",
|
|
"floating_types_and",
|
|
"floating_types_and_half",
|
|
"get_all_complex_dtypes",
|
|
"get_all_device_types",
|
|
"get_all_dtypes",
|
|
"get_all_fp_dtypes",
|
|
"get_all_int_dtypes",
|
|
"get_all_math_dtypes",
|
|
"integral_types",
|
|
"integral_types_and",
|
|
"make_non_contiguous",
|
|
"make_tensor",
|
|
"rand",
|
|
"randn"
|
|
],
|
|
"torch.types": [
|
|
"Any",
|
|
"Device",
|
|
"List",
|
|
"Number",
|
|
"Sequence",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.utils.benchmark.utils.compare": [
|
|
"Colorize",
|
|
"Table",
|
|
"optional_min"
|
|
],
|
|
"torch.utils.benchmark.utils.cpp_jit": [
|
|
"Any",
|
|
"CallgrindModuleType",
|
|
"List",
|
|
"Optional",
|
|
"TimeitModuleType"
|
|
],
|
|
"torch.utils.benchmark.utils.fuzzer": [
|
|
"dtype_size",
|
|
"prod"
|
|
],
|
|
"torch.utils.benchmark.utils.sparse_fuzzer": [
|
|
"FuzzedTensor",
|
|
"Number",
|
|
"Optional",
|
|
"Tuple",
|
|
"Union"
|
|
],
|
|
"torch.utils.benchmark.utils.timer": [
|
|
"CPPTimer",
|
|
"timer"
|
|
],
|
|
"torch.utils.benchmark.utils.valgrind_wrapper.timer_interface": [
|
|
"GlobalsBridge",
|
|
"Serialization",
|
|
"wrapper_singleton"
|
|
],
|
|
"torch.utils.data": [
|
|
"_DatasetKind",
|
|
"argument_validation",
|
|
"default_collate",
|
|
"default_convert",
|
|
"functional_datapipe",
|
|
"get_worker_info",
|
|
"guaranteed_datapipes_determinism",
|
|
"non_deterministic",
|
|
"runtime_validation",
|
|
"runtime_validation_disabled"
|
|
],
|
|
"torch.utils.data.dataloader": [
|
|
"default_collate",
|
|
"default_convert",
|
|
"get_worker_info"
|
|
],
|
|
"torch.utils.data.datapipes.dataframe": [
|
|
"DFIterDataPipe"
|
|
],
|
|
"torch.utils.dlpack": [
|
|
"Any",
|
|
"to_dlpack"
|
|
],
|
|
"torch": [
|
|
"BFloat16Storage",
|
|
"BFloat16Tensor",
|
|
"ComplexDoubleStorage",
|
|
"ComplexFloatStorage",
|
|
"DisableTorchFunction",
|
|
"DisableTorchFunctionSubclass",
|
|
"Generator",
|
|
"HalfStorage",
|
|
"HalfTensor",
|
|
"QInt32Storage",
|
|
"QInt8Storage",
|
|
"QUInt2x4Storage",
|
|
"QUInt4x2Storage",
|
|
"QUInt8Storage",
|
|
"Storage",
|
|
"TypedStorage",
|
|
"_adaptive_avg_pool2d",
|
|
"_adaptive_avg_pool3d",
|
|
"_add_batch_dim",
|
|
"_add_relu",
|
|
"_add_relu_",
|
|
"_addmm_activation",
|
|
"_aminmax",
|
|
"_amp_foreach_non_finite_check_and_unscale_",
|
|
"_amp_update_scale_",
|
|
"_assert_async",
|
|
"_batch_norm_impl_index",
|
|
"_cast_Byte",
|
|
"_cast_Char",
|
|
"_cast_Double",
|
|
"_cast_Float",
|
|
"_cast_Half",
|
|
"_cast_Int",
|
|
"_cast_Long",
|
|
"_cast_Short",
|
|
"_choose_qparams_per_tensor",
|
|
"_coalesce",
|
|
"_compute_linear_combination",
|
|
"_conj",
|
|
"_conj_copy",
|
|
"_conj_physical",
|
|
"_convert_indices_from_coo_to_csr",
|
|
"_convert_indices_from_csr_to_coo",
|
|
"_convolution",
|
|
"_convolution_mode",
|
|
"_copy_from",
|
|
"_copy_from_and_resize",
|
|
"_ctc_loss",
|
|
"_cudnn_ctc_loss",
|
|
"_cudnn_init_dropout_state",
|
|
"_cudnn_rnn",
|
|
"_cudnn_rnn_flatten_weight",
|
|
"_cufft_clear_plan_cache",
|
|
"_cufft_get_plan_cache_max_size",
|
|
"_cufft_get_plan_cache_size",
|
|
"_cufft_set_plan_cache_max_size",
|
|
"_cummax_helper",
|
|
"_cummin_helper",
|
|
"_debug_has_internal_overlap",
|
|
"_det_lu_based_helper_backward_helper",
|
|
"_dim_arange",
|
|
"_dirichlet_grad",
|
|
"_disable_functionalization",
|
|
"_efficientzerotensor",
|
|
"_embedding_bag",
|
|
"_embedding_bag_forward_only",
|
|
"_empty_affine_quantized",
|
|
"_empty_per_channel_affine_quantized",
|
|
"_enable_functionalization",
|
|
"_euclidean_dist",
|
|
"_fake_quantize_learnable_per_channel_affine",
|
|
"_fake_quantize_learnable_per_tensor_affine",
|
|
"_fake_quantize_per_tensor_affine_cachemask_tensor_qparams",
|
|
"_fft_c2c",
|
|
"_fft_c2r",
|
|
"_fft_r2c",
|
|
"_foreach_abs",
|
|
"_foreach_abs_",
|
|
"_foreach_acos",
|
|
"_foreach_acos_",
|
|
"_foreach_add",
|
|
"_foreach_add_",
|
|
"_foreach_addcdiv",
|
|
"_foreach_addcdiv_",
|
|
"_foreach_addcmul",
|
|
"_foreach_addcmul_",
|
|
"_foreach_asin",
|
|
"_foreach_asin_",
|
|
"_foreach_atan",
|
|
"_foreach_atan_",
|
|
"_foreach_ceil",
|
|
"_foreach_ceil_",
|
|
"_foreach_cos",
|
|
"_foreach_cos_",
|
|
"_foreach_cosh",
|
|
"_foreach_cosh_",
|
|
"_foreach_div",
|
|
"_foreach_div_",
|
|
"_foreach_erf",
|
|
"_foreach_erf_",
|
|
"_foreach_erfc",
|
|
"_foreach_erfc_",
|
|
"_foreach_exp",
|
|
"_foreach_exp_",
|
|
"_foreach_expm1",
|
|
"_foreach_expm1_",
|
|
"_foreach_floor",
|
|
"_foreach_floor_",
|
|
"_foreach_frac",
|
|
"_foreach_frac_",
|
|
"_foreach_lgamma",
|
|
"_foreach_lgamma_",
|
|
"_foreach_log",
|
|
"_foreach_log10",
|
|
"_foreach_log10_",
|
|
"_foreach_log1p",
|
|
"_foreach_log1p_",
|
|
"_foreach_log2",
|
|
"_foreach_log2_",
|
|
"_foreach_log_",
|
|
"_foreach_maximum",
|
|
"_foreach_minimum",
|
|
"_foreach_mul",
|
|
"_foreach_mul_",
|
|
"_foreach_neg",
|
|
"_foreach_neg_",
|
|
"_foreach_norm",
|
|
"_foreach_reciprocal",
|
|
"_foreach_reciprocal_",
|
|
"_foreach_round",
|
|
"_foreach_round_",
|
|
"_foreach_sigmoid",
|
|
"_foreach_sigmoid_",
|
|
"_foreach_sign",
|
|
"_foreach_sign_",
|
|
"_foreach_sin",
|
|
"_foreach_sin_",
|
|
"_foreach_sinh",
|
|
"_foreach_sinh_",
|
|
"_foreach_sqrt",
|
|
"_foreach_sqrt_",
|
|
"_foreach_sub",
|
|
"_foreach_sub_",
|
|
"_foreach_tan",
|
|
"_foreach_tan_",
|
|
"_foreach_tanh",
|
|
"_foreach_tanh_",
|
|
"_foreach_trunc",
|
|
"_foreach_trunc_",
|
|
"_foreach_zero_",
|
|
"_from_functional_tensor",
|
|
"_fused_dropout",
|
|
"_fused_moving_avg_obs_fq_helper",
|
|
"_fw_primal_copy",
|
|
"_grid_sampler_2d_cpu_fallback",
|
|
"_has_compatible_shallow_copy_type",
|
|
"_histogramdd_bin_edges",
|
|
"_histogramdd_from_bin_cts",
|
|
"_histogramdd_from_bin_tensors",
|
|
"_index_put_impl_",
|
|
"_indices_copy",
|
|
"_is_functional_tensor",
|
|
"_is_zerotensor",
|
|
"_linalg_check_errors",
|
|
"_linalg_qr_helper",
|
|
"_linalg_svd",
|
|
"_linalg_solve_ex",
|
|
"_log_softmax",
|
|
"_log_softmax_backward_data",
|
|
"_logcumsumexp",
|
|
"_lu_with_info",
|
|
"_make_dual",
|
|
"_make_dual_copy",
|
|
"_make_per_channel_quantized_tensor",
|
|
"_make_per_tensor_quantized_tensor",
|
|
"_masked_scale",
|
|
"_masked_softmax",
|
|
"_mkldnn_reshape",
|
|
"_mkldnn_transpose",
|
|
"_mkldnn_transpose_",
|
|
"_neg_view",
|
|
"_neg_view_copy",
|
|
"_nested_from_padded",
|
|
"_nested_from_padded_and_nested_example",
|
|
"_nnpack_available",
|
|
"_nnpack_spatial_convolution",
|
|
"_pack_padded_sequence",
|
|
"_pad_packed_sequence",
|
|
"_pin_memory",
|
|
"_remove_batch_dim",
|
|
"_reshape_alias_copy",
|
|
"_reshape_from_tensor",
|
|
"_rowwise_prune",
|
|
"_sample_dirichlet",
|
|
"_saturate_weight_to_fp16",
|
|
"_shape_as_tensor",
|
|
"_sobol_engine_draw",
|
|
"_sobol_engine_ff_",
|
|
"_sobol_engine_initialize_state_",
|
|
"_sobol_engine_scramble_",
|
|
"_softmax",
|
|
"_softmax_backward_data",
|
|
"_sparse_broadcast_to",
|
|
"_sparse_broadcast_to_copy",
|
|
"_sparse_coo_tensor_unsafe",
|
|
"_sparse_csr_prod",
|
|
"_sparse_csr_sum",
|
|
"_sparse_csr_tensor_unsafe",
|
|
"_sparse_log_softmax_backward_data",
|
|
"_sparse_softmax_backward_data",
|
|
"_sparse_sparse_matmul",
|
|
"_sparse_sum",
|
|
"_stack",
|
|
"_standard_gamma",
|
|
"_standard_gamma_grad",
|
|
"_sync",
|
|
"_test_serialization_subcmul",
|
|
"_to_cpu",
|
|
"_to_functional_tensor",
|
|
"_torch_cuda_cu_linker_symbol_op",
|
|
"_trilinear",
|
|
"_unique",
|
|
"_unique2",
|
|
"_unpack_dual",
|
|
"_use_cudnn_ctc_loss",
|
|
"_use_cudnn_rnn_flatten_weight",
|
|
"_validate_sparse_compressed_tensor_args",
|
|
"_validate_sparse_coo_tensor_args",
|
|
"_validate_sparse_csr_tensor_args",
|
|
"_values_copy",
|
|
"_weight_norm",
|
|
"_weight_norm_interface",
|
|
"autocast",
|
|
"broadcast_shapes",
|
|
"candidate",
|
|
"compiled_with_cxx11_abi",
|
|
"from_dlpack",
|
|
"lobpcg",
|
|
"lu",
|
|
"obj",
|
|
"segment_reduce",
|
|
"set_default_dtype",
|
|
"set_grad_enabled",
|
|
"set_printoptions",
|
|
"unique"
|
|
]
|
|
}
|